dp_tx_desc.c
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
    uint8_t sig_bit;                                                 \
    soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1;     \
    /* Calculate page divider to find page number */                 \
    sig_bit = 0;                                                     \
    while (num_desc_per_page) {                                      \
        sig_bit++;                                                   \
        num_desc_per_page = num_desc_per_page >> 1;                  \
    }                                                                \
    soc->tx_desc[pool_id].page_divider = (sig_bit - 1);              \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
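
/*
 * Illustrative sketch, not driver code: with DESC_PARTITION unset,
 * num_desc_per_page is a power of two, so page_divider is
 * log2(num_desc_per_page) and offset_filter is the matching bit mask.
 * A descriptor index can then be split into a page number and an
 * offset within that page, roughly as below. The helper name
 * dp_tx_desc_find_example() is hypothetical, and it assumes the
 * offset_filter/page_divider fields populated by the macro above.
 */
static inline struct dp_tx_desc_s *
dp_tx_desc_find_example(struct dp_soc *soc, uint8_t pool_id, uint32_t idx)
{
    struct dp_tx_desc_pool_s *pool = &soc->tx_desc[pool_id];
    uint32_t page = idx >> pool->page_divider;   /* which page */
    uint32_t offset = idx & pool->offset_filter; /* index within page */
    char *page_base = (char *)pool->desc_pages.cacheable_pages[page];

    return (struct dp_tx_desc_s *)(page_base + offset * pool->elem_size);
}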

/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
                                   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
                                   uint16_t num_elem)
{
    tx_desc_pool->num_free = num_elem;
    tx_desc_pool->num_allocated = 0;
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_clean_up() - Clean up the tx descriptors
 * @ctxt: context passed
 * @elem: element to be cleaned up
 * @elem_list: element list
 *
 */
void dp_tx_desc_clean_up(void *ctxt, void *elem, void *elem_list)
{
    struct dp_soc *soc = (struct dp_soc *)ctxt;
    struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem;
    qdf_nbuf_t *nbuf_list = (qdf_nbuf_t *)elem_list;
    qdf_nbuf_t nbuf = NULL;

    if (tx_desc->nbuf) {
        nbuf = dp_tx_comp_free_buf(soc, tx_desc, true);
        dp_tx_desc_release(tx_desc, tx_desc->pool_id);

        if (nbuf) {
            if (!nbuf_list) {
                dp_err("potential memory leak");
                qdf_assert_always(0);
            }
            /* Prepend the nbuf to the delayed-free list */
            nbuf->next = *nbuf_list;
            *nbuf_list = nbuf;
        }
    }
}

/**
 * dp_tx_desc_pool_cleanup() - Clean up the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @nbuf_list: nbuf list for delayed free
 *
 */
void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
    int i;
    struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
    uint32_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

    for (i = 0; i < num_pool; i++) {
        tx_desc_pool = &soc->tx_desc[i];

        if (tx_desc_pool)
            qdf_tx_desc_pool_free_bufs(soc,
                                       &tx_desc_pool->desc_pages,
                                       tx_desc_pool->elem_size,
                                       tx_desc_pool->elem_count,
                                       true, &dp_tx_desc_clean_up,
                                       nbuf_list);
    }
}
#endif
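
/*
 * Illustrative sketch, an assumption rather than driver code: a UMAC
 * reset handler would typically pass the address of a local nbuf chain
 * so that dp_tx_desc_clean_up() can stash buffers for deferred
 * freeing, then release the whole chain afterwards, outside the
 * descriptor-pool cleanup. The function name below is hypothetical.
 */
static void dp_umac_reset_tx_cleanup_example(struct dp_soc *soc)
{
    qdf_nbuf_t nbuf_list = NULL;

    dp_tx_desc_pool_cleanup(soc, &nbuf_list);

    /* Free the nbufs chained up by dp_tx_desc_clean_up() */
    while (nbuf_list) {
        qdf_nbuf_t next = nbuf_list->next;

        qdf_nbuf_free(nbuf_list);
        nbuf_list = next;
    }
}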

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within host for tx data path).
 * The number of tx descriptors required will be large since, based on
 * the number of clients (1024 clients x 3 radios), the number of
 * outstanding MSDUs stored in TQM queues and LMAC queues will be
 * significantly large.
 *
 * To avoid allocating a large contiguous memory block, the qdf
 * multi_page_alloc function is used to allocate memory in multiple
 * pages. The allocated memory is then iterated across pages and each
 * descriptor is linked to the next, taking care of page boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring; this minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for VDEV
 * level flow control.
 *
 * Return: Status code. 0 for success.
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
                                 uint32_t num_elem)
{
    uint32_t desc_size;
    struct dp_tx_desc_pool_s *tx_desc_pool;

    desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
    tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
    tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
    dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE,
                                  &tx_desc_pool->desc_pages,
                                  desc_size, num_elem,
                                  0, true);

    if (!tx_desc_pool->desc_pages.num_pages) {
        dp_err("Multi page alloc fail, tx desc");
        return QDF_STATUS_E_NOMEM;
    }
    return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_desc_pool_free() - Free the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 */
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
    struct dp_tx_desc_pool_s *tx_desc_pool;

    tx_desc_pool = &((soc)->tx_desc[pool_id]);

    if (tx_desc_pool->desc_pages.num_pages)
        dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE,
                                     &tx_desc_pool->desc_pages, 0,
                                     true);
}

/**
 * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
                                uint32_t num_elem)
{
    struct dp_tx_desc_pool_s *tx_desc_pool;
    uint32_t desc_size;

    desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

    tx_desc_pool = &soc->tx_desc[pool_id];
    if (qdf_mem_multi_page_link(soc->osdev,
                                &tx_desc_pool->desc_pages,
                                desc_size, num_elem, true)) {
        dp_err("invalid tx desc allocation - overflow num link");
        return QDF_STATUS_E_FAULT;
    }

    tx_desc_pool->freelist = (struct dp_tx_desc_s *)
        *tx_desc_pool->desc_pages.cacheable_pages;
    /* Set unique IDs for each Tx descriptor */
    if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
                                        soc, num_elem, pool_id)) {
        dp_err("initialization per target failed");
        return QDF_STATUS_E_FAULT;
    }

    tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

    dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
    TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

    return QDF_STATUS_SUCCESS;
}
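
/*
 * Illustrative sketch, an assumption rather than driver code: the
 * expected call order for a pool is alloc -> init at attach time and
 * deinit -> free at detach time, mirroring the functions in this file.
 * The wrapper name below is hypothetical.
 */
static QDF_STATUS dp_tx_desc_pool_setup_example(struct dp_soc *soc,
                                                uint8_t pool_id,
                                                uint32_t num_elem)
{
    QDF_STATUS status;

    /* Reserve multi-page backing memory for the pool */
    status = dp_tx_desc_pool_alloc(soc, pool_id, num_elem);
    if (QDF_IS_STATUS_ERROR(status))
        return status;

    /* Link the pages into a freelist and assign descriptor IDs */
    status = dp_tx_desc_pool_init(soc, pool_id, num_elem);
    if (QDF_IS_STATUS_ERROR(status))
        dp_tx_desc_pool_free(soc, pool_id);

    return status;
}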

/**
 * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 */
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
{
    struct dp_tx_desc_pool_s *tx_desc_pool;

    tx_desc_pool = &soc->tx_desc[pool_id];
    soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool, pool_id);
    TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
    TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}

/**
 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint32_t num_elem)
{
    QDF_STATUS status = QDF_STATUS_SUCCESS;
    qdf_dma_context_t memctx = 0;
    uint8_t pool_id, count;
    uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
    struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
    uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

    /* Coherent tx extension descriptor alloc */
    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
        memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
        dp_desc_multi_pages_mem_alloc(
                        soc, DP_TX_EXT_DESC_TYPE,
                        &dp_tx_ext_desc_pool->desc_pages,
                        elem_size,
                        num_elem,
                        memctx, false);

        if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      "ext desc page alloc fail");
            status = QDF_STATUS_E_NOMEM;
            goto fail_exit;
        }
    }

    /*
     * Cacheable ext descriptor link alloc.
     * This structure is also large: a single element is 24 bytes,
     * so 2K elements take 48 KB. Hence it is likewise allocated as
     * multi-page cacheable memory.
     */
    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
        dp_desc_multi_pages_mem_alloc(
                        soc,
                        DP_TX_EXT_DESC_LINK_TYPE,
                        &dp_tx_ext_desc_pool->desc_link_pages,
                        link_elem_size,
                        num_elem,
                        0, true);

        if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      "ext link desc page alloc fail");
            status = QDF_STATUS_E_NOMEM;
            goto free_ext_desc_page;
        }
    }

    return status;

free_ext_desc_page:
    /* Link pages were allocated for pools [0, pool_id) only */
    for (count = 0; count < pool_id; count++) {
        dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
        dp_desc_multi_pages_mem_free(
                        soc, DP_TX_EXT_DESC_LINK_TYPE,
                        &dp_tx_ext_desc_pool->desc_link_pages,
                        0, true);
    }

    /* Coherent pages exist for all pools at this point, free them all */
    pool_id = num_pool;

fail_exit:
    for (count = 0; count < pool_id; count++) {
        dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
        memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
        dp_desc_multi_pages_mem_free(
                        soc, DP_TX_EXT_DESC_TYPE,
                        &dp_tx_ext_desc_pool->desc_pages,
                        memctx, false);
    }

    return status;
}

/**
 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint32_t num_elem)
{
    uint32_t i;
    struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
    struct qdf_mem_dma_page_t *page_info;
    struct qdf_mem_multi_page_t *pages;
    struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
    uint8_t pool_id;
    QDF_STATUS status;

    /* link tx descriptors into a freelist */
    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
        soc->tx_ext_desc[pool_id].elem_size =
            HAL_TX_EXT_DESC_WITH_META_DATA;
        soc->tx_ext_desc[pool_id].link_elem_size =
            sizeof(struct dp_tx_ext_desc_elem_s);
        soc->tx_ext_desc[pool_id].elem_count = num_elem;

        dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
            *dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

        if (qdf_mem_multi_page_link(soc->osdev,
                                    &dp_tx_ext_desc_pool->
                                    desc_link_pages,
                                    dp_tx_ext_desc_pool->link_elem_size,
                                    dp_tx_ext_desc_pool->elem_count,
                                    true)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      "ext link desc page linking fail");
            status = QDF_STATUS_E_FAULT;
            goto fail;
        }

        /* Assign coherent memory pointer into linked free list */
        pages = &dp_tx_ext_desc_pool->desc_pages;
        page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
        c_elem = dp_tx_ext_desc_pool->freelist;
        p_elem = c_elem;
        for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
            if (!(i % pages->num_element_per_page)) {
                /*
                 * First element of a new page should
                 * point to the next page
                 */
                if (!pages->dma_pages->page_v_addr_start) {
                    QDF_TRACE(QDF_MODULE_ID_DP,
                              QDF_TRACE_LEVEL_ERROR,
                              "link overflow");
                    status = QDF_STATUS_E_FAULT;
                    goto fail;
                }

                c_elem->vaddr =
                    (void *)page_info->page_v_addr_start;
                c_elem->paddr = page_info->page_p_addr;
                page_info++;
            } else {
                c_elem->vaddr = (void *)(p_elem->vaddr +
                    dp_tx_ext_desc_pool->elem_size);
                c_elem->paddr = (p_elem->paddr +
                    dp_tx_ext_desc_pool->elem_size);
            }
            p_elem = c_elem;
            c_elem = c_elem->next;
            if (!c_elem)
                break;
        }

        dp_tx_ext_desc_pool->num_free = num_elem;
        qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);
    }

    return QDF_STATUS_SUCCESS;

fail:
    return status;
}
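
/*
 * Illustrative sketch, an assumption rather than driver code: once a
 * pool is initialized, the Tx path pops an extension descriptor from
 * the per-pool freelist under the pool lock; the element's paddr is
 * what gets programmed into the HW Tx descriptor. The helper name
 * below is hypothetical.
 */
static inline struct dp_tx_ext_desc_elem_s *
dp_tx_ext_desc_pop_example(struct dp_soc *soc, uint8_t pool_id)
{
    struct dp_tx_ext_desc_pool_s *pool = &soc->tx_ext_desc[pool_id];
    struct dp_tx_ext_desc_elem_s *elem = NULL;

    qdf_spin_lock_bh(&pool->lock);
    if (pool->freelist) {
        elem = pool->freelist;
        pool->freelist = elem->next; /* unlink from the freelist */
        pool->num_free--;
    }
    qdf_spin_unlock_bh(&pool->lock);

    return elem;
}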

/**
 * dp_tx_ext_desc_pool_free() - free Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 */
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
    uint8_t pool_id;
    struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
    qdf_dma_context_t memctx = 0;

    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
        memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

        dp_desc_multi_pages_mem_free(
                        soc, DP_TX_EXT_DESC_LINK_TYPE,
                        &dp_tx_ext_desc_pool->desc_link_pages,
                        0, true);

        dp_desc_multi_pages_mem_free(
                        soc, DP_TX_EXT_DESC_TYPE,
                        &dp_tx_ext_desc_pool->desc_pages,
                        memctx, false);
    }
}

/**
 * dp_tx_ext_desc_pool_deinit() - deinit Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 */
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
    uint8_t pool_id;
    struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
        qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
    }
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint32_t num_elem)
{
    struct dp_tx_tso_seg_pool_s *tso_desc_pool;
    uint32_t desc_size, pool_id, i;

    desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        tso_desc_pool = &soc->tx_tso_desc[pool_id];
        tso_desc_pool->num_free = 0;
        dp_desc_multi_pages_mem_alloc(
                        soc,
                        DP_TX_TSO_DESC_TYPE,
                        &tso_desc_pool->desc_pages,
                        desc_size,
                        num_elem, 0, true);

        if (!tso_desc_pool->desc_pages.num_pages) {
            dp_err("Multi page alloc fail, tso desc");
            goto fail;
        }
    }
    return QDF_STATUS_SUCCESS;

fail:
    /* Unwind the pools that were successfully allocated */
    for (i = 0; i < pool_id; i++) {
        tso_desc_pool = &soc->tx_tso_desc[i];
        dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
                                     &tso_desc_pool->desc_pages,
                                     0, true);
    }
    return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
    struct dp_tx_tso_seg_pool_s *tso_desc_pool;
    uint32_t pool_id;

    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        tso_desc_pool = &soc->tx_tso_desc[pool_id];
        dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
                                     &tso_desc_pool->desc_pages,
                                     0, true);
    }
}

/**
 * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint32_t num_elem)
{
    struct dp_tx_tso_seg_pool_s *tso_desc_pool;
    uint32_t desc_size, pool_id;

    desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        tso_desc_pool = &soc->tx_tso_desc[pool_id];

        if (qdf_mem_multi_page_link(soc->osdev,
                                    &tso_desc_pool->desc_pages,
                                    desc_size,
                                    num_elem, true)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      "invalid tso desc allocation - overflow num link");
            return QDF_STATUS_E_FAULT;
        }

        tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
            *tso_desc_pool->desc_pages.cacheable_pages;
        tso_desc_pool->num_free = num_elem;

        TSO_DEBUG("Number of free descriptors: %u\n",
                  tso_desc_pool->num_free);
        tso_desc_pool->pool_size = num_elem;
        qdf_spinlock_create(&tso_desc_pool->lock);
    }
    return QDF_STATUS_SUCCESS;
}
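
/*
 * Illustrative sketch, an assumption rather than driver code: TSO
 * segment descriptors are recycled by pushing them back onto the pool
 * freelist under the pool lock, the inverse of the freelist pop on
 * the Tx path. The helper name below is hypothetical.
 */
static inline void
dp_tx_tso_seg_put_example(struct dp_soc *soc, uint8_t pool_id,
                          struct qdf_tso_seg_elem_t *seg)
{
    struct dp_tx_tso_seg_pool_s *pool = &soc->tx_tso_desc[pool_id];

    qdf_spin_lock_bh(&pool->lock);
    seg->next = pool->freelist; /* push back onto the freelist */
    pool->freelist = seg;
    pool->num_free++;
    qdf_spin_unlock_bh(&pool->lock);
}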

/**
 * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 */
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
    struct dp_tx_tso_seg_pool_s *tso_desc_pool;
    uint32_t pool_id;

    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        tso_desc_pool = &soc->tx_tso_desc[pool_id];

        qdf_spin_lock_bh(&tso_desc_pool->lock);

        tso_desc_pool->freelist = NULL;
        tso_desc_pool->num_free = 0;
        tso_desc_pool->pool_size = 0;

        qdf_spin_unlock_bh(&tso_desc_pool->lock);
        qdf_spinlock_destroy(&tso_desc_pool->lock);
    }
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                                  fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to allocate
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                        uint32_t num_elem)
{
    struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
    uint32_t desc_size, pool_id, i;

    desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
        tso_num_seg_pool->num_free = 0;
        dp_desc_multi_pages_mem_alloc(soc, DP_TX_TSO_NUM_SEG_TYPE,
                                      &tso_num_seg_pool->desc_pages,
                                      desc_size,
                                      num_elem, 0, true);

        if (!tso_num_seg_pool->desc_pages.num_pages) {
            dp_err("Multi page alloc fail, tso_num_seg_pool");
            goto fail;
        }
    }
    return QDF_STATUS_SUCCESS;

fail:
    for (i = 0; i < pool_id; i++) {
        tso_num_seg_pool = &soc->tx_tso_num_seg[i];
        dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
                                     &tso_num_seg_pool->desc_pages,
                                     0, true);
    }
    return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free descriptors that track the
 *                                 fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to free
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
    struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
    uint32_t pool_id;

    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
        dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
                                     &tso_num_seg_pool->desc_pages,
                                     0, true);
    }
}

/**
 * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that track the
 *                                 fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to initialize
 * @num_elem: total number of descriptors to be initialized
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                       uint32_t num_elem)
{
    struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
    uint32_t desc_size, pool_id;

    desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
        if (qdf_mem_multi_page_link(soc->osdev,
                                    &tso_num_seg_pool->desc_pages,
                                    desc_size,
                                    num_elem, true)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      "invalid tso desc allocation - overflow num link");
            return QDF_STATUS_E_FAULT;
        }

        tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
            *tso_num_seg_pool->desc_pages.cacheable_pages;
        tso_num_seg_pool->num_free = num_elem;
        tso_num_seg_pool->num_seg_pool_size = num_elem;
        qdf_spinlock_create(&tso_num_seg_pool->lock);
    }
    return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that track
 *                                   the fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to de-initialize
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
    struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
    uint32_t pool_id;

    for (pool_id = 0; pool_id < num_pool; pool_id++) {
        tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
        qdf_spin_lock_bh(&tso_num_seg_pool->lock);

        tso_num_seg_pool->freelist = NULL;
        tso_num_seg_pool->num_free = 0;
        tso_num_seg_pool->num_seg_pool_size = 0;

        qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
        qdf_spinlock_destroy(&tso_num_seg_pool->lock);
    }
}
#else /* !FEATURE_TSO: stub implementations */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint32_t num_elem)
{
    return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint32_t num_elem)
{
    return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                        uint32_t num_elem)
{
    return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                       uint32_t num_elem)
{
    return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif /* FEATURE_TSO */