/* dp_tx_desc.c */
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
/* Pool element size is rounded up to the next power of two so that
 * page/offset lookup can be done with shifts and masks. */
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)

/*
 * DP_TX_DESC_PAGE_DIVIDER() - precompute the page lookup fields of a pool
 *
 * Derives offset_filter (mask within a page) and page_divider (shift count
 * to get the page index) for soc->tx_desc[pool_id].
 *
 * NOTE: this macro evaluates AND destroys its num_desc_per_page argument
 * (shifts it down to zero), so callers must pass a scratch variable, never
 * an expression with side effects or a value needed afterwards.
 * Assumes num_desc_per_page is a power of two — TODO confirm at call sites.
 */
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) \
do { \
	uint8_t sig_bit; \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */ \
	sig_bit = 0; \
	while (num_desc_per_page) { \
		sig_bit++; \
		num_desc_per_page = num_desc_per_page >> 1; \
	} \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1); \
} while (0)
#else
/* Partitioned descriptors: use the natural size, no page math needed */
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* With flow control V2 the pool counters are managed by the flow-control
 * code, so there is nothing to initialize here. */
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
	/* Every descriptor starts out on the free list */
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif
  62. /**
  63. * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
  64. * @soc Handle to DP SoC structure
  65. * @pool_id pool to allocate
  66. * @num_elem Number of descriptor elements per pool
  67. *
  68. * This function allocates memory for SW tx descriptors
  69. * (used within host for tx data path).
  70. * The number of tx descriptors required will be large
  71. * since based on number of clients (1024 clients x 3 radios),
  72. * outstanding MSDUs stored in TQM queues and LMAC queues will be significantly
  73. * large.
  74. *
  75. * To avoid allocating a large contiguous memory, it uses multi_page_alloc qdf
  76. * function to allocate memory
  77. * in multiple pages. It then iterates through the memory allocated across pages
  78. * and links each descriptor
  79. * to next descriptor, taking care of page boundaries.
  80. *
  81. * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are allocated,
  82. * one for each ring;
  83. * This minimizes lock contention when hard_start_xmit is called
  84. * from multiple CPUs.
  85. * Alternately, multiple pools can be used for multiple VDEVs for VDEV level
  86. * flow control.
  87. *
  88. * Return: Status code. 0 for success.
  89. */
  90. QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
  91. uint32_t num_elem)
  92. {
  93. uint32_t desc_size;
  94. struct dp_tx_desc_pool_s *tx_desc_pool;
  95. desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
  96. tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
  97. tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
  98. dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE,
  99. &tx_desc_pool->desc_pages,
  100. desc_size, num_elem,
  101. 0, true);
  102. if (!tx_desc_pool->desc_pages.num_pages) {
  103. dp_err("Multi page alloc fail, tx desc");
  104. return QDF_STATUS_E_NOMEM;
  105. }
  106. return QDF_STATUS_SUCCESS;
  107. }
  108. /**
  109. * dp_tx_desc_pool_free() - Free the tx dexcriptor pools
  110. * @soc: Handle to DP SoC structure
  111. * @pool_id: pool to free
  112. *
  113. */
  114. void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
  115. {
  116. struct dp_tx_desc_pool_s *tx_desc_pool;
  117. tx_desc_pool = &((soc)->tx_desc[pool_id]);
  118. if (tx_desc_pool->desc_pages.num_pages)
  119. dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE,
  120. &tx_desc_pool->desc_pages, 0,
  121. true);
  122. }
  123. /**
  124. * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
  125. * @soc: Handle to DP SoC structure
  126. * @pool_id: pool to allocate
  127. * @num_elem: Number of descriptor elements per pool
  128. *
  129. * Return: QDF_STATUS_SUCCESS
  130. * QDF_STATUS_E_FAULT
  131. */
  132. QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
  133. uint32_t num_elem)
  134. {
  135. struct dp_tx_desc_pool_s *tx_desc_pool;
  136. uint32_t desc_size;
  137. desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
  138. tx_desc_pool = &soc->tx_desc[pool_id];
  139. if (qdf_mem_multi_page_link(soc->osdev,
  140. &tx_desc_pool->desc_pages,
  141. desc_size, num_elem, true)) {
  142. dp_err("invalid tx desc allocation -overflow num link");
  143. return QDF_STATUS_E_FAULT;
  144. }
  145. tx_desc_pool->freelist = (struct dp_tx_desc_s *)
  146. *tx_desc_pool->desc_pages.cacheable_pages;
  147. /* Set unique IDs for each Tx descriptor */
  148. if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
  149. soc, num_elem, pool_id)) {
  150. dp_err("initialization per target failed");
  151. return QDF_STATUS_E_FAULT;
  152. }
  153. tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
  154. dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
  155. TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
  156. return QDF_STATUS_SUCCESS;
  157. }
  158. /**
  159. * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
  160. * @soc Handle to DP SoC structure
  161. * @pool_id: pool to de-initialize
  162. *
  163. */
  164. void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
  165. {
  166. struct dp_tx_desc_pool_s *tx_desc_pool;
  167. tx_desc_pool = &soc->tx_desc[pool_id];
  168. soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool, pool_id);
  169. TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
  170. TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
  171. }
/**
 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Each pool needs two allocations: coherent (DMA) pages holding the HW
 * extension descriptors, and cacheable pages holding the SW link elements
 * that track them.
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_context_t memctx = 0;
	uint8_t pool_id, count;
	uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

	/* Coherent tx extension descriptor alloc */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		dp_desc_multi_pages_mem_alloc(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					elem_size,
					num_elem,
					memctx, false);
		if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto fail_exit;
		}
	}

	/*
	 * Cacheable ext descriptor link alloc
	 * This structure also large size already
	 * single element is 24bytes, 2K elements are 48Kbytes
	 * Have to alloc multi page cacheable memory
	 */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		dp_desc_multi_pages_mem_alloc(
					soc,
					DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					link_elem_size,
					num_elem,
					0, true);
		if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto free_ext_desc_page;
		}
	}

	return status;

free_ext_desc_page:
	/* Link-page loop failed at index pool_id: unwind pools [0, pool_id) */
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					0, true);
	}

	/* All num_pool coherent allocations succeeded before the link loop,
	 * so make the fall-through below free every one of them */
	pool_id = num_pool;

fail_exit:
	/* Free coherent pages for pools [0, pool_id) */
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					memctx, false);
	}

	return status;
}
/**
 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Links the cacheable link elements into a freelist, then walks that list
 * assigning each element the virtual/physical address of its coherent HW
 * descriptor, advancing page by page through the DMA pages.
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	uint32_t i;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint8_t pool_id;
	QDF_STATUS status;

	/* link tx descriptors into a freelist */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		soc->tx_ext_desc[pool_id].elem_size =
			HAL_TX_EXT_DESC_WITH_META_DATA;
		soc->tx_ext_desc[pool_id].link_elem_size =
			sizeof(struct dp_tx_ext_desc_elem_s);
		soc->tx_ext_desc[pool_id].elem_count = num_elem;

		/* Freelist head = first element of first cacheable link page */
		dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
			*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;
		if (qdf_mem_multi_page_link(soc->osdev,
					    &dp_tx_ext_desc_pool->
					    desc_link_pages,
					    dp_tx_ext_desc_pool->link_elem_size,
					    dp_tx_ext_desc_pool->elem_count,
					    true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page linking fail");
			status = QDF_STATUS_E_FAULT;
			goto fail;
		}

		/* Assign coherent memory pointer into linked free list */
		pages = &dp_tx_ext_desc_pool->desc_pages;
		page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
		c_elem = dp_tx_ext_desc_pool->freelist;
		p_elem = c_elem;
		for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
			if (!(i % pages->num_element_per_page)) {
				/*
				 * First element for new page,
				 * should point next page
				 */
				/* NOTE(review): this always tests the FIRST
				 * dma page, not the page page_info points at —
				 * looks like it was meant to be
				 * page_info->page_v_addr_start; confirm. */
				if (!pages->dma_pages->page_v_addr_start) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "link over flow");
					status = QDF_STATUS_E_FAULT;
					goto fail;
				}
				c_elem->vaddr =
					(void *)page_info->page_v_addr_start;
				c_elem->paddr = page_info->page_p_addr;
				page_info++;
			} else {
				/* Subsequent element: previous address plus
				 * one HW descriptor. NOTE(review): arithmetic
				 * on void * relies on the GCC extension
				 * (void* advances by 1 byte per unit). */
				c_elem->vaddr = (void *)(p_elem->vaddr +
					dp_tx_ext_desc_pool->elem_size);
				c_elem->paddr = (p_elem->paddr +
					dp_tx_ext_desc_pool->elem_size);
			}
			p_elem = c_elem;
			c_elem = c_elem->next;
			if (!c_elem)
				break;
		}
		dp_tx_ext_desc_pool->num_free = num_elem;
		qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);
	}
	return QDF_STATUS_SUCCESS;

fail:
	return status;
}
  330. /**
  331. * dp_tx_ext_desc_pool_free() - free Tx extenstion Descriptor pool(s)
  332. * @soc: Handle to DP SoC structure
  333. * @num_pool: Number of pools to free
  334. *
  335. */
  336. void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
  337. {
  338. uint8_t pool_id;
  339. struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
  340. qdf_dma_context_t memctx = 0;
  341. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  342. dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
  343. memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
  344. dp_desc_multi_pages_mem_free(
  345. soc, DP_TX_EXT_DESC_LINK_TYPE,
  346. &dp_tx_ext_desc_pool->desc_link_pages,
  347. 0, true);
  348. dp_desc_multi_pages_mem_free(
  349. soc, DP_TX_EXT_DESC_TYPE,
  350. &dp_tx_ext_desc_pool->desc_pages,
  351. memctx, false);
  352. }
  353. }
  354. /**
  355. * dp_tx_ext_desc_pool_deinit() - deinit Tx extenstion Descriptor pool(s)
  356. * @soc: Handle to DP SoC structure
  357. * @num_pool: Number of pools to de-initialize
  358. *
  359. */
  360. void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
  361. {
  362. uint8_t pool_id;
  363. struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
  364. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  365. dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
  366. qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
  367. }
  368. }
  369. #if defined(FEATURE_TSO)
  370. /**
  371. * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
  372. * @soc: Handle to DP SoC structure
  373. * @num_pool: Number of pools to allocate
  374. * @num_elem: Number of descriptor elements per pool
  375. *
  376. * Return - QDF_STATUS_SUCCESS
  377. * QDF_STATUS_E_NOMEM
  378. */
  379. QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
  380. uint32_t num_elem)
  381. {
  382. struct dp_tx_tso_seg_pool_s *tso_desc_pool;
  383. uint32_t desc_size, pool_id, i;
  384. desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
  385. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  386. tso_desc_pool = &soc->tx_tso_desc[pool_id];
  387. tso_desc_pool->num_free = 0;
  388. dp_desc_multi_pages_mem_alloc(
  389. soc,
  390. DP_TX_TSO_DESC_TYPE,
  391. &tso_desc_pool->desc_pages,
  392. desc_size,
  393. num_elem, 0, true);
  394. if (!tso_desc_pool->desc_pages.num_pages) {
  395. dp_err("Multi page alloc fail, tx desc");
  396. goto fail;
  397. }
  398. }
  399. return QDF_STATUS_SUCCESS;
  400. fail:
  401. for (i = 0; i < pool_id; i++) {
  402. tso_desc_pool = &soc->tx_tso_desc[i];
  403. dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
  404. &tso_desc_pool->desc_pages,
  405. 0, true);
  406. }
  407. return QDF_STATUS_E_NOMEM;
  408. }
  409. /**
  410. * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
  411. * @soc: Handle to DP SoC structure
  412. * @num_pool: Number of pools to free
  413. *
  414. */
  415. void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
  416. {
  417. struct dp_tx_tso_seg_pool_s *tso_desc_pool;
  418. uint32_t pool_id;
  419. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  420. tso_desc_pool = &soc->tx_tso_desc[pool_id];
  421. dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
  422. &tso_desc_pool->desc_pages,
  423. 0, true);
  424. }
  425. }
  426. /**
  427. * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
  428. * @soc: Handle to DP SoC structure
  429. * @num_pool: Number of pools to initialize
  430. * @num_elem: Number of descriptor elements per pool
  431. *
  432. * Return - QDF_STATUS_SUCCESS
  433. * QDF_STATUS_E_NOMEM
  434. */
  435. QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
  436. uint32_t num_elem)
  437. {
  438. struct dp_tx_tso_seg_pool_s *tso_desc_pool;
  439. uint32_t desc_size, pool_id;
  440. desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
  441. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  442. tso_desc_pool = &soc->tx_tso_desc[pool_id];
  443. if (qdf_mem_multi_page_link(soc->osdev,
  444. &tso_desc_pool->desc_pages,
  445. desc_size,
  446. num_elem, true)) {
  447. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  448. "invalid tso desc allocation - overflow num link");
  449. return QDF_STATUS_E_FAULT;
  450. }
  451. tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
  452. *tso_desc_pool->desc_pages.cacheable_pages;
  453. tso_desc_pool->num_free = num_elem;
  454. TSO_DEBUG("Number of free descriptors: %u\n",
  455. tso_desc_pool->num_free);
  456. tso_desc_pool->pool_size = num_elem;
  457. qdf_spinlock_create(&tso_desc_pool->lock);
  458. }
  459. return QDF_STATUS_SUCCESS;
  460. }
  461. /**
  462. * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
  463. * @soc: Handle to DP SoC structure
  464. * @num_pool: Number of pools to free
  465. *
  466. */
  467. void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
  468. {
  469. struct dp_tx_tso_seg_pool_s *tso_desc_pool;
  470. uint32_t pool_id;
  471. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  472. tso_desc_pool = &soc->tx_tso_desc[pool_id];
  473. qdf_spin_lock_bh(&tso_desc_pool->lock);
  474. tso_desc_pool->freelist = NULL;
  475. tso_desc_pool->num_free = 0;
  476. tso_desc_pool->pool_size = 0;
  477. qdf_spin_unlock_bh(&tso_desc_pool->lock);
  478. qdf_spinlock_destroy(&tso_desc_pool->lock);
  479. }
  480. }
  481. /**
  482. * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that tracks the
  483. * fragments in each tso segment
  484. *
  485. * @soc: handle to dp soc structure
  486. * @num_pool: number of pools to allocate
  487. * @num_elem: total number of descriptors to be allocated
  488. *
  489. * Return - QDF_STATUS_SUCCESS
  490. * QDF_STATUS_E_NOMEM
  491. */
  492. QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
  493. uint32_t num_elem)
  494. {
  495. struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
  496. uint32_t desc_size, pool_id, i;
  497. desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
  498. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  499. tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
  500. tso_num_seg_pool->num_free = 0;
  501. dp_desc_multi_pages_mem_alloc(soc, DP_TX_TSO_NUM_SEG_TYPE,
  502. &tso_num_seg_pool->desc_pages,
  503. desc_size,
  504. num_elem, 0, true);
  505. if (!tso_num_seg_pool->desc_pages.num_pages) {
  506. dp_err("Multi page alloc fail, tso_num_seg_pool");
  507. goto fail;
  508. }
  509. }
  510. return QDF_STATUS_SUCCESS;
  511. fail:
  512. for (i = 0; i < pool_id; i++) {
  513. tso_num_seg_pool = &soc->tx_tso_num_seg[i];
  514. dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
  515. &tso_num_seg_pool->desc_pages,
  516. 0, true);
  517. }
  518. return QDF_STATUS_E_NOMEM;
  519. }
  520. /**
  521. * dp_tx_tso_num_seg_pool_free() - free descriptors that tracks the
  522. * fragments in each tso segment
  523. *
  524. * @soc: handle to dp soc structure
  525. * @num_pool: number of pools to free
  526. */
  527. void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
  528. {
  529. struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
  530. uint32_t pool_id;
  531. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  532. tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
  533. dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
  534. &tso_num_seg_pool->desc_pages,
  535. 0, true);
  536. }
  537. }
  538. /**
  539. * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that tracks the
  540. * fragments in each tso segment
  541. *
  542. * @soc: handle to dp soc structure
  543. * @num_pool: number of pools to initialize
  544. * @num_elem: total number of descriptors to be initialized
  545. *
  546. * Return - QDF_STATUS_SUCCESS
  547. * QDF_STATUS_E_FAULT
  548. */
  549. QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
  550. uint32_t num_elem)
  551. {
  552. struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
  553. uint32_t desc_size, pool_id;
  554. desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
  555. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  556. tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
  557. if (qdf_mem_multi_page_link(soc->osdev,
  558. &tso_num_seg_pool->desc_pages,
  559. desc_size,
  560. num_elem, true)) {
  561. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  562. "invalid tso desc allocation - overflow num link");
  563. return QDF_STATUS_E_FAULT;
  564. }
  565. tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
  566. *tso_num_seg_pool->desc_pages.cacheable_pages;
  567. tso_num_seg_pool->num_free = num_elem;
  568. tso_num_seg_pool->num_seg_pool_size = num_elem;
  569. qdf_spinlock_create(&tso_num_seg_pool->lock);
  570. }
  571. return QDF_STATUS_SUCCESS;
  572. }
/**
 * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that tracks the
 * fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to de-initialize
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		/* Empty the pool under its lock, then destroy the lock */
		qdf_spin_lock_bh(&tso_num_seg_pool->lock);
		tso_num_seg_pool->freelist = NULL;
		tso_num_seg_pool->num_free = 0;
		tso_num_seg_pool->num_seg_pool_size = 0;
		qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
		qdf_spinlock_destroy(&tso_num_seg_pool->lock);
	}
}
#else
/* FEATURE_TSO disabled: provide no-op stubs so callers never need ifdefs. */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif