/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"
#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) \
do { \
	uint8_t sig_bit; \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */ \
	sig_bit = 0; \
	while (num_desc_per_page) { \
		sig_bit++; \
		num_desc_per_page = num_desc_per_page >> 1; \
	} \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1); \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
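
/*
 * Illustrative sketch (not part of the original file): once
 * DP_TX_DESC_PAGE_DIVIDER has run, a flat descriptor index can be
 * resolved to a (page, offset) pair with a shift and a mask, assuming
 * the number of descriptors per page is a power of two:
 *
 *	uint32_t page   = desc_idx >> soc->tx_desc[pool_id].page_divider;
 *	uint32_t offset = desc_idx & soc->tx_desc[pool_id].offset_filter;
 *
 * e.g. with 64 descriptors per page, page_divider is 6 and
 * offset_filter is 0x3f, so index 130 maps to page 2, offset 2.
 */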
/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
				   uint16_t num_elem)
{
	tx_desc_pool->elem_count = num_elem;
	tx_desc_pool->num_free = num_elem;
	tx_desc_pool->num_allocated = 0;
}
#endif
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_tx_desc_clean_up() - Clean up the tx descriptors
 * @ctxt: context passed
 * @elem: element to be cleaned up
 * @elem_list: element list
 *
 */
static void dp_tx_desc_clean_up(void *ctxt, void *elem, void *elem_list)
{
	struct dp_soc *soc = (struct dp_soc *)ctxt;
	struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem;
	qdf_nbuf_t *nbuf_list = (qdf_nbuf_t *)elem_list;
	qdf_nbuf_t nbuf = NULL;

	if (tx_desc->nbuf) {
		nbuf = dp_tx_comp_free_buf(soc, tx_desc, true);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);

		if (nbuf) {
			if (!nbuf_list) {
				dp_err("potential memory leak");
				qdf_assert_always(0);
			}

			nbuf->next = *nbuf_list;
			*nbuf_list = nbuf;
		}
	}
}
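
/*
 * A minimal consumer sketch (illustrative only; the caller shown here
 * is hypothetical and not part of this file): the delayed-free list
 * chained above is expected to be walked and released outside the
 * descriptor iteration, e.g.
 *
 *	while (nbuf_list) {
 *		qdf_nbuf_t next = nbuf_list->next;
 *
 *		qdf_nbuf_free(nbuf_list);
 *		nbuf_list = next;
 *	}
 */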
/**
 * dp_tx_desc_pool_cleanup() - Clean up the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @nbuf_list: nbuf list for delayed free
 *
 */
void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
	int i;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
	uint32_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];

		if (tx_desc_pool)
			qdf_tx_desc_pool_free_bufs(soc,
						   &tx_desc_pool->desc_pages,
						   tx_desc_pool->elem_size,
						   tx_desc_pool->elem_count,
						   true, &dp_tx_desc_clean_up,
						   nbuf_list);
	}
}
#endif
/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within host for tx data path).
 * The number of tx descriptors required will be large, since it scales
 * with the number of clients (1024 clients x 3 radios) and with the
 * outstanding MSDUs stored in TQM queues and LMAC queues.
 *
 * To avoid allocating a large contiguous memory block, this uses the
 * qdf multi_page_alloc function to allocate memory in multiple pages.
 * It then iterates through the memory allocated across pages and links
 * each descriptor to the next descriptor, taking care of page
 * boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring; this minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for VDEV
 * level flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_NOMEM on failure
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint32_t num_elem)
{
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
	tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
	tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE,
				      &tx_desc_pool->desc_pages,
				      desc_size, num_elem,
				      0, true);

	if (!tx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, tx desc");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_desc_pool_free() - Free the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 */
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &((soc)->tx_desc[pool_id]);

	if (tx_desc_pool->desc_pages.num_pages)
		dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE,
					     &tx_desc_pool->desc_pages, 0,
					     true);
}
/**
 * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint32_t num_elem)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	tx_desc_pool = &soc->tx_desc[pool_id];
	if (qdf_mem_multi_page_link(soc->osdev,
				    &tx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
  193. dp_err("invalid tx desc allocation -overflow num link");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
		*tx_desc_pool->desc_pages.cacheable_pages;
	/* Set unique IDs for each Tx descriptor */
	if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
						soc, num_elem, pool_id)) {
		dp_err("initialization per target failed");
		return QDF_STATUS_E_FAULT;
	}

	tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));

	dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 */
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;

	tx_desc_pool = &soc->tx_desc[pool_id];
	soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool, pool_id);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}
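
/*
 * A minimal lifecycle sketch (illustrative only; the caller and its
 * error handling are hypothetical, not part of this file): the pool
 * APIs above are expected to be paired as
 * alloc -> init -> ... -> deinit -> free, e.g. for pool 0:
 *
 *	if (QDF_IS_STATUS_ERROR(dp_tx_desc_pool_alloc(soc, 0, num_elem)))
 *		return QDF_STATUS_E_NOMEM;
 *	if (QDF_IS_STATUS_ERROR(dp_tx_desc_pool_init(soc, 0, num_elem))) {
 *		dp_tx_desc_pool_free(soc, 0);
 *		return QDF_STATUS_E_FAULT;
 *	}
 *	...
 *	dp_tx_desc_pool_deinit(soc, 0);
 *	dp_tx_desc_pool_free(soc, 0);
 */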
/**
 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_NOMEM on failure
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_context_t memctx = 0;
	uint8_t pool_id, count;
	uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

	/* Coherent tx extension descriptor alloc */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		dp_desc_multi_pages_mem_alloc(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					elem_size,
					num_elem,
					memctx, false);

		if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto fail_exit;
		}
	}
	/*
	 * Cacheable ext descriptor link alloc
	 * This structure is also large: a single element is 24 bytes, so
	 * 2K elements take 48 KB. It therefore has to be allocated as
	 * multi-page cacheable memory as well.
	 */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		dp_desc_multi_pages_mem_alloc(
					soc,
					DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					link_elem_size,
					num_elem,
					0, true);

		if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto free_ext_desc_page;
		}
	}

	return status;

free_ext_desc_page:
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					0, true);
	}

	pool_id = num_pool;

fail_exit:
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					memctx, false);
	}

	return status;
}
/**
 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	uint32_t i;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint8_t pool_id;
	QDF_STATUS status;

	/* link tx descriptors into a freelist */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		soc->tx_ext_desc[pool_id].elem_size =
			HAL_TX_EXT_DESC_WITH_META_DATA;
		soc->tx_ext_desc[pool_id].link_elem_size =
			sizeof(struct dp_tx_ext_desc_elem_s);
		soc->tx_ext_desc[pool_id].elem_count = num_elem;

		dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
			*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

		if (qdf_mem_multi_page_link(soc->osdev,
					    &dp_tx_ext_desc_pool->
					    desc_link_pages,
					    dp_tx_ext_desc_pool->link_elem_size,
					    dp_tx_ext_desc_pool->elem_count,
					    true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page linking fail");
			status = QDF_STATUS_E_FAULT;
			goto fail;
		}

		/* Assign coherent memory pointer into linked free list */
		pages = &dp_tx_ext_desc_pool->desc_pages;
		page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
		c_elem = dp_tx_ext_desc_pool->freelist;
		p_elem = c_elem;
		for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
			if (!(i % pages->num_element_per_page)) {
				/*
				 * The first element of a new page must
				 * point at that page's base address.
				 */
				if (!pages->dma_pages->page_v_addr_start) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "link overflow");
					status = QDF_STATUS_E_FAULT;
					goto fail;
				}
  359. c_elem->vaddr =
  360. (void *)page_info->page_v_addr_start;
  361. c_elem->paddr = page_info->page_p_addr;
  362. page_info++;
  363. } else {
  364. c_elem->vaddr = (void *)(p_elem->vaddr +
  365. dp_tx_ext_desc_pool->elem_size);
  366. c_elem->paddr = (p_elem->paddr +
  367. dp_tx_ext_desc_pool->elem_size);
  368. }
  369. p_elem = c_elem;
  370. c_elem = c_elem->next;
  371. if (!c_elem)
  372. break;
  373. }
  374. dp_tx_ext_desc_pool->num_free = num_elem;
  375. qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);
  376. }
  377. return QDF_STATUS_SUCCESS;
  378. fail:
  379. return status;
  380. }
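
/*
 * Illustrative sketch (not part of the original file): after the loop
 * above, cacheable link element i is paired with coherent element i,
 * so an element's DMA address can also be computed directly, assuming
 * elements are packed elem_size apart within each coherent page:
 *
 *	page  = i / pages->num_element_per_page;
 *	slot  = i % pages->num_element_per_page;
 *	paddr = pages->dma_pages[page].page_p_addr +
 *		slot * dp_tx_ext_desc_pool->elem_size;
 */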
/**
 * dp_tx_ext_desc_pool_free() - free Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 */
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	qdf_dma_context_t memctx = 0;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					0, true);

		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					memctx, false);
	}
}
/**
 * dp_tx_ext_desc_pool_deinit() - deinit Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 */
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
	}
}
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_NOMEM on failure
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size, pool_id, i;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		tso_desc_pool->num_free = 0;
		dp_desc_multi_pages_mem_alloc(
					soc,
					DP_TX_TSO_DESC_TYPE,
					&tso_desc_pool->desc_pages,
					desc_size,
					num_elem, 0, true);

		if (!tso_desc_pool->desc_pages.num_pages) {
			dp_err("Multi page alloc fail, tx desc");
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++) {
		tso_desc_pool = &soc->tx_tso_desc[i];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
					     &tso_desc_pool->desc_pages,
					     0, true);
	}
	return QDF_STATUS_E_NOMEM;
}
/**
 * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
					     &tso_desc_pool->desc_pages,
					     0, true);
	}
}
/**
 * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size, pool_id;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];

		if (qdf_mem_multi_page_link(soc->osdev,
					    &tso_desc_pool->desc_pages,
					    desc_size,
					    num_elem, true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "invalid tso desc allocation - overflow num link");
			return QDF_STATUS_E_FAULT;
		}

		tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
			*tso_desc_pool->desc_pages.cacheable_pages;
		tso_desc_pool->num_free = num_elem;

		TSO_DEBUG("Number of free descriptors: %u\n",
			  tso_desc_pool->num_free);
		tso_desc_pool->pool_size = num_elem;
		qdf_spinlock_create(&tso_desc_pool->lock);
	}
	return QDF_STATUS_SUCCESS;
}
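
/*
 * A minimal allocation sketch (illustrative only; the "next" linkage
 * of qdf_tso_seg_elem_t is assumed from the qdf_mem_multi_page_link
 * call above): a caller pops a segment off the freelist under the
 * pool lock, e.g.
 *
 *	qdf_spin_lock_bh(&tso_desc_pool->lock);
 *	seg = tso_desc_pool->freelist;
 *	if (seg) {
 *		tso_desc_pool->freelist = seg->next;
 *		tso_desc_pool->num_free--;
 *	}
 *	qdf_spin_unlock_bh(&tso_desc_pool->lock);
 */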
/**
 * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 */
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];

		qdf_spin_lock_bh(&tso_desc_pool->lock);

		tso_desc_pool->freelist = NULL;
		tso_desc_pool->num_free = 0;
		tso_desc_pool->pool_size = 0;
		qdf_spin_unlock_bh(&tso_desc_pool->lock);
		qdf_spinlock_destroy(&tso_desc_pool->lock);
	}
}
/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 * fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to allocate
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_NOMEM on failure
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size, pool_id, i;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		tso_num_seg_pool->num_free = 0;
		dp_desc_multi_pages_mem_alloc(soc, DP_TX_TSO_NUM_SEG_TYPE,
					      &tso_num_seg_pool->desc_pages,
					      desc_size,
					      num_elem, 0, true);

		if (!tso_num_seg_pool->desc_pages.num_pages) {
			dp_err("Multi page alloc fail, tso_num_seg_pool");
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[i];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
					     &tso_num_seg_pool->desc_pages,
					     0, true);
	}
	return QDF_STATUS_E_NOMEM;
}
/**
 * dp_tx_tso_num_seg_pool_free() - free descriptors that track the
 * fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to free
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
					     &tso_num_seg_pool->desc_pages,
					     0, true);
	}
}
/**
 * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that track the
 * fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to initialize
 * @num_elem: total number of descriptors to be initialized
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size, pool_id;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		if (qdf_mem_multi_page_link(soc->osdev,
					    &tso_num_seg_pool->desc_pages,
					    desc_size,
					    num_elem, true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "invalid tso desc allocation - overflow num link");
			return QDF_STATUS_E_FAULT;
		}

		tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
			*tso_num_seg_pool->desc_pages.cacheable_pages;
		tso_num_seg_pool->num_free = num_elem;
		tso_num_seg_pool->num_seg_pool_size = num_elem;
		qdf_spinlock_create(&tso_num_seg_pool->lock);
	}
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that track
 * the fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to de-initialize
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		qdf_spin_lock_bh(&tso_num_seg_pool->lock);

		tso_num_seg_pool->freelist = NULL;
		tso_num_seg_pool->num_free = 0;
		tso_num_seg_pool->num_seg_pool_size = 0;
		qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
		qdf_spinlock_destroy(&tso_num_seg_pool->lock);
	}
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif