/* dp_tx_desc.c */
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
  18. #include "hal_hw_headers.h"
  19. #include "dp_types.h"
  20. #include "dp_tx_desc.h"
#ifndef DESC_PARTITION
/* Round the descriptor size up to a power of two (qdf_get_pwr2) so that
 * the page_divider/offset_filter shift-and-mask lookup below is valid.
 */
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)

/*
 * DP_TX_DESC_PAGE_DIVIDER() - precompute per-pool page lookup parameters
 *
 * Stores (num_desc_per_page - 1) as an offset mask and the bit position of
 * num_desc_per_page as a shift (page_divider) in soc->tx_desc[pool_id].
 *
 * NOTE: this macro CONSUMES its num_desc_per_page argument (it is shifted
 * down to zero in the while loop) and evaluates it several times — callers
 * must pass a plain scratch variable, never an expression with side effects.
 */
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
do {                                                                 \
	uint8_t sig_bit;                                             \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */             \
	sig_bit = 0;                                                 \
	while (num_desc_per_page) {                                  \
		sig_bit++;                                           \
		num_desc_per_page = num_desc_per_page >> 1;          \
	}                                                            \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
} while (0)
#else
/* Descriptor partitioning enabled: exact size, no page-divider math needed */
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
  39. /**
  40. * dp_tx_desc_pool_counter_initialize() - Initialize counters
  41. * @tx_desc_pool Handle to DP tx_desc_pool structure
  42. * @num_elem Number of descriptor elements per pool
  43. *
  44. * Return: None
  45. */
  46. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  47. static void
  48. dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
  49. uint16_t num_elem)
  50. {
  51. }
  52. #else
  53. static void
  54. dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
  55. uint16_t num_elem)
  56. {
  57. tx_desc_pool->num_free = num_elem;
  58. tx_desc_pool->num_allocated = 0;
  59. }
  60. #endif
  61. /**
  62. * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
  63. * @soc Handle to DP SoC structure
  64. * @pool_id pool to allocate
  65. * @num_elem Number of descriptor elements per pool
  66. *
  67. * This function allocates memory for SW tx descriptors
  68. * (used within host for tx data path).
  69. * The number of tx descriptors required will be large
  70. * since based on number of clients (1024 clients x 3 radios),
  71. * outstanding MSDUs stored in TQM queues and LMAC queues will be significantly
  72. * large.
  73. *
  74. * To avoid allocating a large contiguous memory, it uses multi_page_alloc qdf
  75. * function to allocate memory
  76. * in multiple pages. It then iterates through the memory allocated across pages
  77. * and links each descriptor
  78. * to next descriptor, taking care of page boundaries.
  79. *
  80. * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are allocated,
  81. * one for each ring;
  82. * This minimizes lock contention when hard_start_xmit is called
  83. * from multiple CPUs.
  84. * Alternately, multiple pools can be used for multiple VDEVs for VDEV level
  85. * flow control.
  86. *
  87. * Return: Status code. 0 for success.
  88. */
  89. QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
  90. uint16_t num_elem)
  91. {
  92. uint32_t desc_size;
  93. struct dp_tx_desc_pool_s *tx_desc_pool;
  94. desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
  95. tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
  96. dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE,
  97. &tx_desc_pool->desc_pages,
  98. desc_size, num_elem,
  99. 0, true);
  100. if (!tx_desc_pool->desc_pages.num_pages) {
  101. dp_err("Multi page alloc fail, tx desc");
  102. return QDF_STATUS_E_NOMEM;
  103. }
  104. return QDF_STATUS_SUCCESS;
  105. }
  106. /**
  107. * dp_tx_desc_pool_free() - Free the tx dexcriptor pools
  108. * @soc: Handle to DP SoC structure
  109. * @pool_id: pool to free
  110. *
  111. */
  112. void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
  113. {
  114. struct dp_tx_desc_pool_s *tx_desc_pool;
  115. tx_desc_pool = &((soc)->tx_desc[pool_id]);
  116. if (tx_desc_pool->desc_pages.num_pages)
  117. dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE,
  118. &tx_desc_pool->desc_pages, 0,
  119. true);
  120. }
  121. /**
  122. * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
  123. * @soc: Handle to DP SoC structure
  124. * @pool_id: pool to allocate
  125. * @num_elem: Number of descriptor elements per pool
  126. *
  127. * Return: QDF_STATUS_SUCCESS
  128. * QDF_STATUS_E_FAULT
  129. */
  130. QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
  131. uint16_t num_elem)
  132. {
  133. struct dp_tx_desc_pool_s *tx_desc_pool;
  134. uint32_t desc_size;
  135. desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
  136. tx_desc_pool = &soc->tx_desc[pool_id];
  137. if (qdf_mem_multi_page_link(soc->osdev,
  138. &tx_desc_pool->desc_pages,
  139. desc_size, num_elem, true)) {
  140. dp_err("invalid tx desc allocation -overflow num link");
  141. return QDF_STATUS_E_FAULT;
  142. }
  143. tx_desc_pool->freelist = (struct dp_tx_desc_s *)
  144. *tx_desc_pool->desc_pages.cacheable_pages;
  145. /* Set unique IDs for each Tx descriptor */
  146. if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
  147. soc, num_elem, pool_id)) {
  148. dp_err("initialization per target failed");
  149. return QDF_STATUS_E_FAULT;
  150. }
  151. tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
  152. dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
  153. TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
  154. return QDF_STATUS_SUCCESS;
  155. }
  156. /**
  157. * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
  158. * @soc Handle to DP SoC structure
  159. * @pool_id: pool to de-initialize
  160. *
  161. */
  162. void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
  163. {
  164. struct dp_tx_desc_pool_s *tx_desc_pool;
  165. tx_desc_pool = &soc->tx_desc[pool_id];
  166. soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool, pool_id);
  167. TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
  168. TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
  169. }
/**
 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Two-phase allocation: first the coherent (DMA) extension descriptor
 * pages for every pool, then the cacheable link-element pages for every
 * pool. On failure, everything allocated so far is unwound.
 *
 * Return - QDF_STATUS_SUCCESS
 *	    QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint16_t num_elem)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_context_t memctx = 0;
	uint8_t pool_id, count;
	uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

	/* Coherent tx extension descriptor alloc */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		dp_desc_multi_pages_mem_alloc(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					elem_size,
					num_elem,
					memctx, false);

		if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto fail_exit;
		}
	}

	/*
	 * Cacheable ext descriptor link alloc
	 * This structure also large size already
	 * single element is 24bytes, 2K elements are 48Kbytes
	 * Have to alloc multi page cacheable memory
	 */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		dp_desc_multi_pages_mem_alloc(
					soc,
					DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					link_elem_size,
					num_elem,
					0, true);

		if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto free_ext_desc_page;
		}
	}

	return status;

free_ext_desc_page:
	/* Phase-2 failure: free the link pages of pools [0, pool_id) ... */
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_LINK_TYPE,
					&dp_tx_ext_desc_pool->desc_link_pages,
					0, true);
	}

	/* ... then fall through and free ALL coherent pools, since phase 1
	 * had completed for every pool; hence pool_id is raised to num_pool.
	 */
	pool_id = num_pool;

fail_exit:
	/* Free the coherent pages of pools allocated before the failure */
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		dp_desc_multi_pages_mem_free(
					soc, DP_TX_EXT_DESC_TYPE,
					&dp_tx_ext_desc_pool->desc_pages,
					memctx, false);
	}

	return status;
}
/**
 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Links each pool's cacheable link elements into a freelist and assigns
 * every link element the vaddr/paddr of its slice of the coherent pages.
 *
 * Return - QDF_STATUS_SUCCESS
 *	    QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint16_t num_elem)
{
	uint32_t i;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint8_t pool_id;
	QDF_STATUS status;

	/* link tx descriptors into a freelist */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		soc->tx_ext_desc[pool_id].elem_size =
			HAL_TX_EXT_DESC_WITH_META_DATA;
		soc->tx_ext_desc[pool_id].link_elem_size =
			sizeof(struct dp_tx_ext_desc_elem_s);
		soc->tx_ext_desc[pool_id].elem_count = num_elem;

		/* Freelist head = first element of first cacheable page */
		dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
			*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;
		if (qdf_mem_multi_page_link(soc->osdev,
					    &dp_tx_ext_desc_pool->
					    desc_link_pages,
					    dp_tx_ext_desc_pool->link_elem_size,
					    dp_tx_ext_desc_pool->elem_count,
					    true)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page linking fail");
			status = QDF_STATUS_E_FAULT;
			goto fail;
		}

		/* Assign coherent memory pointer into linked free list */
		pages = &dp_tx_ext_desc_pool->desc_pages;
		page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
		c_elem = dp_tx_ext_desc_pool->freelist;
		p_elem = c_elem;
		for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
			if (!(i % pages->num_element_per_page)) {
				/*
				 * First element for new page,
				 * should point next page
				 *
				 * NOTE(review): this guard dereferences
				 * pages->dma_pages (always page 0), not the
				 * current page_info, so it seemingly cannot
				 * detect walking past the allocated pages —
				 * confirm intent before changing.
				 */
				if (!pages->dma_pages->page_v_addr_start) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "link over flow");
					status = QDF_STATUS_E_FAULT;
					goto fail;
				}
				c_elem->vaddr =
					(void *)page_info->page_v_addr_start;
				c_elem->paddr = page_info->page_p_addr;
				page_info++;
			} else {
				/* Same page: next slice follows the previous
				 * element by elem_size bytes
				 */
				c_elem->vaddr = (void *)(p_elem->vaddr +
					dp_tx_ext_desc_pool->elem_size);
				c_elem->paddr = (p_elem->paddr +
					dp_tx_ext_desc_pool->elem_size);
			}
			p_elem = c_elem;
			c_elem = c_elem->next;
			if (!c_elem)
				break;
		}
		dp_tx_ext_desc_pool->num_free = num_elem;
		qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);
	}
	return QDF_STATUS_SUCCESS;

fail:
	return status;
}
  328. /**
  329. * dp_tx_ext_desc_pool_free() - free Tx extenstion Descriptor pool(s)
  330. * @soc: Handle to DP SoC structure
  331. * @num_pool: Number of pools to free
  332. *
  333. */
  334. void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
  335. {
  336. uint8_t pool_id;
  337. struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
  338. qdf_dma_context_t memctx = 0;
  339. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  340. dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
  341. memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
  342. dp_desc_multi_pages_mem_free(
  343. soc, DP_TX_EXT_DESC_LINK_TYPE,
  344. &dp_tx_ext_desc_pool->desc_link_pages,
  345. 0, true);
  346. dp_desc_multi_pages_mem_free(
  347. soc, DP_TX_EXT_DESC_TYPE,
  348. &dp_tx_ext_desc_pool->desc_pages,
  349. memctx, false);
  350. }
  351. }
  352. /**
  353. * dp_tx_ext_desc_pool_deinit() - deinit Tx extenstion Descriptor pool(s)
  354. * @soc: Handle to DP SoC structure
  355. * @num_pool: Number of pools to de-initialize
  356. *
  357. */
  358. void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
  359. {
  360. uint8_t pool_id;
  361. struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
  362. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  363. dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
  364. qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
  365. }
  366. }
  367. #if defined(FEATURE_TSO)
  368. /**
  369. * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
  370. * @soc: Handle to DP SoC structure
  371. * @num_pool: Number of pools to allocate
  372. * @num_elem: Number of descriptor elements per pool
  373. *
  374. * Return - QDF_STATUS_SUCCESS
  375. * QDF_STATUS_E_NOMEM
  376. */
  377. QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
  378. uint16_t num_elem)
  379. {
  380. struct dp_tx_tso_seg_pool_s *tso_desc_pool;
  381. uint32_t desc_size, pool_id, i;
  382. desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
  383. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  384. tso_desc_pool = &soc->tx_tso_desc[pool_id];
  385. tso_desc_pool->num_free = 0;
  386. dp_desc_multi_pages_mem_alloc(
  387. soc,
  388. DP_TX_TSO_DESC_TYPE,
  389. &tso_desc_pool->desc_pages,
  390. desc_size,
  391. num_elem, 0, true);
  392. if (!tso_desc_pool->desc_pages.num_pages) {
  393. dp_err("Multi page alloc fail, tx desc");
  394. goto fail;
  395. }
  396. }
  397. return QDF_STATUS_SUCCESS;
  398. fail:
  399. for (i = 0; i < pool_id; i++) {
  400. tso_desc_pool = &soc->tx_tso_desc[i];
  401. dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
  402. &tso_desc_pool->desc_pages,
  403. 0, true);
  404. }
  405. return QDF_STATUS_E_NOMEM;
  406. }
  407. /**
  408. * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
  409. * @soc: Handle to DP SoC structure
  410. * @num_pool: Number of pools to free
  411. *
  412. */
  413. void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
  414. {
  415. struct dp_tx_tso_seg_pool_s *tso_desc_pool;
  416. uint32_t pool_id;
  417. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  418. tso_desc_pool = &soc->tx_tso_desc[pool_id];
  419. dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
  420. &tso_desc_pool->desc_pages,
  421. 0, true);
  422. }
  423. }
  424. /**
  425. * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
  426. * @soc: Handle to DP SoC structure
  427. * @num_pool: Number of pools to initialize
  428. * @num_elem: Number of descriptor elements per pool
  429. *
  430. * Return - QDF_STATUS_SUCCESS
  431. * QDF_STATUS_E_NOMEM
  432. */
  433. QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
  434. uint16_t num_elem)
  435. {
  436. struct dp_tx_tso_seg_pool_s *tso_desc_pool;
  437. uint32_t desc_size, pool_id;
  438. desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
  439. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  440. tso_desc_pool = &soc->tx_tso_desc[pool_id];
  441. if (qdf_mem_multi_page_link(soc->osdev,
  442. &tso_desc_pool->desc_pages,
  443. desc_size,
  444. num_elem, true)) {
  445. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  446. "invalid tso desc allocation - overflow num link");
  447. return QDF_STATUS_E_FAULT;
  448. }
  449. tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
  450. *tso_desc_pool->desc_pages.cacheable_pages;
  451. tso_desc_pool->num_free = num_elem;
  452. TSO_DEBUG("Number of free descriptors: %u\n",
  453. tso_desc_pool->num_free);
  454. tso_desc_pool->pool_size = num_elem;
  455. qdf_spinlock_create(&tso_desc_pool->lock);
  456. }
  457. return QDF_STATUS_SUCCESS;
  458. }
  459. /**
  460. * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
  461. * @soc: Handle to DP SoC structure
  462. * @num_pool: Number of pools to free
  463. *
  464. */
  465. void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
  466. {
  467. struct dp_tx_tso_seg_pool_s *tso_desc_pool;
  468. uint32_t pool_id;
  469. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  470. tso_desc_pool = &soc->tx_tso_desc[pool_id];
  471. qdf_spin_lock_bh(&tso_desc_pool->lock);
  472. tso_desc_pool->freelist = NULL;
  473. tso_desc_pool->num_free = 0;
  474. tso_desc_pool->pool_size = 0;
  475. qdf_spin_unlock_bh(&tso_desc_pool->lock);
  476. qdf_spinlock_destroy(&tso_desc_pool->lock);
  477. }
  478. }
  479. /**
  480. * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that tracks the
  481. * fragments in each tso segment
  482. *
  483. * @soc: handle to dp soc structure
  484. * @num_pool: number of pools to allocate
  485. * @num_elem: total number of descriptors to be allocated
  486. *
  487. * Return - QDF_STATUS_SUCCESS
  488. * QDF_STATUS_E_NOMEM
  489. */
  490. QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
  491. uint16_t num_elem)
  492. {
  493. struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
  494. uint32_t desc_size, pool_id, i;
  495. desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
  496. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  497. tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
  498. tso_num_seg_pool->num_free = 0;
  499. dp_desc_multi_pages_mem_alloc(soc, DP_TX_TSO_NUM_SEG_TYPE,
  500. &tso_num_seg_pool->desc_pages,
  501. desc_size,
  502. num_elem, 0, true);
  503. if (!tso_num_seg_pool->desc_pages.num_pages) {
  504. dp_err("Multi page alloc fail, tso_num_seg_pool");
  505. goto fail;
  506. }
  507. }
  508. return QDF_STATUS_SUCCESS;
  509. fail:
  510. for (i = 0; i < pool_id; i++) {
  511. tso_num_seg_pool = &soc->tx_tso_num_seg[i];
  512. dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
  513. &tso_num_seg_pool->desc_pages,
  514. 0, true);
  515. }
  516. return QDF_STATUS_E_NOMEM;
  517. }
  518. /**
  519. * dp_tx_tso_num_seg_pool_free() - free descriptors that tracks the
  520. * fragments in each tso segment
  521. *
  522. * @soc: handle to dp soc structure
  523. * @num_pool: number of pools to free
  524. */
  525. void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
  526. {
  527. struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
  528. uint32_t pool_id;
  529. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  530. tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
  531. dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
  532. &tso_num_seg_pool->desc_pages,
  533. 0, true);
  534. }
  535. }
  536. /**
  537. * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that tracks the
  538. * fragments in each tso segment
  539. *
  540. * @soc: handle to dp soc structure
  541. * @num_pool: number of pools to initialize
  542. * @num_elem: total number of descriptors to be initialized
  543. *
  544. * Return - QDF_STATUS_SUCCESS
  545. * QDF_STATUS_E_FAULT
  546. */
  547. QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
  548. uint16_t num_elem)
  549. {
  550. struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
  551. uint32_t desc_size, pool_id;
  552. desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
  553. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  554. tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
  555. if (qdf_mem_multi_page_link(soc->osdev,
  556. &tso_num_seg_pool->desc_pages,
  557. desc_size,
  558. num_elem, true)) {
  559. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  560. "invalid tso desc allocation - overflow num link");
  561. return QDF_STATUS_E_FAULT;
  562. }
  563. tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
  564. *tso_num_seg_pool->desc_pages.cacheable_pages;
  565. tso_num_seg_pool->num_free = num_elem;
  566. tso_num_seg_pool->num_seg_pool_size = num_elem;
  567. qdf_spinlock_create(&tso_num_seg_pool->lock);
  568. }
  569. return QDF_STATUS_SUCCESS;
  570. }
  571. /**
  572. * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that tracks the
  573. * fragments in each tso segment
  574. *
  575. * @soc: handle to dp soc structure
  576. * @num_pool: number of pools to de-initialize
  577. *
  578. * Return - QDF_STATUS_SUCCESS
  579. * QDF_STATUS_E_FAULT
  580. */
  581. void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
  582. {
  583. struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
  584. uint32_t pool_id;
  585. for (pool_id = 0; pool_id < num_pool; pool_id++) {
  586. tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
  587. qdf_spin_lock_bh(&tso_num_seg_pool->lock);
  588. tso_num_seg_pool->freelist = NULL;
  589. tso_num_seg_pool->num_free = 0;
  590. tso_num_seg_pool->num_seg_pool_size = 0;
  591. qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
  592. qdf_spinlock_destroy(&tso_num_seg_pool->lock);
  593. }
  594. }
#else
/* FEATURE_TSO disabled: no-op stubs keep callers free of #ifdef clutter. */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif