/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) \
do { \
        uint8_t sig_bit; \
        soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
        /* Calculate page divider to find page number */ \
        sig_bit = 0; \
        while (num_desc_per_page) { \
                sig_bit++; \
                num_desc_per_page = num_desc_per_page >> 1; \
        } \
        soc->tx_desc[pool_id].page_divider = (sig_bit - 1); \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
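
/*
 * Worked example for DP_TX_DESC_PAGE_DIVIDER (illustrative only): with
 * num_desc_per_page = 1024, the loop runs 11 times, so sig_bit = 11 and
 * page_divider = 10, while offset_filter = 0x3FF. A flat descriptor
 * index i then resolves to page i >> page_divider and offset
 * i & offset_filter, which is valid because the descriptor size is
 * rounded up to a power of two (qdf_get_pwr2()), keeping the per-page
 * element count a power of two as well.
 */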

/**
 * dp_tx_desc_pool_counter_initialize() - Initialize counters
 * @tx_desc_pool: Handle to DP tx_desc_pool structure
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: None
 */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
                                   uint16_t num_elem)
{
}
#else
static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
                                   uint16_t num_elem)
{
        tx_desc_pool->num_free = num_elem;
        tx_desc_pool->num_allocated = 0;
}
#endif
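
/*
 * Note: the empty QCA_LL_TX_FLOW_CONTROL_V2 variant above is
 * intentional; in that configuration the free/allocated accounting is
 * expected to be maintained by the flow-control pool logic rather than
 * by these base-pool counters.
 */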

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within the host for the tx data path).
 * The number of tx descriptors required will be large since, with a
 * large number of clients (e.g. 1024 clients x 3 radios), the
 * outstanding MSDUs stored in TQM queues and LMAC queues add up
 * significantly.
 *
 * To avoid allocating one large contiguous block, it uses the
 * multi_page_alloc qdf function to allocate memory across multiple
 * pages. It then iterates through the memory allocated across pages
 * and links each descriptor to the next, taking care of page
 * boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring; this minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for
 * VDEV-level flow control.
 *
 * Return: Status code. 0 for success.
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
                                 uint16_t num_elem)
{
        uint32_t desc_size;
        struct dp_tx_desc_pool_s *tx_desc_pool;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
        tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
        dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE,
                                      &tx_desc_pool->desc_pages,
                                      desc_size, num_elem,
                                      0, true);

        if (!tx_desc_pool->desc_pages.num_pages) {
                dp_err("Multi page alloc fail, tx desc");
                return QDF_STATUS_E_NOMEM;
        }
        return QDF_STATUS_SUCCESS;
}
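
/*
 * Minimal usage sketch (illustrative, not a helper defined in this
 * file): allocation and initialization are separate phases, so callers
 * are expected to pair them and unwind the allocation on init failure.
 *
 *	QDF_STATUS status;
 *
 *	status = dp_tx_desc_pool_alloc(soc, pool_id, num_elem);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 *
 *	status = dp_tx_desc_pool_init(soc, pool_id, num_elem);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		dp_tx_desc_pool_free(soc, pool_id);
 *
 *	return status;
 */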

/**
 * dp_tx_desc_pool_free() - Free the tx descriptor pools
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 */
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
        struct dp_tx_desc_pool_s *tx_desc_pool;

        tx_desc_pool = &((soc)->tx_desc[pool_id]);

        if (tx_desc_pool->desc_pages.num_pages)
                dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE,
                                             &tx_desc_pool->desc_pages, 0,
                                             true);
}

/**
 * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
                                uint16_t num_elem)
{
        uint32_t id, count, page_id, offset, pool_id_32;
        struct dp_tx_desc_pool_s *tx_desc_pool;
        struct dp_tx_desc_s *tx_desc_elem;
        uint16_t num_desc_per_page;
        uint32_t desc_size;

        desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));

        tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
        if (qdf_mem_multi_page_link(soc->osdev,
                                    &tx_desc_pool->desc_pages,
                                    desc_size, num_elem, true)) {
                dp_err("invalid tx desc allocation - overflow num link");
                return QDF_STATUS_E_FAULT;
        }

        tx_desc_pool->freelist = (struct dp_tx_desc_s *)
                *tx_desc_pool->desc_pages.cacheable_pages;
        /* Set unique IDs for each Tx descriptor */
        tx_desc_elem = tx_desc_pool->freelist;
        count = 0;
        pool_id_32 = (uint32_t)pool_id;
        num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
        while (tx_desc_elem) {
                page_id = count / num_desc_per_page;
                offset = count % num_desc_per_page;
                id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
                      (page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

                tx_desc_elem->id = id;
                tx_desc_elem->pool_id = pool_id;
                tx_desc_elem = tx_desc_elem->next;
                count++;
        }

        tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));

        dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
        TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);

        return QDF_STATUS_SUCCESS;
}
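
/*
 * ID layout sketch for the loop above (the shifts and masks are the
 * DP_TX_DESC_ID_* definitions from dp_tx_desc.h):
 *
 *	id = (pool_id << DP_TX_DESC_ID_POOL_OS) |
 *	     (page_id << DP_TX_DESC_ID_PAGE_OS) | offset;
 *
 * so a completion path can recover a descriptor's location, e.g.:
 *
 *	page_id = (id & DP_TX_DESC_ID_PAGE_MASK) >> DP_TX_DESC_ID_PAGE_OS;
 *	offset = (id & DP_TX_DESC_ID_OFFSET_MASK) >> DP_TX_DESC_ID_OFFSET_OS;
 */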

/**
 * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 */
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
{
        struct dp_tx_desc_pool_s *tx_desc_pool;

        tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

        TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
        TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}

/**
 * dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint16_t num_elem)
{
        QDF_STATUS status = QDF_STATUS_SUCCESS;
        qdf_dma_context_t memctx = 0;
        uint8_t pool_id, count;
        uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
        struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
        uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

        /* Coherent tx extension descriptor alloc */
        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
                memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
                dp_desc_multi_pages_mem_alloc(
                                soc, DP_TX_EXT_DESC_TYPE,
                                &dp_tx_ext_desc_pool->desc_pages,
                                elem_size,
                                num_elem,
                                memctx, false);

                if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  "ext desc page alloc fail");
                        status = QDF_STATUS_E_NOMEM;
                        goto fail_exit;
                }
        }

        /*
         * Cacheable ext descriptor link alloc.
         * This structure is also large: a single element is 24 bytes,
         * so 2K elements take 48 KB. It therefore has to be allocated
         * as multi-page cacheable memory.
         */
        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
                dp_desc_multi_pages_mem_alloc(
                                soc,
                                DP_TX_EXT_DESC_LINK_TYPE,
                                &dp_tx_ext_desc_pool->desc_link_pages,
                                link_elem_size,
                                num_elem,
                                0, true);

                if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  "ext link desc page alloc fail");
                        status = QDF_STATUS_E_NOMEM;
                        goto free_ext_desc_page;
                }
        }

        return status;

free_ext_desc_page:
        for (count = 0; count < pool_id; count++) {
                dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
                dp_desc_multi_pages_mem_free(
                                soc, DP_TX_EXT_DESC_LINK_TYPE,
                                &dp_tx_ext_desc_pool->desc_link_pages,
                                0, true);
        }
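
        /*
         * Link pages for pools [0, pool_id) were freed above; fall
         * through so that fail_exit also releases the coherent
         * descriptor pages of every pool.
         */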
        pool_id = num_pool;

fail_exit:
        for (count = 0; count < pool_id; count++) {
                dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
                memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
                dp_desc_multi_pages_mem_free(
                                soc, DP_TX_EXT_DESC_TYPE,
                                &dp_tx_ext_desc_pool->desc_pages,
                                memctx, false);
        }

        return status;
}

/**
 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint16_t num_elem)
{
        uint32_t i;
        struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
        struct qdf_mem_dma_page_t *page_info;
        struct qdf_mem_multi_page_t *pages;
        struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
        uint8_t pool_id;
        QDF_STATUS status;

        /* link tx descriptors into a freelist */
        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
                soc->tx_ext_desc[pool_id].elem_size =
                        HAL_TX_EXT_DESC_WITH_META_DATA;
                soc->tx_ext_desc[pool_id].link_elem_size =
                        sizeof(struct dp_tx_ext_desc_elem_s);
                soc->tx_ext_desc[pool_id].elem_count = num_elem;

                dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
                        *dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;

                if (qdf_mem_multi_page_link(soc->osdev,
                                            &dp_tx_ext_desc_pool->
                                            desc_link_pages,
                                            dp_tx_ext_desc_pool->link_elem_size,
                                            dp_tx_ext_desc_pool->elem_count,
                                            true)) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  "ext link desc page linking fail");
                        status = QDF_STATUS_E_FAULT;
                        goto fail;
                }

                /* Assign coherent memory pointer into linked free list */
                pages = &dp_tx_ext_desc_pool->desc_pages;
                page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
                c_elem = dp_tx_ext_desc_pool->freelist;
                p_elem = c_elem;
                for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
                        if (!(i % pages->num_element_per_page)) {
                                /*
                                 * First element of a new page:
                                 * it should point to the next DMA page.
                                 */
                                if (!pages->dma_pages->page_v_addr_start) {
                                        QDF_TRACE(QDF_MODULE_ID_DP,
                                                  QDF_TRACE_LEVEL_ERROR,
                                                  "link over flow");
                                        status = QDF_STATUS_E_FAULT;
                                        goto fail;
                                }

                                c_elem->vaddr =
                                        (void *)page_info->page_v_addr_start;
                                c_elem->paddr = page_info->page_p_addr;
                                page_info++;
                        } else {
                                c_elem->vaddr = (void *)(p_elem->vaddr +
                                        dp_tx_ext_desc_pool->elem_size);
                                c_elem->paddr = (p_elem->paddr +
                                        dp_tx_ext_desc_pool->elem_size);
                        }
                        p_elem = c_elem;
                        c_elem = c_elem->next;
                        if (!c_elem)
                                break;
                }

                dp_tx_ext_desc_pool->num_free = num_elem;
                qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);
        }

        return QDF_STATUS_SUCCESS;

fail:
        return status;
}
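
/*
 * Address arithmetic sketch for the init loop above (illustrative):
 * each cacheable link element is paired with one coherent slot, so
 * within a DMA page the coherent addresses advance in elem_size steps:
 *
 *	elem[i].vaddr = page_v_addr_start + (i % per_page) * elem_size;
 *	elem[i].paddr = page_p_addr + (i % per_page) * elem_size;
 *
 * where page_v_addr_start/page_p_addr belong to the DMA page that
 * holds slot i.
 */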

/**
 * dp_tx_ext_desc_pool_free() - free Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 */
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
        uint8_t pool_id;
        struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
        qdf_dma_context_t memctx = 0;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
                memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

                dp_desc_multi_pages_mem_free(
                                soc, DP_TX_EXT_DESC_LINK_TYPE,
                                &dp_tx_ext_desc_pool->desc_link_pages,
                                0, true);

                dp_desc_multi_pages_mem_free(
                                soc, DP_TX_EXT_DESC_TYPE,
                                &dp_tx_ext_desc_pool->desc_pages,
                                memctx, false);
        }
}

/**
 * dp_tx_ext_desc_pool_deinit() - deinit Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 */
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
        uint8_t pool_id;
        struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
                qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
        }
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint16_t num_elem)
{
        struct dp_tx_tso_seg_pool_s *tso_desc_pool;
        uint32_t desc_size, pool_id, i;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                tso_desc_pool = &soc->tx_tso_desc[pool_id];
                tso_desc_pool->num_free = 0;
                dp_desc_multi_pages_mem_alloc(
                                soc,
                                DP_TX_TSO_DESC_TYPE,
                                &tso_desc_pool->desc_pages,
                                desc_size,
                                num_elem, 0, true);

                if (!tso_desc_pool->desc_pages.num_pages) {
                        dp_err("Multi page alloc fail, tso desc");
                        goto fail;
                }
        }
        return QDF_STATUS_SUCCESS;

fail:
        for (i = 0; i < pool_id; i++) {
                tso_desc_pool = &soc->tx_tso_desc[i];
                dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
                                             &tso_desc_pool->desc_pages,
                                             0, true);
        }
        return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
        struct dp_tx_tso_seg_pool_s *tso_desc_pool;
        uint32_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                tso_desc_pool = &soc->tx_tso_desc[pool_id];
                dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
                                             &tso_desc_pool->desc_pages,
                                             0, true);
        }
}

/**
 * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint16_t num_elem)
{
        struct dp_tx_tso_seg_pool_s *tso_desc_pool;
        uint32_t desc_size, pool_id;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                tso_desc_pool = &soc->tx_tso_desc[pool_id];

                if (qdf_mem_multi_page_link(soc->osdev,
                                            &tso_desc_pool->desc_pages,
                                            desc_size,
                                            num_elem, true)) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  "invalid tso desc allocation - overflow num link");
                        return QDF_STATUS_E_FAULT;
                }

                tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
                        *tso_desc_pool->desc_pages.cacheable_pages;
                tso_desc_pool->num_free = num_elem;

                TSO_DEBUG("Number of free descriptors: %u\n",
                          tso_desc_pool->num_free);
                tso_desc_pool->pool_size = num_elem;
                qdf_spinlock_create(&tso_desc_pool->lock);
        }

        return QDF_STATUS_SUCCESS;
}
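
/*
 * Consumption sketch (illustrative; the real freelist helpers live in
 * dp_tx_desc.h): a TSO segment is popped from the freelist built above
 * under the pool lock.
 *
 *	qdf_spin_lock_bh(&tso_desc_pool->lock);
 *	seg = tso_desc_pool->freelist;
 *	if (seg) {
 *		tso_desc_pool->freelist = seg->next;
 *		tso_desc_pool->num_free--;
 *	}
 *	qdf_spin_unlock_bh(&tso_desc_pool->lock);
 */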

/**
 * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 */
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
        struct dp_tx_tso_seg_pool_s *tso_desc_pool;
        uint32_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                tso_desc_pool = &soc->tx_tso_desc[pool_id];

                qdf_spin_lock_bh(&tso_desc_pool->lock);

                tso_desc_pool->freelist = NULL;
                tso_desc_pool->num_free = 0;
                tso_desc_pool->pool_size = 0;
                qdf_spin_unlock_bh(&tso_desc_pool->lock);
                qdf_spinlock_destroy(&tso_desc_pool->lock);
        }
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                                  fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to allocate
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                        uint16_t num_elem)
{
        struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
        uint32_t desc_size, pool_id, i;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
                tso_num_seg_pool->num_free = 0;
                dp_desc_multi_pages_mem_alloc(soc, DP_TX_TSO_NUM_SEG_TYPE,
                                              &tso_num_seg_pool->desc_pages,
                                              desc_size,
                                              num_elem, 0, true);

                if (!tso_num_seg_pool->desc_pages.num_pages) {
                        dp_err("Multi page alloc fail, tso_num_seg_pool");
                        goto fail;
                }
        }
        return QDF_STATUS_SUCCESS;

fail:
        for (i = 0; i < pool_id; i++) {
                tso_num_seg_pool = &soc->tx_tso_num_seg[i];
                dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
                                             &tso_num_seg_pool->desc_pages,
                                             0, true);
        }
        return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free descriptors that track the
 *                                 fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to free
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
        struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
        uint32_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
                dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
                                             &tso_num_seg_pool->desc_pages,
                                             0, true);
        }
}

/**
 * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that track the
 *                                 fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to initialize
 * @num_elem: total number of descriptors to be initialized
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                       uint16_t num_elem)
{
        struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
        uint32_t desc_size, pool_id;

        desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
                if (qdf_mem_multi_page_link(soc->osdev,
                                            &tso_num_seg_pool->desc_pages,
                                            desc_size,
                                            num_elem, true)) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  "invalid tso num seg allocation - overflow num link");
                        return QDF_STATUS_E_FAULT;
                }

                tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
                        *tso_num_seg_pool->desc_pages.cacheable_pages;
                tso_num_seg_pool->num_free = num_elem;
                tso_num_seg_pool->num_seg_pool_size = num_elem;

                qdf_spinlock_create(&tso_num_seg_pool->lock);
        }

        return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that track the
 *                                   fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to de-initialize
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
        struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
        uint32_t pool_id;

        for (pool_id = 0; pool_id < num_pool; pool_id++) {
                tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
                qdf_spin_lock_bh(&tso_num_seg_pool->lock);

                tso_num_seg_pool->freelist = NULL;
                tso_num_seg_pool->num_free = 0;
                tso_num_seg_pool->num_seg_pool_size = 0;
                qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
                qdf_spinlock_destroy(&tso_num_seg_pool->lock);
        }
}

#else
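/*
 * FEATURE_TSO disabled: provide no-op stubs so callers can invoke the
 * TSO pool APIs unconditionally.
 */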
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                     uint16_t num_elem)
{
        return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                    uint16_t num_elem)
{
        return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
                                        uint16_t num_elem)
{
        return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
                                       uint16_t num_elem)
{
        return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif