dp_tx_desc.c

/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) \
do { \
	uint8_t sig_bit; \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */ \
	sig_bit = 0; \
	while (num_desc_per_page) { \
		sig_bit++; \
		num_desc_per_page = num_desc_per_page >> 1; \
	} \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1); \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */

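/*
 * Worked example (illustrative only): with num_desc_per_page = 64, the
 * loop above counts 7 significant bits, so page_divider = 6 and
 * offset_filter = 63. A descriptor index then splits as
 * page_id = index >> page_divider and offset = index & offset_filter;
 * e.g. index 130 lands on page 2 at offset 2.
 */
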
/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within host for tx data path).
 * The number of tx descriptors required is large: with up to
 * 1024 clients per radio across 3 radios, the MSDUs outstanding in
 * TQM queues and LMAC queues can be significant.
 *
 * To avoid allocating one large contiguous region, the qdf
 * multi_page_alloc helper is used to spread the descriptors across
 * multiple pages. The allocated memory is then walked page by page
 * and each descriptor is linked to the next, taking care of page
 * boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are
 * allocated, one for each ring; this minimizes lock contention when
 * hard_start_xmit is called from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for
 * VDEV-level flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	uint16_t num_page, num_desc_per_page;
	struct dp_tx_desc_s *tx_desc_elem;
	uint32_t desc_size;
	struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);

	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
	tx_desc_pool->elem_size = desc_size;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&tx_desc_pool->desc_pages, desc_size, num_elem,
		0, true);
	if (!tx_desc_pool->desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Multi page alloc fail, tx desc");
		goto fail_exit;
	}

	num_page = tx_desc_pool->desc_pages.num_pages;
	num_desc_per_page =
		tx_desc_pool->desc_pages.num_element_per_page;
	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
		*tx_desc_pool->desc_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&tx_desc_pool->desc_pages, desc_size, num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"invalid tx desc allocation - overflow num link");
		goto free_tx_desc;
	}

	/* Set unique IDs for each Tx descriptor */
	tx_desc_elem = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	while (tx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc_elem->id = id;
		tx_desc_elem->pool_id = pool_id;
		tx_desc_elem = tx_desc_elem->next;
		count++;
	}

	TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;

free_tx_desc:
	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);
fail_exit:
	return QDF_STATUS_E_FAULT;
}

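/*
 * Illustrative sketch, not part of the driver: how an ID stamped in the
 * loop above decodes back into its pool/page/offset fields. The shift
 * macros are the ones used above; the corresponding *_MASK macros are
 * assumed to be defined alongside them in dp_tx_desc.h. Guarded so it
 * never compiles into the driver.
 */
#ifdef DP_TX_DESC_ID_DECODE_EXAMPLE /* hypothetical guard, never set */
static void dp_tx_desc_id_decode_example(uint32_t id)
{
	uint32_t pool_id = (id & DP_TX_DESC_ID_POOL_MASK) >>
				DP_TX_DESC_ID_POOL_OS;
	uint32_t page_id = (id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	uint32_t offset = (id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;

	/* Print the decoded fields to show the round trip */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		"id 0x%x -> pool %u page %u offset %u",
		id, pool_id, page_id, offset);
}
#endif
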
/**
 * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx Descriptors
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool =
				&((soc)->tx_desc[(pool_id)]);

	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_ext_desc_pool_alloc() - allocate tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 * @num_elem: number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success; error code on failure
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	uint16_t num_page;
	uint32_t count;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	QDF_STATUS status;

	/* Coherent tx extension descriptor alloc */
	soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].elem_count = num_elem;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		soc->tx_ext_desc[pool_id].elem_size,
		soc->tx_ext_desc[pool_id].elem_count,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);
	if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto fail_exit;
	}

	num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages;

	/*
	 * Cacheable ext descriptor link alloc.
	 * This structure is also fairly large: a single element is
	 * 24 bytes, so 2K elements take 48 KB. Multi-page cacheable
	 * memory has to be allocated for it as well.
	 */
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, 0,
		true);
	if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc_page;
	}

	/* link tx descriptors into a freelist */
	soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *)
		*soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto free_ext_link_desc_page;
	}

	/* Assign coherent memory pointer into linked free list */
	pages = &soc->tx_ext_desc[pool_id].desc_pages;
	page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages;
	c_elem = soc->tx_ext_desc[pool_id].freelist;
	p_elem = c_elem;
	for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) {
		if (!(count % pages->num_element_per_page)) {
			/*
			 * First element of a new page:
			 * point it at the start of the next DMA page.
			 * Check the current page, not the first one,
			 * to catch running past the allocated pages.
			 */
			if (!page_info->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"link over flow");
				status = QDF_STATUS_E_FAULT;
				goto free_ext_link_desc_page;
			}
			c_elem->vaddr = (void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			c_elem->vaddr = (void *)(p_elem->vaddr +
				soc->tx_ext_desc[pool_id].elem_size);
			c_elem->paddr = (p_elem->paddr +
				soc->tx_ext_desc[pool_id].elem_size);
		}
		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}

	soc->tx_ext_desc[pool_id].num_free = num_elem;
	qdf_spinlock_create(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;

free_ext_link_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);
free_ext_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);
fail_exit:
	return status;
}

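/*
 * Layout sketch (illustrative only): after the loop above, link element
 * k in the freelist maps to the coherent buffer at
 *   dma_pages[k / num_element_per_page].page_v_addr_start +
 *   (k % num_element_per_page) * elem_size,
 * with paddr holding the matching physical address, so SW (via vaddr)
 * and HW (via paddr) walk the same stride through each DMA page.
 */
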
/**
 * dp_tx_ext_desc_pool_free() - free tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);

	qdf_spinlock_destroy(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_desc_pool_alloc() - allocate tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 * @num_elem: number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_NOMEM on failure
 */
#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	soc->tx_tso_desc[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			FL("Alloc Failed %pK pool_id %d"),
			soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_desc[pool_id].freelist = c_element;
	soc->tx_tso_desc[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %pK pool_id %d"),
				soc, pool_id);
			goto fail;
		}

		soc->tx_tso_desc[pool_id].num_free++;
		c_element = c_element->next;
		c_element->next = NULL;
	}
	TSO_DEBUG("Number of free descriptors: %u\n",
		soc->tx_tso_desc[pool_id].num_free);
	soc->tx_tso_desc[pool_id].pool_size = num_elem;
	qdf_spinlock_create(&soc->tx_tso_desc[pool_id].lock);

	return QDF_STATUS_SUCCESS;

fail:
	c_element = soc->tx_tso_desc[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}

	return QDF_STATUS_E_NOMEM;
}

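/*
 * Usage sketch (hypothetical helper, for illustration only): the
 * freelist built above is meant to be consumed under the pool lock;
 * a pop would look like this. The real accessors live elsewhere in the
 * datapath. Guarded so it never compiles into the driver.
 */
#ifdef DP_TX_TSO_POP_EXAMPLE /* hypothetical guard, never set */
static struct qdf_tso_seg_elem_t *dp_tx_tso_pop_example(struct dp_soc *soc,
		uint8_t pool_id)
{
	struct qdf_tso_seg_elem_t *seg = NULL;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	if (soc->tx_tso_desc[pool_id].freelist) {
		/* Detach the head element and keep the count in sync */
		seg = soc->tx_tso_desc[pool_id].freelist;
		soc->tx_tso_desc[pool_id].freelist = seg->next;
		soc->tx_tso_desc[pool_id].num_free--;
		seg->next = NULL;
	}
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);

	return seg;
}
#endif
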
/**
 * dp_tx_tso_desc_pool_free() - free tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 *
 * Return: NONE
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	qdf_spin_lock_bh(&soc->tx_tso_desc[pool_id].lock);
	c_element = soc->tx_tso_desc[pool_id].freelist;

	if (!c_element) {
		qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Desc Pool Corrupt %d"), pool_id);
		return;
	}

	for (i = 0; i < soc->tx_tso_desc[pool_id].pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_desc[pool_id].freelist = NULL;
	soc->tx_tso_desc[pool_id].num_free = 0;
	soc->tx_tso_desc[pool_id].pool_size = 0;
	qdf_spin_unlock_bh(&soc->tx_tso_desc[pool_id].lock);
	qdf_spinlock_destroy(&soc->tx_tso_desc[pool_id].lock);
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                                  fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_NOMEM on failure
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	soc->tx_tso_num_seg[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			FL("Alloc Failed %pK pool_id %d"),
			soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_num_seg[pool_id].freelist = c_element;
	soc->tx_tso_num_seg[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));
		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %pK pool_id %d"),
				soc, pool_id);
			goto fail;
		}

		soc->tx_tso_num_seg[pool_id].num_free++;
		c_element = c_element->next;
		c_element->next = NULL;
	}
	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = num_elem;
	qdf_spinlock_create(&soc->tx_tso_num_seg[pool_id].lock);

	return QDF_STATUS_SUCCESS;

fail:
	c_element = soc->tx_tso_num_seg[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}

	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free the pool of descriptors that track
 *                                 the fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 *
 * Return: NONE
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	qdf_spin_lock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	c_element = soc->tx_tso_num_seg[pool_id].freelist;

	if (!c_element) {
		qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Desc Pool Corrupt %d"), pool_id);
		return;
	}

	for (i = 0; i < soc->tx_tso_num_seg[pool_id].num_seg_pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_num_seg[pool_id].freelist = NULL;
	soc->tx_tso_num_seg[pool_id].num_free = 0;
	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = 0;
	qdf_spin_unlock_bh(&soc->tx_tso_num_seg[pool_id].lock);
	qdf_spinlock_destroy(&soc->tx_tso_num_seg[pool_id].lock);
}

#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}
#endif /* FEATURE_TSO */