dp_tx_desc.c

/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include "dp_tx_desc.h"

#ifndef DESC_PARTITION
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) \
do {                                                             \
	uint8_t sig_bit;                                         \
	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
	/* Calculate page divider to find page number */         \
	sig_bit = 0;                                             \
	while (num_desc_per_page) {                              \
		sig_bit++;                                       \
		num_desc_per_page = num_desc_per_page >> 1;      \
	}                                                        \
	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);      \
} while (0)
#else
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
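
/*
 * Sketch of how the two fields computed above can be used: because
 * DP_TX_DESC_SIZE() rounds the element size up to a power of two,
 * num_desc_per_page is also a power of two, so a flat descriptor
 * index splits into (page, offset) without a division:
 *
 *   page_id = index >> soc->tx_desc[pool_id].page_divider;
 *   offset  = index & soc->tx_desc[pool_id].offset_filter;
 */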

/**
 * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * This function allocates memory for SW tx descriptors
 * (used within host for tx data path).
 * The number of tx descriptors required will be large since,
 * based on the number of clients (1024 clients x 3 radios),
 * the number of outstanding MSDUs stored in TQM queues and
 * LMAC queues will be significantly large.
 *
 * To avoid allocating one large contiguous block, this function uses the
 * qdf multi_page_alloc helper to allocate memory in multiple pages. It
 * then iterates through the memory allocated across pages and links each
 * descriptor to the next, taking care of page boundaries.
 *
 * Since WiFi 3.0 HW supports multiple Tx rings, one pool is allocated per
 * ring. This minimizes lock contention when hard_start_xmit is called
 * from multiple CPUs.
 * Alternately, multiple pools can be used for multiple VDEVs for VDEV-level
 * flow control.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT on failure
 */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	uint32_t id, count, page_id, offset, pool_id_32;
	uint16_t num_page, num_desc_per_page;
	struct dp_tx_desc_s *tx_desc_elem;
	uint32_t desc_size;

	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
	soc->tx_desc[pool_id].elem_size = desc_size;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_desc[pool_id].desc_pages, desc_size, num_elem,
		0, true);
	if (!soc->tx_desc[pool_id].desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Multi page alloc fail, tx desc");
		goto fail_exit;
	}

	num_page = soc->tx_desc[pool_id].desc_pages.num_pages;
	num_desc_per_page =
		soc->tx_desc[pool_id].desc_pages.num_element_per_page;
	soc->tx_desc[pool_id].freelist = (struct dp_tx_desc_s *)
		*soc->tx_desc[pool_id].desc_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&soc->tx_desc[pool_id].desc_pages, desc_size, num_elem, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"invalid tx desc allocation - overflow num link");
		goto free_tx_desc;
	}

	/* Set unique IDs for each Tx descriptor */
	tx_desc_elem = soc->tx_desc[pool_id].freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	while (tx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc_elem->id = id;
		tx_desc_elem->pool_id = pool_id;
		tx_desc_elem = tx_desc_elem->next;
		count++;
	}

	TX_DESC_LOCK_CREATE(&soc->tx_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;

free_tx_desc:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_desc[pool_id].desc_pages, 0, true);
fail_exit:
	return QDF_STATUS_E_FAULT;
}
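
/*
 * Sketch of the inverse of the ID packing above, assuming the
 * DP_TX_DESC_ID_*_MASK/_OS constants from dp_tx_desc.h; the tx
 * completion path can recover a descriptor's location from its id:
 *
 *   pool_id = (id & DP_TX_DESC_ID_POOL_MASK) >> DP_TX_DESC_ID_POOL_OS;
 *   page_id = (id & DP_TX_DESC_ID_PAGE_MASK) >> DP_TX_DESC_ID_PAGE_OS;
 *   offset  = (id & DP_TX_DESC_ID_OFFSET_MASK) >> DP_TX_DESC_ID_OFFSET_OS;
 */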

/**
 * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx Descriptors
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_desc[pool_id].desc_pages, 0, true);
	TX_DESC_LOCK_DESTROY(&soc->tx_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_ext_desc_pool_alloc() - allocate tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 * @num_elem: number of descriptor elements in the pool
 *
 * Return: QDF_STATUS_SUCCESS on success, error code on failure
 */
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	uint16_t num_page;
	uint32_t count;
	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
	struct qdf_mem_dma_page_t *page_info;
	struct qdf_mem_multi_page_t *pages;
	QDF_STATUS status;

	/* Coherent tx extension descriptor alloc */
	soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	soc->tx_ext_desc[pool_id].elem_count = num_elem;
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		soc->tx_ext_desc[pool_id].elem_size,
		soc->tx_ext_desc[pool_id].elem_count,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);
	if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto fail_exit;
	}

	num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages;

	/*
	 * Cacheable ext descriptor link alloc
	 * This structure is also large: a single element is 24 bytes, so
	 * 2K elements take 48 KB. Allocate it as multi-page cacheable memory.
	 */
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	qdf_mem_multi_pages_alloc(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, 0,
		true);
	if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page alloc fail");
		status = QDF_STATUS_E_NOMEM;
		goto free_ext_desc_page;
	}

	/* link tx descriptors into a freelist */
	soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *)
		*soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages;
	if (qdf_mem_multi_page_link(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages,
		soc->tx_ext_desc[pool_id].link_elem_size,
		soc->tx_ext_desc[pool_id].elem_count, true)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"ext link desc page linking fail");
		status = QDF_STATUS_E_FAULT;
		goto free_ext_link_desc_page;
	}

	/* Assign coherent memory pointer into linked free list */
	pages = &soc->tx_ext_desc[pool_id].desc_pages;
	page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages;
	c_elem = soc->tx_ext_desc[pool_id].freelist;
	p_elem = c_elem;
	for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) {
		if (!(count % pages->num_element_per_page)) {
			/*
			 * First element of a new page:
			 * point it at the start of the next DMA page
			 */
			if (!page_info->page_v_addr_start) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"link overflow");
				status = QDF_STATUS_E_FAULT;
				goto free_ext_link_desc_page;
			}
			c_elem->vaddr = (void *)page_info->page_v_addr_start;
			c_elem->paddr = page_info->page_p_addr;
			page_info++;
		} else {
			c_elem->vaddr = (void *)((uint8_t *)p_elem->vaddr +
				soc->tx_ext_desc[pool_id].elem_size);
			c_elem->paddr = (p_elem->paddr +
				soc->tx_ext_desc[pool_id].elem_size);
		}
		p_elem = c_elem;
		c_elem = c_elem->next;
		if (!c_elem)
			break;
	}

	soc->tx_ext_desc[pool_id].num_free = num_elem;
	TX_DESC_LOCK_CREATE(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;

free_ext_link_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);
free_ext_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);
fail_exit:
	return status;
}
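
/*
 * Layout sketch for the pool built above (names as used in this file):
 * each cacheable link element carries a (vaddr, paddr) pair into the
 * coherent pages, so the host writes the extension descriptor through
 * elem->vaddr, while elem->paddr is the DMA address handed to hardware
 * elsewhere in the tx path.
 */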

/**
 * dp_tx_ext_desc_pool_free() - free tx ext descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: extension descriptor pool id
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);
	qdf_mem_multi_pages_free(soc->osdev,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);
	TX_DESC_LOCK_DESTROY(&soc->tx_ext_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_desc_pool_alloc() - allocate tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 * @num_elem: number of tso descriptor elements
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */
#if defined(FEATURE_TSO)
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	soc->tx_tso_desc[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			FL("Alloc Failed %p pool_id %d"),
			soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_desc[pool_id].freelist = c_element;
	soc->tx_tso_desc[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t));
		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %p pool_id %d"),
				soc, pool_id);
			goto fail;
		}

		soc->tx_tso_desc[pool_id].num_free++;
		c_element = c_element->next;
		c_element->next = NULL;
	}
	TSO_DEBUG("Number of free descriptors: %u\n",
		soc->tx_tso_desc[pool_id].num_free);
	soc->tx_tso_desc[pool_id].pool_size = num_elem;
	TX_DESC_LOCK_CREATE(&soc->tx_tso_desc[pool_id].lock);
	return QDF_STATUS_SUCCESS;

fail:
	c_element = soc->tx_tso_desc[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}
	return QDF_STATUS_E_NOMEM;
}
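
/*
 * Sketch of a freelist pop against the pool built above, using only
 * fields from this file (the driver's real alloc helper may differ):
 *
 *   TX_DESC_LOCK_LOCK(&soc->tx_tso_desc[pool_id].lock);
 *   elem = soc->tx_tso_desc[pool_id].freelist;
 *   if (elem) {
 *           soc->tx_tso_desc[pool_id].freelist = elem->next;
 *           soc->tx_tso_desc[pool_id].num_free--;
 *   }
 *   TX_DESC_LOCK_UNLOCK(&soc->tx_tso_desc[pool_id].lock);
 */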

/**
 * dp_tx_tso_desc_pool_free() - free tx tso descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: tso descriptor pool id
 *
 * Return: NONE
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_seg_elem_t *c_element;
	struct qdf_tso_seg_elem_t *temp;

	TX_DESC_LOCK_LOCK(&soc->tx_tso_desc[pool_id].lock);
	c_element = soc->tx_tso_desc[pool_id].freelist;

	if (!c_element) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_tso_desc[pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Desc Pool Corrupt %d"), pool_id);
		return;
	}

	for (i = 0; i < soc->tx_tso_desc[pool_id].pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_desc[pool_id].freelist = NULL;
	soc->tx_tso_desc[pool_id].num_free = 0;
	soc->tx_tso_desc[pool_id].pool_size = 0;
	TX_DESC_LOCK_UNLOCK(&soc->tx_tso_desc[pool_id].lock);
	TX_DESC_LOCK_DESTROY(&soc->tx_tso_desc[pool_id].lock);
}

/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                                  fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	soc->tx_tso_num_seg[pool_id].num_free = 0;
	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));

	if (!c_element) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			FL("Alloc Failed %p pool_id %d"),
			soc, pool_id);
		return QDF_STATUS_E_NOMEM;
	}

	soc->tx_tso_num_seg[pool_id].freelist = c_element;
	soc->tx_tso_num_seg[pool_id].num_free++;
	for (i = 0; i < (num_elem - 1); i++) {
		c_element->next =
			qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));
		if (!c_element->next) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("Alloc Failed %p pool_id %d"),
				soc, pool_id);
			goto fail;
		}

		soc->tx_tso_num_seg[pool_id].num_free++;
		c_element = c_element->next;
		c_element->next = NULL;
	}
	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = num_elem;
	TX_DESC_LOCK_CREATE(&soc->tx_tso_num_seg[pool_id].lock);
	return QDF_STATUS_SUCCESS;

fail:
	c_element = soc->tx_tso_num_seg[pool_id].freelist;
	while (c_element) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
	}
	return QDF_STATUS_E_NOMEM;
}

/**
 * dp_tx_tso_num_seg_pool_free() - free the pool of descriptors that track
 *                                 the fragments in each tso segment
 * @soc: handle to dp soc structure
 * @pool_id: descriptor pool id
 *
 * Return: NONE
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	int i;
	struct qdf_tso_num_seg_elem_t *c_element;
	struct qdf_tso_num_seg_elem_t *temp;

	TX_DESC_LOCK_LOCK(&soc->tx_tso_num_seg[pool_id].lock);
	c_element = soc->tx_tso_num_seg[pool_id].freelist;

	if (!c_element) {
		TX_DESC_LOCK_UNLOCK(&soc->tx_tso_num_seg[pool_id].lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Desc Pool Corrupt %d"), pool_id);
		return;
	}

	for (i = 0; i < soc->tx_tso_num_seg[pool_id].num_seg_pool_size; i++) {
		temp = c_element->next;
		qdf_mem_free(c_element);
		c_element = temp;
		if (!c_element)
			break;
	}

	soc->tx_tso_num_seg[pool_id].freelist = NULL;
	soc->tx_tso_num_seg[pool_id].num_free = 0;
	soc->tx_tso_num_seg[pool_id].num_seg_pool_size = 0;
	TX_DESC_LOCK_UNLOCK(&soc->tx_tso_num_seg[pool_id].lock);
	TX_DESC_LOCK_DESTROY(&soc->tx_tso_num_seg[pool_id].lock);
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
}
#endif /* FEATURE_TSO */