dp_tx_desc.c 9.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281
  1. /*
  2. * Copyright (c) 2016 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include "dp_types.h"
  19. #include "dp_tx_desc.h"
#ifndef DESC_PARTITION
/*
 * Without DESC_PARTITION, the descriptor element size is rounded up to the
 * next power of two (qdf_get_pwr2) so that a descriptor's page number and
 * in-page offset can be derived from its index with shifts and masks.
 */
#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
/*
 * DP_TX_DESC_PAGE_DIVIDER - precompute per-pool index-decode helpers:
 * offset_filter masks out the in-page offset (valid because
 * num_desc_per_page is a power of two here) and page_divider is
 * log2(num_desc_per_page), used to shift an index down to its page id.
 * NOTE: the loop consumes (zeroes) the num_desc_per_page argument, so
 * callers must not reuse that variable afterwards.
 */
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) \
do { \
uint8_t sig_bit; \
soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
/* Calculate page divider to find page number */ \
sig_bit = 0; \
while (num_desc_per_page) { \
sig_bit++; \
num_desc_per_page = num_desc_per_page >> 1; \
} \
soc->tx_desc[pool_id].page_divider = (sig_bit - 1); \
} while (0)
#else
/* DESC_PARTITION: descriptors keep their natural size, no rounding */
#define DP_TX_DESC_SIZE(a) a
#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
#endif /* DESC_PARTITION */
  38. /**
  39. * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
  40. * @soc Handle to DP SoC structure
  41. * @num_pool Number of pools to allocate
  42. * @num_elem Number of descriptor elements per pool
  43. *
  44. * This function allocates memory for SW tx descriptors
  45. * (used within host for tx data path).
  46. * The number of tx descriptors required will be large
  47. * since based on number of clients (1024 clients x 3 radios),
  48. * outstanding MSDUs stored in TQM queues and LMAC queues will be significantly
  49. * large.
  50. *
  51. * To avoid allocating a large contiguous memory, it uses multi_page_alloc qdf
  52. * function to allocate memory
  53. * in multiple pages. It then iterates through the memory allocated across pages
  54. * and links each descriptor
  55. * to next descriptor, taking care of page boundaries.
  56. *
  57. * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are allocated,
  58. * one for each ring;
  59. * This minimizes lock contention when hard_start_xmit is called
  60. * from multiple CPUs.
  61. * Alternately, multiple pools can be used for multiple VDEVs for VDEV level
  62. * flow control.
  63. *
  64. * Return: Status code. 0 for success.
  65. */
  66. QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
  67. uint16_t num_elem)
  68. {
  69. uint32_t id, count, page_id, offset, pool_id_32;
  70. uint16_t num_page, num_desc_per_page;
  71. struct dp_tx_desc_s *tx_desc_elem;
  72. uint32_t desc_size;
  73. desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
  74. soc->tx_desc[pool_id].elem_size = desc_size;
  75. qdf_mem_multi_pages_alloc(soc->osdev,
  76. &soc->tx_desc[pool_id].desc_pages, desc_size, num_elem,
  77. 0, true);
  78. if (!soc->tx_desc[pool_id].desc_pages.num_pages) {
  79. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  80. "Multi page alloc fail, tx desc");
  81. goto fail_exit;
  82. }
  83. num_page = soc->tx_desc[pool_id].desc_pages.num_pages;
  84. num_desc_per_page =
  85. soc->tx_desc[pool_id].desc_pages.num_element_per_page;
  86. soc->tx_desc[pool_id].freelist = (struct dp_tx_desc_s *)
  87. *soc->tx_desc[pool_id].desc_pages.cacheable_pages;
  88. if (qdf_mem_multi_page_link(soc->osdev,
  89. &soc->tx_desc[pool_id].desc_pages, desc_size, num_elem, true)) {
  90. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  91. "invalid tx desc allocation - overflow num link");
  92. goto free_tx_desc;
  93. }
  94. /* Set unique IDs for each Tx descriptor */
  95. tx_desc_elem = soc->tx_desc[pool_id].freelist;
  96. count = 0;
  97. pool_id_32 = (uint32_t)pool_id;
  98. while (tx_desc_elem) {
  99. page_id = count / num_desc_per_page;
  100. offset = count % num_desc_per_page;
  101. id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
  102. (page_id << DP_TX_DESC_ID_PAGE_OS) | offset);
  103. tx_desc_elem->id = id;
  104. tx_desc_elem->pool_id = pool_id;
  105. tx_desc_elem = tx_desc_elem->next;
  106. count++;
  107. }
  108. TX_DESC_LOCK_CREATE(&soc->tx_desc[pool_id].lock);
  109. return QDF_STATUS_SUCCESS;
  110. free_tx_desc:
  111. qdf_mem_multi_pages_free(soc->osdev,
  112. &soc->tx_desc[pool_id].desc_pages, 0, true);
  113. fail_exit:
  114. return QDF_STATUS_E_FAULT;
  115. }
  116. /**
  117. * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx Descriptors
  118. *
  119. * @soc Handle to DP SoC structure
  120. * @pool_id
  121. *
  122. * Return:
  123. */
  124. QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
  125. {
  126. qdf_mem_multi_pages_free(soc->osdev,
  127. &soc->tx_desc[pool_id].desc_pages, 0, true);
  128. TX_DESC_LOCK_DESTROY(&soc->tx_desc[pool_id].lock);
  129. return QDF_STATUS_SUCCESS;
  130. }
  131. /**
  132. * dp_tx_ext_desc_pool_alloc() - Allocate tx ext descriptor pool
  133. * @soc Handle to DP SoC structure
  134. * @pool_id
  135. *
  136. * Return: NONE
  137. */
  138. QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
  139. uint16_t num_elem)
  140. {
  141. uint16_t num_page;
  142. uint32_t count;
  143. struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
  144. struct qdf_mem_dma_page_t *page_info;
  145. struct qdf_mem_multi_page_t *pages;
  146. QDF_STATUS status;
  147. /* Coherent tx extension descriptor alloc */
  148. soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXTENSION_DESC_LEN_BYTES;
  149. soc->tx_ext_desc[pool_id].elem_count = num_elem;
  150. qdf_mem_multi_pages_alloc(soc->osdev,
  151. &soc->tx_ext_desc[pool_id].desc_pages,
  152. soc->tx_ext_desc[pool_id].elem_size,
  153. soc->tx_ext_desc[pool_id].elem_count,
  154. qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
  155. false);
  156. if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) {
  157. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  158. "ext desc page alloc fail");
  159. status = QDF_STATUS_E_NOMEM;
  160. goto fail_exit;
  161. }
  162. num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages;
  163. /*
  164. * Cacheable ext descriptor link alloc
  165. * This structure also large size already
  166. * single element is 24bytes, 2K elements are 48Kbytes
  167. * Have to alloc multi page cacheable memory
  168. */
  169. soc->tx_ext_desc[pool_id].link_elem_size =
  170. sizeof(struct dp_tx_ext_desc_elem_s);
  171. qdf_mem_multi_pages_alloc(soc->osdev,
  172. &soc->tx_ext_desc[pool_id].desc_link_pages,
  173. soc->tx_ext_desc[pool_id].link_elem_size,
  174. soc->tx_ext_desc[pool_id].elem_count, 0,
  175. true);
  176. if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
  177. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  178. "ext link desc page alloc fail");
  179. status = QDF_STATUS_E_NOMEM;
  180. goto free_ext_desc_page;
  181. }
  182. /* link tx descriptors into a freelist */
  183. soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *)
  184. *soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages;
  185. if (qdf_mem_multi_page_link(soc->osdev,
  186. &soc->tx_ext_desc[pool_id].desc_link_pages,
  187. soc->tx_ext_desc[pool_id].link_elem_size,
  188. soc->tx_ext_desc[pool_id].elem_count, true)) {
  189. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  190. "ext link desc page linking fail");
  191. status = QDF_STATUS_E_FAULT;
  192. goto free_ext_link_desc_page;
  193. }
  194. /* Assign coherent memory pointer into linked free list */
  195. pages = &soc->tx_ext_desc[pool_id].desc_pages;
  196. page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages;
  197. c_elem = soc->tx_ext_desc[pool_id].freelist;
  198. p_elem = c_elem;
  199. for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) {
  200. if (!(count % pages->num_element_per_page)) {
  201. /**
  202. * First element for new page,
  203. * should point next page
  204. */
  205. if (!pages->dma_pages->page_v_addr_start) {
  206. QDF_TRACE(QDF_MODULE_ID_DP,
  207. QDF_TRACE_LEVEL_ERROR,
  208. "link over flow");
  209. status = QDF_STATUS_E_FAULT;
  210. goto free_ext_link_desc_page;
  211. }
  212. c_elem->vaddr = (void *)page_info->page_v_addr_start;
  213. c_elem->paddr = page_info->page_p_addr;
  214. page_info++;
  215. } else {
  216. c_elem->vaddr = (void *)(p_elem->vaddr +
  217. soc->tx_ext_desc[pool_id].elem_size);
  218. c_elem->paddr = (p_elem->paddr +
  219. soc->tx_ext_desc[pool_id].elem_size);
  220. }
  221. p_elem = c_elem;
  222. c_elem = c_elem->next;
  223. if (!c_elem)
  224. break;
  225. }
  226. TX_DESC_LOCK_CREATE(&soc->tx_ext_desc[pool_id].lock);
  227. return QDF_STATUS_SUCCESS;
  228. free_ext_link_desc_page:
  229. qdf_mem_multi_pages_free(soc->osdev,
  230. &soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);
  231. free_ext_desc_page:
  232. qdf_mem_multi_pages_free(soc->osdev,
  233. &soc->tx_ext_desc[pool_id].desc_pages,
  234. qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
  235. false);
  236. fail_exit:
  237. return status;
  238. }
  239. /**
  240. * dp_tx_ext_desc_pool_free() - free tx ext descriptor pool
  241. * @soc: Handle to DP SoC structure
  242. * @pool_id: extension descriptor pool id
  243. *
  244. * Return: NONE
  245. */
  246. QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
  247. {
  248. qdf_mem_multi_pages_free(soc->osdev,
  249. &soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);
  250. qdf_mem_multi_pages_free(soc->osdev,
  251. &soc->tx_ext_desc[pool_id].desc_pages,
  252. qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
  253. false);
  254. TX_DESC_LOCK_DESTROY(&soc->tx_ext_desc[pool_id].lock);
  255. return QDF_STATUS_SUCCESS;
  256. }