dp_rx_desc.c

/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"
#ifdef RX_DESC_MULTI_PAGE_ALLOC

/*
 * The per-page element count must be addressable by the offset field of
 * the cookie, which is DP_RX_DESC_PAGE_ID_SHIFT bits wide.
 */
A_COMPILE_TIME_ASSERT(cookie_size_check,
                      PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <=
                      1 << DP_RX_DESC_PAGE_ID_SHIFT);
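
/**
 * dp_rx_desc_pool_alloc() - allocate the rx descriptor pool from multiple
 *                           pages, link the elements into a freelist and
 *                           stamp each descriptor with its cookie
 * @soc: core txrx main context
 * @pool_id: pool ID, encoded into each descriptor cookie
 * @num_elem: number of descriptors to allocate
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * On SoC re-init the previously allocated pages are reused and only the
 * freelist and cookies are rebuilt.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM or
 *         QDF_STATUS_E_FAULT on failure
 */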
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
                                 uint32_t num_elem,
                                 struct rx_desc_pool *rx_desc_pool)
{
        uint32_t id, page_id, offset, desc_size, num_desc_per_page;
        uint32_t count = 0;
        union dp_rx_desc_list_elem_t *rx_desc_elem;

        desc_size = sizeof(*rx_desc_elem);
        rx_desc_pool->elem_size = desc_size;
        if (!dp_is_soc_reinit(soc)) {
                qdf_mem_multi_pages_alloc(soc->osdev,
                                          &rx_desc_pool->desc_pages,
                                          desc_size, num_elem, 0, true);
                if (!rx_desc_pool->desc_pages.num_pages) {
                        qdf_err("Multi page alloc fail, size=%d, elem=%d",
                                desc_size, num_elem);
                        return QDF_STATUS_E_NOMEM;
                }
        }

        num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
        rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
                                  *rx_desc_pool->desc_pages.cacheable_pages;
        if (qdf_mem_multi_page_link(soc->osdev,
                                    &rx_desc_pool->desc_pages,
                                    desc_size, num_elem, true)) {
                qdf_err("overflow num link, size=%d, elem=%d",
                        desc_size, num_elem);
                goto free_rx_desc_pool;
        }

        /* Initialize the lock */
        qdf_spinlock_create(&rx_desc_pool->lock);
        qdf_spin_lock_bh(&rx_desc_pool->lock);
        rx_desc_pool->pool_size = num_elem;
        rx_desc_elem = rx_desc_pool->freelist;
        while (rx_desc_elem) {
                page_id = count / num_desc_per_page;
                offset = count % num_desc_per_page;
                /*
                 * The cookie below is sized against the REO destination
                 * ring field
                 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie,
                 * which is 21 bits wide. The encoding uses 20 of them:
                 * 8 bits - offset within the page
                 * 8 bits - page ID
                 * 4 bits - pool ID
                 */
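                /*
                 * Worked example (assuming DP_RX_DESC_POOL_ID_SHIFT == 16
                 * and DP_RX_DESC_PAGE_ID_SHIFT == 8, matching the field
                 * widths above): pool_id = 1, page_id = 2, offset = 3
                 * gives id = (1 << 16) | (2 << 8) | 3 = 0x10203.
                 */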
                id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
                      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
                      offset);
                rx_desc_elem->rx_desc.cookie = id;
                rx_desc_elem->rx_desc.pool_id = pool_id;
                rx_desc_elem->rx_desc.in_use = 0;
                rx_desc_elem = rx_desc_elem->next;
                count++;
        }
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
        dp_rx_desc_pool_free(soc, rx_desc_pool);

        return QDF_STATUS_E_FAULT;
}
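
/**
 * dp_rx_desc_find() - locate a descriptor element by its page ID and
 *                     offset within the multi-page descriptor pool
 * @page_id: index of the page holding the element
 * @offset: element index within that page
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: pointer to the rx descriptor list element
 */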
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
                                              struct rx_desc_pool *rx_desc_pool)
{
        return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
               rx_desc_pool->elem_size * offset;
}
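
/**
 * __dp_rx_desc_nbuf_free() - free the network buffer attached to every
 *                            in-use descriptor in the pool, unmapping any
 *                            buffer that is still DMA-mapped
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * The caller is expected to hold rx_desc_pool->lock.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL if the pool has no
 *         backing pages
 */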
static QDF_STATUS __dp_rx_desc_nbuf_free(struct dp_soc *soc,
                                         struct rx_desc_pool *rx_desc_pool)
{
        uint32_t i, num_desc, page_id, offset, num_desc_per_page;
        union dp_rx_desc_list_elem_t *rx_desc_elem;
        struct dp_rx_desc *rx_desc;
        qdf_nbuf_t nbuf;

        if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
                qdf_err("No pages found on this desc pool");
                return QDF_STATUS_E_INVAL;
        }

        num_desc = rx_desc_pool->pool_size;
        num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
        for (i = 0; i < num_desc; i++) {
                page_id = i / num_desc_per_page;
                offset = i % num_desc_per_page;
                rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
                rx_desc = &rx_desc_elem->rx_desc;
                if (rx_desc->in_use) {
                        nbuf = rx_desc->nbuf;
                        if (!rx_desc->unmapped) {
                                dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
                                                                  false);
                                qdf_nbuf_unmap_single(soc->osdev, nbuf,
                                                      QDF_DMA_BIDIRECTIONAL);
                        }
                        qdf_nbuf_free(nbuf);
                }
        }

        return QDF_STATUS_SUCCESS;
}
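
/**
 * dp_rx_desc_nbuf_and_pool_free() - free all attached network buffers and
 *                                   then release the pool's page memory
 * @soc: core txrx main context
 * @pool_id: pool ID (unused in the multi-page variant)
 * @rx_desc_pool: rx descriptor pool pointer
 */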
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
                                   struct rx_desc_pool *rx_desc_pool)
{
        QDF_STATUS qdf_status;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        qdf_status = __dp_rx_desc_nbuf_free(soc, rx_desc_pool);
        if (QDF_IS_STATUS_SUCCESS(qdf_status))
                dp_rx_desc_pool_free(soc, rx_desc_pool);
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        qdf_spinlock_destroy(&rx_desc_pool->lock);
}
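
/**
 * dp_rx_desc_nbuf_free() - free the network buffers attached to in-use
 *                          descriptors, leaving the pool memory intact
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */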
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        qdf_spin_lock_bh(&rx_desc_pool->lock);
        __dp_rx_desc_nbuf_free(soc, rx_desc_pool);
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        qdf_spinlock_destroy(&rx_desc_pool->lock);
}
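
/**
 * dp_rx_desc_pool_free() - release the multi-page memory backing the
 *                          descriptor pool, if any was allocated
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */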
void dp_rx_desc_pool_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
                return;
        qdf_mem_multi_pages_free(soc->osdev,
                                 &rx_desc_pool->desc_pages, 0, true);
}
#else
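
/**
 * dp_rx_desc_pool_alloc() - single-allocation fallback used when
 *                           RX_DESC_MULTI_PAGE_ALLOC is not defined; the
 *                           whole pool is one contiguous array
 * @soc: core txrx main context
 * @pool_id: pool ID, encoded into each descriptor cookie
 * @pool_size: number of descriptors to allocate
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOMEM
 */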
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
                                 uint32_t pool_size,
                                 struct rx_desc_pool *rx_desc_pool)
{
        uint32_t i;

        if (!dp_is_soc_reinit(soc)) {
                rx_desc_pool->array =
                        qdf_mem_malloc(pool_size *
                                       sizeof(union dp_rx_desc_list_elem_t));
                if (!(rx_desc_pool->array)) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
                                  "%s: RX Desc Pool[%d] allocation failed",
                                  __func__, pool_id);
                        return QDF_STATUS_E_NOMEM;
                }
        }

        /* Initialize the lock */
        qdf_spinlock_create(&rx_desc_pool->lock);
        qdf_spin_lock_bh(&rx_desc_pool->lock);
        rx_desc_pool->pool_size = pool_size;

        /* link SW rx descs into a freelist */
        rx_desc_pool->freelist = &rx_desc_pool->array[0];
        for (i = 0; i < rx_desc_pool->pool_size - 1; i++) {
                rx_desc_pool->array[i].next = &rx_desc_pool->array[i + 1];
                rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
                rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
                rx_desc_pool->array[i].rx_desc.in_use = 0;
        }

        /* terminate the freelist at the last element */
        rx_desc_pool->array[i].next = NULL;
        rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
        rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
        rx_desc_pool->array[i].rx_desc.in_use = 0;
        qdf_spin_unlock_bh(&rx_desc_pool->lock);

        return QDF_STATUS_SUCCESS;
}
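
/**
 * dp_rx_desc_nbuf_and_pool_free() - free the attached network buffers,
 *                                   unmapping any still-mapped ones, then
 *                                   free the descriptor array itself
 * @soc: core txrx main context
 * @pool_id: pool ID (unused in this variant)
 * @rx_desc_pool: rx descriptor pool pointer
 */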
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
                                   struct rx_desc_pool *rx_desc_pool)
{
        qdf_nbuf_t nbuf;
        int i;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        for (i = 0; i < rx_desc_pool->pool_size; i++) {
                if (rx_desc_pool->array[i].rx_desc.in_use) {
                        nbuf = rx_desc_pool->array[i].rx_desc.nbuf;
                        if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
                                dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
                                                                  false);
                                qdf_nbuf_unmap_single(soc->osdev, nbuf,
                                                      QDF_DMA_FROM_DEVICE);
                        }
                        qdf_nbuf_free(nbuf);
                }
        }
        qdf_mem_free(rx_desc_pool->array);
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        qdf_spinlock_destroy(&rx_desc_pool->lock);
}
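
/**
 * dp_rx_desc_nbuf_free() - free the network buffers attached to in-use
 *                          descriptors, leaving the descriptor array intact
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */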
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        qdf_nbuf_t nbuf;
        int i;

        qdf_spin_lock_bh(&rx_desc_pool->lock);
        for (i = 0; i < rx_desc_pool->pool_size; i++) {
                if (rx_desc_pool->array[i].rx_desc.in_use) {
                        nbuf = rx_desc_pool->array[i].rx_desc.nbuf;
                        if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
                                dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
                                                                  false);
                                qdf_nbuf_unmap_single(soc->osdev, nbuf,
                                                      QDF_DMA_FROM_DEVICE);
                        }
                        qdf_nbuf_free(nbuf);
                }
        }
        qdf_spin_unlock_bh(&rx_desc_pool->lock);
        qdf_spinlock_destroy(&rx_desc_pool->lock);
}
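
/**
 * dp_rx_desc_pool_free() - free the contiguous descriptor array
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */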
void dp_rx_desc_pool_free(struct dp_soc *soc,
                          struct rx_desc_pool *rx_desc_pool)
{
        qdf_mem_free(rx_desc_pool->array);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

/*
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *                              the free rx desc pool.
 *
 * @soc: core txrx main context
 * @pool_id: pool ID, which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descriptors requested from the freelist
 * @desc_list: head of the detached descriptor list (output parameter)
 * @tail: last descriptor of the detached list (output parameter)
 *
 * Return: number of descriptors allocated from the free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
                                  struct rx_desc_pool *rx_desc_pool,
                                  uint16_t num_descs,
                                  union dp_rx_desc_list_elem_t **desc_list,
                                  union dp_rx_desc_list_elem_t **tail)
{
        uint16_t count;

        qdf_spin_lock_bh(&rx_desc_pool->lock);

        *desc_list = *tail = rx_desc_pool->freelist;
        for (count = 0; count < num_descs; count++) {
                if (qdf_unlikely(!rx_desc_pool->freelist)) {
                        qdf_spin_unlock_bh(&rx_desc_pool->lock);
                        return count;
                }
                *tail = rx_desc_pool->freelist;
                rx_desc_pool->freelist = rx_desc_pool->freelist->next;
        }
        /* detach the taken sublist from the pool freelist */
        (*tail)->next = NULL;
        qdf_spin_unlock_bh(&rx_desc_pool->lock);

        return count;
}
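
/*
 * Illustrative usage sketch (assumed caller pattern, not code from this
 * file): a replenish path reserves descriptors, attaches a buffer to each
 * one it received, and hands any unused remainder back to the pool.
 *
 *      union dp_rx_desc_list_elem_t *desc_list = NULL;
 *      union dp_rx_desc_list_elem_t *tail = NULL;
 *      uint16_t num;
 *
 *      num = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool,
 *                                     num_req_buffers, &desc_list, &tail);
 *      (attach an nbuf to each of the first num descriptors, walking
 *       desc_list forward; desc_list and tail then delimit the leftovers)
 *      if (desc_list)
 *              dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
 *                                               mac_id, rx_desc_pool);
 */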

/*
 * dp_rx_add_desc_list_to_free_list() - append an unused descriptor list
 *                                      back to the freelist.
 *
 * @soc: core txrx main context
 * @local_desc_list: local descriptor list provided by the caller
 * @tail: pointer to the last descriptor of the local list
 * @pool_id: pool ID, which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
                                      union dp_rx_desc_list_elem_t **local_desc_list,
                                      union dp_rx_desc_list_elem_t **tail,
                                      uint16_t pool_id,
                                      struct rx_desc_pool *rx_desc_pool)
{
        union dp_rx_desc_list_elem_t *temp_list = NULL;

        qdf_spin_lock_bh(&rx_desc_pool->lock);

        temp_list = rx_desc_pool->freelist;
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK, (*tail)->next: %pK",
                  temp_list, *local_desc_list, *tail, (*tail)->next);

        /* prepend the local list to the current freelist */
        rx_desc_pool->freelist = *local_desc_list;
        (*tail)->next = temp_list;

        qdf_spin_unlock_bh(&rx_desc_pool->lock);
}