dp_rx_desc.c
/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"

#ifdef RX_DESC_MULTI_PAGE_ALLOC
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <=
		      1 << DP_RX_DESC_PAGE_ID_SHIFT);
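
/*
 * The assert above guards the cookie encoding: the per-page offset is
 * stored in the low DP_RX_DESC_PAGE_ID_SHIFT bits of the cookie, so a
 * single page must never hold more than (1 << DP_RX_DESC_PAGE_ID_SHIFT)
 * descriptor elements.
 */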

/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *				    rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, size=%d, elem=%d",
		       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @num_elem: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 *		       QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t desc_size;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;

	qdf_mem_multi_pages_alloc(soc->osdev, &rx_desc_pool->desc_pages,
				  desc_size, num_elem, 0, true);
	if (!rx_desc_pool->desc_pages.num_pages) {
		qdf_err("Multi page alloc fail,size=%d, elem=%d",
			desc_size, num_elem);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link,size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			    convert the pool of memory into a list of
 *			    rx descriptors and create locks to access this
 *			    list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	uint32_t id, page_id, offset, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * Below cookie size is from REO destination ring
		 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie
		 * cookie size = 21 bits
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}
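
/*
 * Illustrative sketch (not part of the driver): given the cookie layout
 * documented in dp_rx_desc_pool_init() above (pool id, page id, offset),
 * a cookie can be decoded back into its fields and resolved through
 * dp_rx_desc_find(). The function name, the #ifdef guard and the field
 * masks below are local assumptions derived from the shift layout.
 */
#ifdef DP_RX_DESC_COOKIE_DECODE_EXAMPLE
static inline struct dp_rx_desc *
dp_rx_desc_cookie_decode_example(uint32_t cookie,
				 struct rx_desc_pool *rx_desc_pool)
{
	/* offset occupies the bits below DP_RX_DESC_PAGE_ID_SHIFT */
	uint16_t offset = cookie & ((1 << DP_RX_DESC_PAGE_ID_SHIFT) - 1);
	/* page id occupies the bits between the two shifts */
	uint16_t page_id = (cookie >> DP_RX_DESC_PAGE_ID_SHIFT) &
			   ((1 << (DP_RX_DESC_POOL_ID_SHIFT -
				   DP_RX_DESC_PAGE_ID_SHIFT)) - 1);

	/* dp_rx_desc_find() resolves page base + elem_size * offset */
	return &dp_rx_desc_find(page_id, offset, rx_desc_pool)->rx_desc;
}
#endif /* DP_RX_DESC_COOKIE_DECODE_EXAMPLE */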

static QDF_STATUS __dp_rx_desc_nbuf_free(struct dp_soc *soc,
					 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;

	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}
	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page =
		rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		dp_rx_desc_free_dbg_info(rx_desc);
		if (rx_desc->in_use) {
			nbuf = rx_desc->nbuf;
			if (!rx_desc->unmapped) {
				dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
								  false);
				qdf_nbuf_unmap_single(soc->osdev, nbuf,
						      QDF_DMA_BIDIRECTIONAL);
			}
			qdf_nbuf_free(nbuf);
		}
	}

	return QDF_STATUS_SUCCESS;
}

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS qdf_status;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	qdf_status = __dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	if (QDF_IS_STATUS_SUCCESS(qdf_status))
		dp_rx_desc_pool_free(soc, rx_desc_pool);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	__dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;
	qdf_mem_multi_pages_free(soc->osdev,
				 &rx_desc_pool->desc_pages, 0, true);
}

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}
#else
/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *				    rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->array) {
		dp_err("nss-wifi<4> skip Rx refill");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @pool_size: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	rx_desc_pool->array = qdf_mem_malloc(pool_size *
				sizeof(union dp_rx_desc_list_elem_t));

	if (!(rx_desc_pool->array)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "RX Desc Pool allocation failed");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			    convert the pool of memory into a list of
 *			    rx descriptors and create locks to access this
 *			    list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	int i;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	qdf_mem_zero(rx_desc_pool->freelist,
		     rx_desc_pool->pool_size *
		     sizeof(union dp_rx_desc_list_elem_t));

	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
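
/*
 * Note on the cookie encoding above: in this non-multi-page path the
 * cookie is simply the array index in the low bits with the pool id
 * placed at bit 18, so no page id field is needed.
 */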

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;
			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
								  false);
				qdf_nbuf_unmap_single(soc->osdev, nbuf,
						      QDF_DMA_FROM_DEVICE);
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_mem_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;
			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
								  false);
				qdf_nbuf_unmap_single(soc->osdev, nbuf,
						      QDF_DMA_FROM_DEVICE);
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_free(rx_desc_pool->array);
}

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */
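
/*
 * Illustrative lifecycle sketch (not part of the driver): regardless of
 * which branch above is compiled in, a caller is expected to pair the
 * routines as alloc -> init -> ... -> deinit -> free. The function name
 * and the #ifdef guard below are local assumptions for illustration.
 */
#ifdef DP_RX_DESC_POOL_LIFECYCLE_EXAMPLE
static QDF_STATUS
dp_rx_desc_pool_lifecycle_example(struct dp_soc *soc, uint32_t pool_id,
				  uint32_t num_elem,
				  struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Reserve backing memory for num_elem software rx descriptors */
	status = dp_rx_desc_pool_alloc(soc, num_elem, rx_desc_pool);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* Build the freelist, assign cookies and create the pool lock */
	dp_rx_desc_pool_init(soc, pool_id, num_elem, rx_desc_pool);

	/* ... pool in service: dp_rx_get_free_desc_list() and friends ... */

	/* Tear down in reverse order: drop freelist/lock, then memory */
	dp_rx_desc_pool_deinit(soc, rx_desc_pool);
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}
#endif /* DP_RX_DESC_POOL_LIFECYCLE_EXAMPLE */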

/*
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *				the free rx desc pool.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descs requested from freelist
 * @desc_list: attach the descs to this list (output parameter)
 * @tail: pointer to the last desc of the allocated list (output parameter)
 *
 * Return: number of descs allocated from free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {
		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	return count;
}

/*
 * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
 *					freelist.
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: pointer to the last desc of the local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				      union dp_rx_desc_list_elem_t **local_desc_list,
				      union dp_rx_desc_list_elem_t **tail,
				      uint16_t pool_id,
				      struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
		  temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
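
/*
 * Illustrative usage sketch (not part of the driver): a replenish-style
 * caller pulls descriptors with dp_rx_get_free_desc_list() and hands any
 * unused remainder back with dp_rx_add_desc_list_to_free_list(). The
 * function name and the #ifdef guard are local assumptions; error paths
 * are reduced to the minimum.
 */
#ifdef DP_RX_DESC_FREELIST_USAGE_EXAMPLE
static void
dp_rx_desc_freelist_usage_example(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint16_t num_alloc;

	/* Ask for up to 32 descriptors; fewer may be returned */
	num_alloc = dp_rx_get_free_desc_list(soc, pool_id, rx_desc_pool,
					     32, &desc_list, &tail);
	if (!num_alloc)
		return;

	/* ... attach nbufs to the num_alloc descriptors in desc_list,
	 * advancing desc_list past each consumed element ...
	 */

	/* Return any unconsumed chain (desc_list .. tail) to the pool */
	if (desc_list)
		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
						 pool_id, rx_desc_pool);
}
#endif /* DP_RX_DESC_FREELIST_USAGE_EXAMPLE */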