dp_rx_desc.c

/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"

#ifdef RX_DESC_MULTI_PAGE_ALLOC
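/*
 * Sanity check: the number of descriptors that fit in one page must not
 * exceed what the cookie's offset field can index (the offset field is
 * DP_RX_DESC_PAGE_ID_SHIFT bits wide; see the cookie layout below).
 */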
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      PAGE_SIZE / sizeof(union dp_rx_desc_list_elem_t) <=
		      1 << DP_RX_DESC_PAGE_ID_SHIFT);

/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *				    rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS if the pool memory is allocated
 *	   QDF_STATUS_E_NOMEM if it is not
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, size=%d, elem=%d",
		       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @num_elem: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_NOMEM if the page allocation fails
 *	   QDF_STATUS_E_FAULT if linking the pages fails
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t desc_size;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;

	dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
				      &rx_desc_pool->desc_pages,
				      desc_size, num_elem, 0, true);
	if (!rx_desc_pool->desc_pages.num_pages) {
		qdf_err("Multi page alloc fail, size=%d, elem=%d",
			desc_size, num_elem);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link, size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			    convert the pool of memory into a list of
 *			    rx descriptors and create locks to access this
 *			    list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	uint32_t id, page_id, offset, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * The cookie travels in the sw_buffer_cookie field of the
		 * REO destination ring
		 * (reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie),
		 * which is 21 bits wide. The layout used here is:
		 * 8 bits - offset within the page
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
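		/*
		 * Worked example (following the 8/8/4-bit layout above, with
		 * the page ID shifted by 8 and the pool ID by 16): pool_id 1,
		 * page_id 2, offset 3 encodes as
		 * (1 << 16) | (2 << 8) | 3 = 0x10203.
		 */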
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
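
/*
 * dp_rx_desc_find() - Return the address of the rx descriptor element at
 *		       the given page and offset within the pool, i.e. the
 *		       element named by the page ID and offset fields of a
 *		       cookie.
 *
 * @page_id: index of the page holding the descriptor
 * @offset: index of the descriptor within that page
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: pointer to the rx descriptor element
 */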
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}
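
/*
 * __dp_rx_desc_nbuf_free() - Walk every descriptor in the pool and, for
 *			      descriptors still in use, unmap and free the
 *			      attached network buffer.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_INVAL if the pool has no pages
 */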
static QDF_STATUS __dp_rx_desc_nbuf_free(struct dp_soc *soc,
					 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;

	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}

	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		dp_rx_desc_free_dbg_info(rx_desc);
		if (rx_desc->in_use) {
			nbuf = rx_desc->nbuf;
			if (!rx_desc->unmapped) {
				dp_ipa_handle_rx_buf_smmu_mapping(
							soc, nbuf,
							rx_desc_pool->buf_size,
							false);
				qdf_nbuf_unmap_nbytes_single(
							soc->osdev,
							rx_desc->nbuf,
							QDF_DMA_BIDIRECTIONAL,
							rx_desc_pool->buf_size);
				rx_desc->unmapped = 1;
			}
			qdf_nbuf_free(nbuf);
		}
	}
	return QDF_STATUS_SUCCESS;
}
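
/*
 * dp_rx_desc_nbuf_and_pool_free() - Free the nbufs still attached to the
 *				     descriptors and then free the pool
 *				     memory itself.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */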
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS qdf_status;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	qdf_status = __dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	if (QDF_IS_STATUS_SUCCESS(qdf_status))
		dp_rx_desc_pool_free(soc, rx_desc_pool);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}
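
/*
 * dp_rx_desc_nbuf_free() - Free the nbufs still attached to in-use
 *			    descriptors, leaving the pool memory intact.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */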
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	__dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
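
/*
 * dp_rx_desc_pool_free() - Free the multi-page memory backing the
 *			    descriptor pool, if any was allocated.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */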
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;
	dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
				     &rx_desc_pool->desc_pages, 0, true);
}
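
/*
 * dp_rx_desc_pool_deinit() - Reset the freelist and pool size and destroy
 *			      the pool lock.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */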
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon desc frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}
#else
/*
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *				    rx descriptor pool
 *
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS if the pool array is allocated
 *	   QDF_STATUS_E_NOMEM if it is not
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->array) {
		dp_err("nss-wifi<4> skip Rx refill");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 *
 * @soc: core txrx main context
 * @pool_size: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_NOMEM if the allocation fails
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	rx_desc_pool->array = qdf_mem_malloc(pool_size *
				     sizeof(union dp_rx_desc_list_elem_t));
	if (!(rx_desc_pool->array)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "RX Desc Pool allocation failed");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool:
 *			    convert the pool of memory into a list of
 *			    rx descriptors and create locks to access this
 *			    list of rx descriptors.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	int i;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	/* zero the whole descriptor array, not just pool_size bytes */
	qdf_mem_zero(rx_desc_pool->array,
		     pool_size * sizeof(*rx_desc_pool->array));
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
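
/*
 * dp_rx_desc_nbuf_and_pool_free() - Unmap and free the nbufs attached to
 *				     in-use descriptors, then free the
 *				     descriptor array itself.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */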
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;
			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(
							soc, nbuf,
							rx_desc_pool->buf_size,
							false);
				qdf_nbuf_unmap_nbytes_single(
							soc->osdev, nbuf,
							QDF_DMA_FROM_DEVICE,
							rx_desc_pool->buf_size);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_mem_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}
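
/*
 * dp_rx_desc_nbuf_free() - Unmap and free the nbufs attached to in-use
 *			    descriptors, leaving the descriptor array
 *			    intact.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */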
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = rx_desc_pool->array[i].rx_desc.nbuf;
			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_ipa_handle_rx_buf_smmu_mapping(
							soc, nbuf,
							rx_desc_pool->buf_size,
							false);
				qdf_nbuf_unmap_nbytes_single(
							soc->osdev, nbuf,
							QDF_DMA_FROM_DEVICE,
							rx_desc_pool->buf_size);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

/**
 * dp_rx_desc_frag_free() - Free desc frag buffer
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_dma_addr_t paddr;
	qdf_frag_t vaddr;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
			vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;
			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				qdf_mem_unmap_page(soc->osdev, paddr,
						   rx_desc_pool->buf_size,
						   QDF_DMA_FROM_DEVICE);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_frag_free(vaddr);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
#endif
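
/*
 * dp_rx_desc_pool_free() - Free the descriptor array backing the pool.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */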
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_free(rx_desc_pool->array);
}
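
/*
 * dp_rx_desc_pool_deinit() - Reset the freelist and pool size and destroy
 *			      the pool lock.
 *
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 */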
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon desc frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

/*
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *				the free rx desc pool.
 *
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descs requested from freelist
 * @desc_list: attach the descs to this list (output parameter)
 * @tail: pointer to the last desc of the allocated list (output parameter)
 *
 * Return: number of descs allocated from the free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {
		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return count;
}

/*
 * dp_rx_add_desc_list_to_free_list() - append an unused desc_list back to
 *					the freelist.
 *
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: pointer to the last desc of the local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
		  temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;
	*local_desc_list = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
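
/*
 * Illustrative usage sketch (not part of the driver): a replenish-style
 * caller typically pulls a batch of descriptors from the freelist,
 * consumes as many as it can, and hands any unused remainder back. The
 * surrounding context (soc, pool_id, rx_desc_pool, num_req) is assumed.
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *	uint16_t num_alloc;
 *
 *	num_alloc = dp_rx_get_free_desc_list(soc, pool_id, rx_desc_pool,
 *					     num_req, &desc_list, &tail);
 *	// ... attach nbufs to the first num_alloc descriptors, advancing
 *	// desc_list past each consumed element ...
 *	if (desc_list)
 *		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
 *						 pool_id, rx_desc_pool);
 */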