dp_be_rx.h

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_BE_RX_H_
#define _DP_BE_RX_H_

#include <dp_types.h>
#include "dp_be.h"
#include "dp_peer.h"
#include <dp_rx.h>
#include "hal_be_rx.h"
/**
 * struct dp_be_intrabss_params - parameters for BE intra-BSS forwarding
 * @dest_soc: dest soc to forward the packet to
 * @tx_vdev_id: vdev id retrieved from dest peer
 */
struct dp_be_intrabss_params {
	struct dp_soc *dest_soc;
	uint8_t tx_vdev_id;
};
#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_intrabss_fwd_be() - API for intra-BSS forwarding. Forwarding is not
 *	allowed for an EAPOL packet whose DA does not match the vdev MAC
 *	address.
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if it is forwarded else false
 */
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
			   struct dp_txrx_peer *ta_txrx_peer,
			   uint8_t *rx_tlv_hdr,
			   qdf_nbuf_t nbuf,
			   struct hal_rx_msdu_metadata msdu_metadata);
#endif
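
/*
 * Illustrative call pattern for dp_rx_intrabss_fwd_be() -- a sketch only,
 * not the actual caller. The Rx reap loop owns soc, ta_txrx_peer,
 * rx_tlv_hdr, nbuf and msdu_metadata, and skips stack delivery once the
 * nbuf has been consumed by intra-BSS TX:
 *
 *	if (dp_rx_intrabss_fwd_be(soc, ta_txrx_peer, rx_tlv_hdr,
 *				  nbuf, msdu_metadata)) {
 *		// nbuf queued for intra-BSS TX; do not deliver it
 *		// to the network stack
 *		continue;
 *	}
 */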

/**
 * dp_rx_intrabss_handle_nawds_be() - Forward mcbc intrabss pkts in nawds case
 * @soc: core txrx main context
 * @ta_peer: source txrx_peer entry
 * @nbuf_copy: nbuf that has to be intrabss forwarded
 * @tid_stats: tid_stats structure
 *
 * Return: true if it is forwarded else false
 */
bool
dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats);
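
/*
 * Sketch of how a caller might use dp_rx_intrabss_handle_nawds_be() on a
 * multicast/broadcast copy (variable names are assumed caller context):
 *
 *	qdf_nbuf_t nbuf_copy = qdf_nbuf_copy(nbuf);
 *
 *	if (nbuf_copy &&
 *	    dp_rx_intrabss_handle_nawds_be(soc, ta_peer, nbuf_copy,
 *					   tid_stats))
 *		continue;	// copy consumed by NAWDS intra-BSS TX
 */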

/**
 * dp_rx_word_mask_subscribe_be() - Update the message word with the Rx TLV
 *	word mask subscription values
 * @soc: Handle to DP Soc structure
 * @msg_word: message word to be updated
 * @rx_filter: Rx filter handle
 *
 * Return: None
 */
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
				  uint32_t *msg_word,
				  void *rx_filter);

/**
 * dp_rx_process_be() - BE-specific Rx processing for REO destination rings
 * @int_ctx: interrupt context
 * @hal_ring_hdl: HAL ring handle of the REO destination ring
 * @reo_ring_num: REO destination ring number
 * @quota: maximum number of descriptors to process in one call
 *
 * Return: number of descriptors processed
 */
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota);
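
/*
 * A minimal sketch of how the BE Rx entry point is typically hooked into
 * the per-architecture ops table. The ops structure and field name are
 * assumed here (the actual wiring lives in the BE soc attach code, not in
 * this header):
 *
 *	arch_ops->dp_rx_process = dp_rx_process_be;
 */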

/**
 * dp_rx_desc_pool_init_be() - Initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id);

/**
 * dp_rx_desc_pool_deinit_be() - De-initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: None
 */
void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id);

/**
 * dp_wbm_get_rx_desc_from_hal_desc_be() - Get the corresponding Rx descriptor
 *	address from a WBM ring descriptor
 * @soc: Handle to DP Soc structure
 * @ring_desc: ring descriptor structure pointer
 * @r_rx_desc: pointer to a pointer of Rx Desc
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc);

/**
 * dp_rx_desc_cookie_2_va_be() - Convert an RX desc cookie ID to a VA
 * @soc: Handle to DP Soc structure
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Rx descriptor virtual address
 */
struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
					     uint32_t cookie);

#if !defined(DP_FEATURE_HW_COOKIE_CONVERSION) || \
	defined(DP_HW_COOKIE_CONVERT_EXCEPTION)
/**
 * dp_rx_desc_sw_cc_check() - check whether the RX descriptor VA was obtained
 *	correctly; if not, perform SW cookie conversion.
 * @soc: Handle to DP Soc structure
 * @rx_buf_cookie: RX desc cookie ID
 * @r_rx_desc: double pointer for RX desc
 *
 * Return: None
 */
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
		       uint32_t rx_buf_cookie,
		       struct dp_rx_desc **r_rx_desc)
{
	if (qdf_unlikely(!(*r_rx_desc))) {
		*r_rx_desc = (struct dp_rx_desc *)
				dp_cc_desc_find(soc, rx_buf_cookie);
	}
}
#else
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
		       uint32_t rx_buf_cookie,
		       struct dp_rx_desc **r_rx_desc)
{
}
#endif /* !DP_FEATURE_HW_COOKIE_CONVERSION || DP_HW_COOKIE_CONVERT_EXCEPTION */
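
/*
 * Sketch of how the cookie helpers pair up in the reap path (variable names
 * are assumed caller context; the real logic lives in dp_be_rx.c). The HW
 * cookie conversion result is tried first, then the SW fallback:
 *
 *	struct dp_rx_desc *rx_desc;
 *
 *	rx_desc = (struct dp_rx_desc *)hal_rx_get_reo_desc_va(ring_desc);
 *	// falls back to SW cookie conversion when the HW path did not
 *	// yield a valid VA (or is compiled out)
 *	dp_rx_desc_sw_cc_check(soc, rx_buf_cookie, &rx_desc);
 */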

#define DP_PEER_METADATA_OFFLOAD_GET_BE(_peer_metadata)		(0)

#ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
static inline uint16_t
dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	struct htt_rx_peer_metadata_v1 *metadata =
			(struct htt_rx_peer_metadata_v1 *)&peer_metadata;
	uint16_t peer_id;

	peer_id = metadata->peer_id |
		  (metadata->ml_peer_valid << soc->peer_id_shift);

	return peer_id;
}
#else
/* Combine ml_peer_valid and peer_id field */
#define DP_BE_PEER_METADATA_PEER_ID_MASK	0x00003fff
#define DP_BE_PEER_METADATA_PEER_ID_SHIFT	0

static inline uint16_t
dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	return ((peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
		DP_BE_PEER_METADATA_PEER_ID_SHIFT);
}
#endif

static inline uint16_t
dp_rx_peer_metadata_vdev_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	struct htt_rx_peer_metadata_v1 *metadata =
			(struct htt_rx_peer_metadata_v1 *)&peer_metadata;

	return metadata->vdev_id;
}

static inline uint8_t
dp_rx_peer_metadata_lmac_id_get_be(uint32_t peer_metadata)
{
	return HTT_RX_PEER_META_DATA_V1_LMAC_ID_GET(peer_metadata);
}
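
/*
 * Example (sketch only; where peer_metadata comes from is assumed caller
 * context): the 32-bit peer metadata word carried in each REO descriptor
 * is decoded with the helpers above before peer lookup.
 *
 *	uint16_t peer_id =
 *		dp_rx_peer_metadata_peer_id_get_be(soc, peer_metadata);
 *	uint16_t vdev_id =
 *		dp_rx_peer_metadata_vdev_id_get_be(soc, peer_metadata);
 *	uint8_t lmac_id =
 *		dp_rx_peer_metadata_lmac_id_get_be(peer_metadata);
 */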

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_rx_nf_process() - Near Full state handler for RX rings.
 * @int_ctx: interrupt context
 * @hal_ring_hdl: Rx ring handle
 * @reo_ring_num: RX ring number
 * @quota: Quota of work to be done
 *
 * Return: work done in the handler
 */
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota);
#else
static inline
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota)
{
	return 0;
}
#endif /* WLAN_FEATURE_NEAR_FULL_IRQ */
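
/*
 * When a near-full interrupt fires, the handler is expected to drain the
 * ring with an elevated quota. A hedged sketch of a caller; the near-full
 * check shown is hypothetical, only the dp_rx_nf_process() call is real:
 *
 *	if (ring_near_full)	// hypothetical flag set by the ISR
 *		work_done = dp_rx_nf_process(int_ctx, hal_ring_hdl,
 *					     reo_ring_num, nf_quota);
 */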

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/**
 * dp_rx_replensih_soc_get() - Get the soc on which Rx buffers should be
 *	replenished for the given chip id in a multi-chip MLO setup
 * @soc: DP Soc handle
 * @chip_id: chip id identifying the owning soc
 *
 * Return: dp_soc to replenish on
 */
struct dp_soc *
dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id);
#else
static inline struct dp_soc *
dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	return soc;
}
#endif
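
/*
 * In a multi-chip MLO configuration, buffers reaped on one chip may need to
 * be replenished on the partner soc that owns them; on single-chip builds
 * the helper simply returns the local soc. Minimal sketch (chip_id is
 * assumed to be derived from the reaped descriptor):
 *
 *	struct dp_soc *replenish_soc =
 *			dp_rx_replensih_soc_get(soc, chip_id);
 */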

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_igmp_handler() - Rx handler for Mcast packets
 * @soc: Handle to DP Soc structure
 * @vdev: DP vdev handle
 * @peer: DP peer handle
 * @nbuf: nbuf to be enqueued
 *
 * Return: true when the packet is sent to the stack, false on failure
 */
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *peer,
			    qdf_nbuf_t nbuf);

/**
 * dp_peer_rx_reorder_queue_setup_be() - Send reo queue setup wmi cmd to FW
 *	per peer type
 * @soc: DP Soc handle
 * @peer: dp peer to operate on
 * @tid: TID
 * @ba_window_size: BlockAck window size
 *
 * Return: 0 - success, others - failure
 */
static inline
QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
					     struct dp_peer *peer,
					     int tid,
					     uint32_t ba_window_size)
{
	uint8_t i;
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *link_peer;
	struct dp_rx_tid *rx_tid;
	struct dp_soc *link_peer_soc;

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->hw_qdesc_paddr)
		return QDF_STATUS_E_INVAL;

	if (!hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
		if (IS_MLO_DP_MLD_PEER(peer)) {
			/* get link peers with reference */
			dp_get_link_peers_ref_from_mld_peer(soc, peer,
							    &link_peers_info,
							    DP_MOD_ID_CDP);
			/* send WMI cmd to each link peer */
			for (i = 0; i < link_peers_info.num_links; i++) {
				link_peer = link_peers_info.link_peers[i];
				link_peer_soc = link_peer->vdev->pdev->soc;
				if (link_peer_soc->cdp_soc.ol_ops->
						peer_rx_reorder_queue_setup) {
					if (link_peer_soc->cdp_soc.ol_ops->
						peer_rx_reorder_queue_setup(
						link_peer_soc->ctrl_psoc,
						link_peer->vdev->pdev->pdev_id,
						link_peer->vdev->vdev_id,
						link_peer->mac_addr.raw,
						rx_tid->hw_qdesc_paddr,
						tid, tid,
						1, ba_window_size)) {
						dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
							    link_peer_soc, tid);
						return QDF_STATUS_E_FAILURE;
					}
				}
			}
			/* release link peers reference */
			dp_release_link_peers_ref(&link_peers_info,
						  DP_MOD_ID_CDP);
		} else if (peer->peer_type == CDP_LINK_PEER_TYPE) {
			if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
				if (soc->cdp_soc.ol_ops->
					peer_rx_reorder_queue_setup(
					soc->ctrl_psoc,
					peer->vdev->pdev->pdev_id,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					rx_tid->hw_qdesc_paddr,
					tid, tid,
					1, ba_window_size)) {
					dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
						    soc, tid);
					return QDF_STATUS_E_FAILURE;
				}
			}
		} else {
			dp_peer_err("invalid peer type %d", peer->peer_type);
			return QDF_STATUS_E_FAILURE;
		}
	} else {
		/* Some BE targets don't require WMI and instead use a shared
		 * table, managed by the host, for storing Reo queue ref
		 * structs.
		 */
		if (IS_MLO_DP_LINK_PEER(peer) ||
		    peer->peer_id == HTT_INVALID_PEER) {
			/* Return if this is for an MLD link peer; the table
			 * is not used in the MLD link peer case, as the MLD
			 * peer's qref is written to the LUT in peer setup or
			 * peer map. At this point peer setup for a link peer
			 * is called before peer map, hence the peer id is not
			 * yet assigned. This can happen if peer_setup is
			 * called before the host receives the HTT peer map.
			 * In that case return success with no op and let peer
			 * map handle writing the reo_qref to the LUT.
			 */
			dp_peer_debug("Invalid peer id for dp_peer:%pK", peer);
			return QDF_STATUS_SUCCESS;
		}

		hal_reo_shared_qaddr_write(soc->hal_soc,
					   peer->peer_id, tid,
					   peer->rx_tid[tid].hw_qdesc_paddr);
	}

	return QDF_STATUS_SUCCESS;
}
#else
static inline
QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
					     struct dp_peer *peer,
					     int tid,
					     uint32_t ba_window_size)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	if (!rx_tid->hw_qdesc_paddr)
		return QDF_STATUS_E_INVAL;

	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
			soc->ctrl_psoc,
			peer->vdev->pdev->pdev_id,
			peer->vdev->vdev_id,
			peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
			1, ba_window_size)) {
			dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
				    soc, tid);
			return QDF_STATUS_E_FAILURE;
		}
	}

	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_FEATURE_11BE_MLO */
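
/*
 * Typical call-site sketch: this helper is invoked from the per-arch peer
 * reorder-queue setup path once a TID's hardware queue descriptor has been
 * allocated (peer, tid and ba_window_size come from the peer-setup/ADDBA
 * flow and are assumed here):
 *
 *	status = dp_peer_rx_reorder_queue_setup_be(soc, peer, tid,
 *						   ba_window_size);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 */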

#ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
static inline
void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
	if (next) {
		/* prefetch skb->next and first few bytes of skb->cb */
		qdf_prefetch(next);
		/* skb->cb spreads across 2 cache lines, hence the prefetches
		 * below
		 */
		qdf_prefetch(&next->_skb_refdst);
		qdf_prefetch(&next->len);
		qdf_prefetch(&next->protocol);
		qdf_prefetch(next->data);
		qdf_prefetch(next->data + 64);
		qdf_prefetch(next->data + 128);
	}
}
#else
static inline
void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
}
#endif
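
/*
 * Hedged sketch of the intended use inside the Rx delivery loop: the cache
 * lines of the *next* buffer are warmed while the current one is being
 * processed (nbuf_head and the processing step are assumed caller context):
 *
 *	nbuf = nbuf_head;
 *	while (nbuf) {
 *		next = qdf_nbuf_next(nbuf);
 *		dp_rx_prefetch_nbuf_data_be(nbuf, next);
 *		// process the current nbuf, then advance
 *		nbuf = next;
 *	}
 */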

#ifdef QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH
/**
 * dp_rx_va_prefetch() - prefetch the SW descriptor pointed to by a HW
 *	descriptor
 * @last_prefetched_hw_desc: last prefetched HW descriptor
 *
 * Return: prefetched Rx descriptor virtual address
 */
static inline
void *dp_rx_va_prefetch(void *last_prefetched_hw_desc)
{
	void *prefetch_desc;

	prefetch_desc = (void *)hal_rx_get_reo_desc_va(last_prefetched_hw_desc);
	qdf_prefetch(prefetch_desc);
	return prefetch_desc;
}

/**
 * dp_rx_prefetch_hw_sw_nbuf_32_byte_desc() - function to prefetch HW and SW
 *	descriptors
 * @soc: Handle to DP Soc structure
 * @hal_soc: Handle to HAL Soc structure
 * @num_entries: valid number of HW descriptors
 * @hal_ring_hdl: Destination ring pointer
 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
 * @last_prefetched_sw_desc: input & output param of last prefetched SW desc
 *
 * Return: None
 */
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
				       hal_soc_handle_t hal_soc,
				       uint32_t num_entries,
				       hal_ring_handle_t hal_ring_hdl,
				       hal_ring_desc_t *last_prefetched_hw_desc,
				       struct dp_rx_desc **last_prefetched_sw_desc)
{
	if (*last_prefetched_sw_desc) {
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_entries) {
		*last_prefetched_sw_desc =
			dp_rx_va_prefetch(*last_prefetched_hw_desc);

		if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
			*last_prefetched_hw_desc =
				hal_srng_dst_prefetch_next_cached_desc(hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
		else
			*last_prefetched_hw_desc =
				hal_srng_dst_get_next_32_byte_desc(hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
	}
}
#else
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
				       hal_soc_handle_t hal_soc,
				       uint32_t num_entries,
				       hal_ring_handle_t hal_ring_hdl,
				       hal_ring_desc_t *last_prefetched_hw_desc,
				       struct dp_rx_desc **last_prefetched_sw_desc)
{
}
#endif
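
/*
 * Hedged usage sketch: inside the REO reap loop, each iteration prefetches
 * the HW and SW descriptors one step ahead of the entry being processed
 * (num_entries and the two desc pointers are assumed loop-local state):
 *
 *	dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(soc, hal_soc, num_entries,
 *					       hal_ring_hdl,
 *					       &last_prefetched_hw_desc,
 *					       &last_prefetched_sw_desc);
 */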
#endif /* _DP_BE_RX_H_ */