/* dp_be_rx.h */
  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #ifndef _DP_BE_RX_H_
  20. #define _DP_BE_RX_H_
  21. #include <dp_types.h>
  22. #include "dp_be.h"
  23. #include "dp_peer.h"
/**
 * struct dp_be_intrabss_params - parameters for BE intra-BSS forwarding
 * @dest_soc: dest soc to forward the packet to
 * @tx_vdev_id: vdev id retrieved from dest peer
 */
struct dp_be_intrabss_params {
	struct dp_soc *dest_soc;
	uint8_t tx_vdev_id;
};
#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL
 *			     pkt with DA not equal to vdev mac addr,
 *			     fwd is not allowed.
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if it is forwarded else false
 */
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
			   struct dp_txrx_peer *ta_txrx_peer,
			   uint8_t *rx_tlv_hdr,
			   qdf_nbuf_t nbuf,
			   struct hal_rx_msdu_metadata msdu_metadata);
#endif
/**
 * dp_rx_intrabss_handle_nawds_be() - Forward mcbc intrabss pkts in nawds case
 * @soc: core txrx main context
 * @ta_peer: source txrx_peer entry
 * @nbuf_copy: nbuf that has to be intrabss forwarded
 * @tid_stats: tid_stats structure
 *
 * Return: true if it is forwarded else false
 */
bool
dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats);
/**
 * dp_rx_process_be() - Rx processing for a BE REO destination ring
 * @int_ctx: interrupt context
 * @hal_ring_hdl: HAL handle of the REO destination ring
 * @reo_ring_num: REO destination ring number
 * @quota: quota of work that may be done in this invocation
 *
 * Return: work done in the handler
 */
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota);
/**
 * dp_rx_desc_pool_init_be() - Initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id);
/**
 * dp_rx_desc_pool_deinit_be() - De-initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: None
 */
void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id);
/**
 * dp_wbm_get_rx_desc_from_hal_desc_be() - Get corresponding Rx Desc
 *					   address from WBM ring Desc
 * @soc: Handle to DP Soc structure
 * @ring_desc: ring descriptor structure pointer
 * @r_rx_desc: pointer to a pointer of Rx Desc
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc);
/**
 * dp_rx_desc_cookie_2_va_be() - Convert RX Desc cookie ID to VA
 * @soc: Handle to DP Soc structure
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Rx descriptor virtual address
 */
struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
					     uint32_t cookie);
  111. #if !defined(DP_FEATURE_HW_COOKIE_CONVERSION) || \
  112. defined(DP_HW_COOKIE_CONVERT_EXCEPTION)
  113. /**
  114. * dp_rx_desc_sw_cc_check() - check if RX desc VA is got correctly,
  115. if not, do SW cookie conversion.
  116. * @soc:Handle to DP Soc structure
  117. * @rx_buf_cookie: RX desc cookie ID
  118. * @r_rx_desc: double pointer for RX desc
  119. *
  120. * Return: None
  121. */
  122. static inline void
  123. dp_rx_desc_sw_cc_check(struct dp_soc *soc,
  124. uint32_t rx_buf_cookie,
  125. struct dp_rx_desc **r_rx_desc)
  126. {
  127. if (qdf_unlikely(!(*r_rx_desc))) {
  128. *r_rx_desc = (struct dp_rx_desc *)
  129. dp_cc_desc_find(soc,
  130. rx_buf_cookie);
  131. }
  132. }
  133. #else
  134. static inline void
  135. dp_rx_desc_sw_cc_check(struct dp_soc *soc,
  136. uint32_t rx_buf_cookie,
  137. struct dp_rx_desc **r_rx_desc)
  138. {
  139. }
  140. #endif /* DP_FEATURE_HW_COOKIE_CONVERSION && DP_HW_COOKIE_CONVERT_EXCEPTION */
/* Offload indication is not encoded in BE peer metadata: always 0 */
#define DP_PEER_METADATA_OFFLOAD_GET_BE(_peer_metadata)		(0)
  142. #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
  143. static inline uint16_t
  144. dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
  145. {
  146. struct htt_rx_peer_metadata_v1 *metadata =
  147. (struct htt_rx_peer_metadata_v1 *)&peer_metadata;
  148. uint16_t peer_id;
  149. peer_id = metadata->peer_id |
  150. (metadata->ml_peer_valid << soc->peer_id_shift);
  151. return peer_id;
  152. }
  153. #else
  154. /* Combine ml_peer_valid and peer_id field */
  155. #define DP_BE_PEER_METADATA_PEER_ID_MASK 0x00003fff
  156. #define DP_BE_PEER_METADATA_PEER_ID_SHIFT 0
  157. static inline uint16_t
  158. dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
  159. {
  160. return ((peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
  161. DP_BE_PEER_METADATA_PEER_ID_SHIFT);
  162. }
  163. #endif
/**
 * dp_rx_peer_metadata_vdev_id_get_be() - extract vdev_id from peer metadata
 * @soc: Handle to DP Soc structure (unused)
 * @peer_metadata: peer metadata dword from the rx path
 *
 * NOTE(review): type-puns the u32 through the v1 metadata layout; assumes
 * the layout matches what FW reports on this target — confirm against
 * htt_rx_peer_metadata_v1.
 *
 * Return: vdev_id field of the v1 peer metadata layout
 */
static inline uint16_t
dp_rx_peer_metadata_vdev_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	struct htt_rx_peer_metadata_v1 *metadata =
			(struct htt_rx_peer_metadata_v1 *)&peer_metadata;

	return metadata->vdev_id;
}
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_rx_nf_process() - Near Full state handler for RX rings.
 * @int_ctx: interrupt context
 * @hal_ring_hdl: Rx ring handle
 * @reo_ring_num: RX ring number
 * @quota: Quota of work to be done
 *
 * Return: work done in the handler
 */
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota);
#else
/* Near-full IRQ support not compiled in: no-op stub, zero work done */
static inline
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota)
{
	return 0;
}
#endif /* WLAN_FEATURE_NEAR_FULL_IRQ */
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/**
 * dp_rx_replensih_soc_get() - soc to use for rx buffer replenish for
 *			       the given chip id (multi-chip MLO)
 * @soc: Handle to DP Soc structure
 * @chip_id: chip id — presumably selects the MLO partner soc; verify
 *	     against the definition in dp_be_rx.c
 *
 * NOTE(review): "replensih" is a typo for "replenish"; the symbol name
 * is kept as-is because external callers reference it.
 *
 * Return: soc for @chip_id; in the single-chip build, @soc itself
 */
struct dp_soc *
dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id);
#else
static inline struct dp_soc *
dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	return soc;
}
#endif
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_igmp_handler() - Rx handler for Mcast packets
 * @soc: Handle to DP Soc structure
 * @vdev: DP vdev handle
 * @peer: DP txrx peer handle
 * @nbuf: nbuf to be enqueued
 *
 * Return: true when packet sent to stack, false failure
 */
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *peer,
			    qdf_nbuf_t nbuf);
  219. /**
  220. * dp_peer_rx_reorder_queue_setup() - Send reo queue setup wmi cmd to FW
  221. per peer type
  222. * @soc: DP Soc handle
  223. * @peer: dp peer to operate on
  224. * @tid: TID
  225. * @ba_window_size: BlockAck window size
  226. *
  227. * Return: 0 - success, others - failure
  228. */
  229. static inline
  230. QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
  231. struct dp_peer *peer,
  232. int tid,
  233. uint32_t ba_window_size)
  234. {
  235. uint8_t i;
  236. struct dp_mld_link_peers link_peers_info;
  237. struct dp_peer *link_peer;
  238. struct dp_rx_tid *rx_tid;
  239. struct dp_soc *link_peer_soc;
  240. rx_tid = &peer->rx_tid[tid];
  241. if (!rx_tid->hw_qdesc_paddr)
  242. return QDF_STATUS_E_INVAL;
  243. if (!hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
  244. if (IS_MLO_DP_MLD_PEER(peer)) {
  245. /* get link peers with reference */
  246. dp_get_link_peers_ref_from_mld_peer(soc, peer,
  247. &link_peers_info,
  248. DP_MOD_ID_CDP);
  249. /* send WMI cmd to each link peers */
  250. for (i = 0; i < link_peers_info.num_links; i++) {
  251. link_peer = link_peers_info.link_peers[i];
  252. link_peer_soc = link_peer->vdev->pdev->soc;
  253. if (link_peer_soc->cdp_soc.ol_ops->
  254. peer_rx_reorder_queue_setup) {
  255. if (link_peer_soc->cdp_soc.ol_ops->
  256. peer_rx_reorder_queue_setup(
  257. link_peer_soc->ctrl_psoc,
  258. link_peer->vdev->pdev->pdev_id,
  259. link_peer->vdev->vdev_id,
  260. link_peer->mac_addr.raw,
  261. rx_tid->hw_qdesc_paddr,
  262. tid, tid,
  263. 1, ba_window_size)) {
  264. dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
  265. link_peer_soc, tid);
  266. return QDF_STATUS_E_FAILURE;
  267. }
  268. }
  269. }
  270. /* release link peers reference */
  271. dp_release_link_peers_ref(&link_peers_info,
  272. DP_MOD_ID_CDP);
  273. } else if (peer->peer_type == CDP_LINK_PEER_TYPE) {
  274. if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
  275. if (soc->cdp_soc.ol_ops->
  276. peer_rx_reorder_queue_setup(
  277. soc->ctrl_psoc,
  278. peer->vdev->pdev->pdev_id,
  279. peer->vdev->vdev_id,
  280. peer->mac_addr.raw,
  281. rx_tid->hw_qdesc_paddr,
  282. tid, tid,
  283. 1, ba_window_size)) {
  284. dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
  285. soc, tid);
  286. return QDF_STATUS_E_FAILURE;
  287. }
  288. }
  289. } else {
  290. dp_peer_err("invalid peer type %d", peer->peer_type);
  291. return QDF_STATUS_E_FAILURE;
  292. }
  293. } else {
  294. /* Some BE targets dont require WMI and use shared
  295. * table managed by host for storing Reo queue ref structs
  296. */
  297. if (IS_MLO_DP_LINK_PEER(peer) ||
  298. peer->peer_id == HTT_INVALID_PEER) {
  299. /* Return if this is for MLD link peer and table
  300. * is not used in MLD link peer case as MLD peer's
  301. * qref is written to LUT in peer setup or peer map.
  302. * At this point peer setup for link peer is called
  303. * before peer map, hence peer id is not assigned.
  304. * This could happen if peer_setup is called before
  305. * host receives HTT peer map. In this case return
  306. * success with no op and let peer map handle
  307. * writing the reo_qref to LUT.
  308. */
  309. dp_peer_debug("Invalid peer id for dp_peer:%pK", peer);
  310. return QDF_STATUS_SUCCESS;
  311. }
  312. hal_reo_shared_qaddr_write(soc->hal_soc,
  313. peer->peer_id,
  314. tid, peer->rx_tid[tid].hw_qdesc_paddr);
  315. }
  316. return QDF_STATUS_SUCCESS;
  317. }
  318. #else
  319. static inline
  320. QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
  321. struct dp_peer *peer,
  322. int tid,
  323. uint32_t ba_window_size)
  324. {
  325. struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
  326. if (!rx_tid->hw_qdesc_paddr)
  327. return QDF_STATUS_E_INVAL;
  328. if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
  329. if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
  330. soc->ctrl_psoc,
  331. peer->vdev->pdev->pdev_id,
  332. peer->vdev->vdev_id,
  333. peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
  334. 1, ba_window_size)) {
  335. dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
  336. soc, tid);
  337. return QDF_STATUS_E_FAILURE;
  338. }
  339. }
  340. return QDF_STATUS_SUCCESS;
  341. }
  342. #endif /* WLAN_FEATURE_11BE_MLO */
  343. #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
  344. static inline
  345. void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
  346. {
  347. if (next) {
  348. /* prefetch skb->next and first few bytes of skb->cb */
  349. qdf_prefetch(next);
  350. /* skb->cb spread across 2 cache lines hence below prefetch */
  351. qdf_prefetch(&next->_skb_refdst);
  352. qdf_prefetch(&next->len);
  353. qdf_prefetch(&next->protocol);
  354. qdf_prefetch(next->data);
  355. qdf_prefetch(next->data + 64);
  356. qdf_prefetch(next->data + 128);
  357. }
  358. }
  359. #else
  360. static inline
  361. void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
  362. {
  363. }
  364. #endif
  365. #endif