/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_BE_RX_H_
#define _DP_BE_RX_H_

#include <dp_types.h>
#include "dp_be.h"
#include "dp_peer.h"
#include <dp_rx.h>
#include "hal_be_rx.h"
#include "hal_be_rx_tlv.h"
/**
 * struct dp_be_intrabss_params - parameters for BE intra-BSS forwarding
 * @dest_soc: dest soc to forward the packet to
 * @tx_vdev_id: vdev id retrieved from dest peer
 */
struct dp_be_intrabss_params {
	struct dp_soc *dest_soc;
	uint8_t tx_vdev_id;
};
#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL
 *			     pkt with DA not equal to vdev mac addr,
 *			     fwd is not allowed.
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 * @link_id: link id on which the packet is received
 *
 * Return: true if it is forwarded else false
 */
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
			   struct dp_txrx_peer *ta_txrx_peer,
			   uint8_t *rx_tlv_hdr,
			   qdf_nbuf_t nbuf,
			   struct hal_rx_msdu_metadata msdu_metadata,
			   uint8_t link_id);
#endif
/**
 * dp_rx_intrabss_mcast_handler_be() - intrabss mcast handler
 * @soc: core txrx main context
 * @ta_txrx_peer: source txrx_peer entry
 * @nbuf_copy: nbuf that has to be intrabss forwarded
 * @tid_stats: tid_stats structure
 * @link_id: link id on which the packet is received
 *
 * Return: true if it is forwarded else false
 */
bool
dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
				struct dp_txrx_peer *ta_txrx_peer,
				qdf_nbuf_t nbuf_copy,
				struct cdp_tid_rx_stats *tid_stats,
				uint8_t link_id);
/**
 * dp_rx_word_mask_subscribe_be() - fill the HTT word-mask subscription
 *				    message words for BE RX TLVs
 * @soc: core txrx main context
 * @msg_word: pointer to the HTT message word(s) to be filled
 * @rx_filter: opaque rx filter handle
 *
 * NOTE(review): semantics inferred from the signature only; confirm
 * against the definition in dp_be_rx.c.
 *
 * Return: None
 */
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
				  uint32_t *msg_word,
				  void *rx_filter);
/**
 * dp_rx_process_be() - Brain of the Rx processing functionality
 *			Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota);
/**
 * dp_rx_chain_msdus_be() - Function to chain all msdus of a mpdu
 *			    to pdev invalid peer list
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
bool dp_rx_chain_msdus_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
			  uint8_t *rx_tlv_hdr, uint8_t mac_id);
/**
 * dp_rx_desc_pool_init_be() - Initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id);
/**
 * dp_rx_desc_pool_deinit_be() - De-initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: None
 */
void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id);
/**
 * dp_wbm_get_rx_desc_from_hal_desc_be() - Get corresponding Rx Desc
 *					   address from WBM ring Desc
 * @soc: Handle to DP Soc structure
 * @ring_desc: ring descriptor structure pointer
 * @r_rx_desc: pointer to a pointer of Rx Desc
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc);
/**
 * dp_rx_desc_cookie_2_va_be() - Convert RX Desc cookie ID to VA
 * @soc: Handle to DP Soc structure
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Rx descriptor virtual address
 */
struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
					     uint32_t cookie);
#if !defined(DP_FEATURE_HW_COOKIE_CONVERSION) || \
		defined(DP_HW_COOKIE_CONVERT_EXCEPTION)
/**
 * dp_rx_desc_sw_cc_check() - check if RX desc VA is got correctly,
 *			      if not, do SW cookie conversion.
 * @soc: Handle to DP Soc structure
 * @rx_buf_cookie: RX desc cookie ID
 * @r_rx_desc: double pointer for RX desc
 *
 * Return: None
 */
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
		       uint32_t rx_buf_cookie,
		       struct dp_rx_desc **r_rx_desc)
{
	/* VA was not resolved (e.g. HW conversion exception): fall back
	 * to the SW cookie-to-descriptor lookup.
	 */
	if (qdf_unlikely(!(*r_rx_desc))) {
		*r_rx_desc = (struct dp_rx_desc *)
				dp_cc_desc_find(soc,
						rx_buf_cookie);
	}
}
#else
/* HW cookie conversion has no exception path on this build: no-op stub */
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
		       uint32_t rx_buf_cookie,
		       struct dp_rx_desc **r_rx_desc)
{
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION && DP_HW_COOKIE_CONVERT_EXCEPTION */
/* Rx offload indication is not carried in BE peer metadata: always 0 */
#define DP_PEER_METADATA_OFFLOAD_GET_BE(_peer_metadata)		(0)

#ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
/**
 * dp_rx_peer_metadata_peer_id_get_be() - get peer id from peer metadata
 * @soc: DP soc handle (supplies peer_id_shift for the ML-peer-valid bit)
 * @peer_metadata: peer metadata word from the Rx descriptor
 *
 * Return: peer id with ml_peer_valid folded in above the peer_id field
 */
static inline uint16_t
dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	struct htt_rx_peer_metadata_v1 *metadata =
			(struct htt_rx_peer_metadata_v1 *)&peer_metadata;
	uint16_t peer_id;

	peer_id = metadata->peer_id |
		  (metadata->ml_peer_valid << soc->peer_id_shift);
	return peer_id;
}
#else
/* Combine ml_peer_valid and peer_id field */
#define DP_BE_PEER_METADATA_PEER_ID_MASK	0x00003fff
#define DP_BE_PEER_METADATA_PEER_ID_SHIFT	0

/**
 * dp_rx_peer_metadata_peer_id_get_be() - get peer id from peer metadata
 * @soc: DP soc handle (unused in this variant)
 * @peer_metadata: peer metadata word from the Rx descriptor
 *
 * Return: combined ml_peer_valid + peer_id field (low 14 bits)
 */
static inline uint16_t
dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	return ((peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
		DP_BE_PEER_METADATA_PEER_ID_SHIFT);
}
#endif
/**
 * dp_rx_peer_metadata_vdev_id_get_be() - get vdev id from peer metadata
 * @soc: DP soc handle (unused)
 * @peer_metadata: peer metadata word from the Rx descriptor
 *
 * NOTE(review): reinterprets the u32 word through the v1 metadata layout;
 * assumes htt_rx_peer_metadata_v1 exactly overlays one 32-bit word.
 *
 * Return: vdev id
 */
static inline uint16_t
dp_rx_peer_metadata_vdev_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	struct htt_rx_peer_metadata_v1 *metadata =
			(struct htt_rx_peer_metadata_v1 *)&peer_metadata;

	return metadata->vdev_id;
}

/**
 * dp_rx_peer_metadata_lmac_id_get_be() - get lmac id from peer metadata
 * @peer_metadata: peer metadata word from the Rx descriptor
 *
 * Return: lmac id
 */
static inline uint8_t
dp_rx_peer_metadata_lmac_id_get_be(uint32_t peer_metadata)
{
	return HTT_RX_PEER_META_DATA_V1_LMAC_ID_GET(peer_metadata);
}
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_rx_nf_process() - Near Full state handler for RX rings.
 * @int_ctx: interrupt context
 * @hal_ring_hdl: Rx ring handle
 * @reo_ring_num: RX ring number
 * @quota: Quota of work to be done
 *
 * Return: work done in the handler
 */
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota);
#else
/* Near-full IRQ feature disabled: nothing to do, report zero work */
static inline
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota)
{
	return 0;
}
#endif /* WLAN_FEATURE_NEAR_FULL_IRQ */
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/**
 * dp_rx_replensih_soc_get() - get the soc to replenish for a given chip id
 * @soc: DP soc handle
 * @chip_id: chip id of the MLO partner soc
 *
 * Return: DP soc of the partner chip
 */
struct dp_soc *
dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id);

/**
 * dp_soc_get_by_idle_bm_id() - get the soc owning an idle-list BM id
 * @soc: DP soc handle
 * @idle_bm_id: idle link buffer-manager id
 *
 * Return: DP soc that owns @idle_bm_id
 */
struct dp_soc *
dp_soc_get_by_idle_bm_id(struct dp_soc *soc, uint8_t idle_bm_id);

/**
 * dp_soc_get_num_soc_be() - get the number of socs in the MLO group
 * @soc: DP soc handle
 *
 * Return: number of participating socs
 */
uint8_t dp_soc_get_num_soc_be(struct dp_soc *soc);
#else
/* Single-chip build: the local soc services every chip id */
static inline struct dp_soc *
dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	return soc;
}

/* NOTE(review): no dp_soc_get_by_idle_bm_id() stub in this branch --
 * presumably its callers are compiled out without WLAN_MLO_MULTI_CHIP;
 * confirm.
 */
static inline uint8_t
dp_soc_get_num_soc_be(struct dp_soc *soc)
{
	return 1;
}
#endif
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_igmp_handler() - Rx handler for Mcast packets
 * @soc: Handle to DP Soc structure
 * @vdev: DP vdev handle
 * @peer: DP peer handle
 * @nbuf: nbuf to be enqueued
 * @link_id: link id on which the packet is received
 *
 * Return: true when packet sent to stack, false failure
 */
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *peer,
			    qdf_nbuf_t nbuf,
			    uint8_t link_id);
  271. /**
  272. * dp_peer_rx_reorder_queue_setup_be() - Send reo queue setup wmi cmd to FW
  273. * per peer type
  274. * @soc: DP Soc handle
  275. * @peer: dp peer to operate on
  276. * @tid: TID
  277. * @ba_window_size: BlockAck window size
  278. *
  279. * Return: 0 - success, others - failure
  280. */
  281. static inline
  282. QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
  283. struct dp_peer *peer,
  284. int tid,
  285. uint32_t ba_window_size)
  286. {
  287. uint8_t i;
  288. struct dp_mld_link_peers link_peers_info;
  289. struct dp_peer *link_peer;
  290. struct dp_rx_tid *rx_tid;
  291. struct dp_soc *link_peer_soc;
  292. rx_tid = &peer->rx_tid[tid];
  293. if (!rx_tid->hw_qdesc_paddr)
  294. return QDF_STATUS_E_INVAL;
  295. if (!hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
  296. if (IS_MLO_DP_MLD_PEER(peer)) {
  297. /* get link peers with reference */
  298. dp_get_link_peers_ref_from_mld_peer(soc, peer,
  299. &link_peers_info,
  300. DP_MOD_ID_CDP);
  301. /* send WMI cmd to each link peers */
  302. for (i = 0; i < link_peers_info.num_links; i++) {
  303. link_peer = link_peers_info.link_peers[i];
  304. link_peer_soc = link_peer->vdev->pdev->soc;
  305. if (link_peer_soc->cdp_soc.ol_ops->
  306. peer_rx_reorder_queue_setup) {
  307. if (link_peer_soc->cdp_soc.ol_ops->
  308. peer_rx_reorder_queue_setup(
  309. link_peer_soc->ctrl_psoc,
  310. link_peer->vdev->pdev->pdev_id,
  311. link_peer->vdev->vdev_id,
  312. link_peer->mac_addr.raw,
  313. rx_tid->hw_qdesc_paddr,
  314. tid, tid,
  315. 1, ba_window_size)) {
  316. dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
  317. link_peer_soc, tid);
  318. return QDF_STATUS_E_FAILURE;
  319. }
  320. }
  321. }
  322. /* release link peers reference */
  323. dp_release_link_peers_ref(&link_peers_info,
  324. DP_MOD_ID_CDP);
  325. } else if (peer->peer_type == CDP_LINK_PEER_TYPE) {
  326. if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
  327. if (soc->cdp_soc.ol_ops->
  328. peer_rx_reorder_queue_setup(
  329. soc->ctrl_psoc,
  330. peer->vdev->pdev->pdev_id,
  331. peer->vdev->vdev_id,
  332. peer->mac_addr.raw,
  333. rx_tid->hw_qdesc_paddr,
  334. tid, tid,
  335. 1, ba_window_size)) {
  336. dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
  337. soc, tid);
  338. return QDF_STATUS_E_FAILURE;
  339. }
  340. }
  341. } else {
  342. dp_peer_err("invalid peer type %d", peer->peer_type);
  343. return QDF_STATUS_E_FAILURE;
  344. }
  345. } else {
  346. /* Some BE targets dont require WMI and use shared
  347. * table managed by host for storing Reo queue ref structs
  348. */
  349. if (IS_MLO_DP_LINK_PEER(peer) ||
  350. peer->peer_id == HTT_INVALID_PEER) {
  351. /* Return if this is for MLD link peer and table
  352. * is not used in MLD link peer case as MLD peer's
  353. * qref is written to LUT in peer setup or peer map.
  354. * At this point peer setup for link peer is called
  355. * before peer map, hence peer id is not assigned.
  356. * This could happen if peer_setup is called before
  357. * host receives HTT peer map. In this case return
  358. * success with no op and let peer map handle
  359. * writing the reo_qref to LUT.
  360. */
  361. dp_peer_debug("Invalid peer id for dp_peer:%pK", peer);
  362. return QDF_STATUS_SUCCESS;
  363. }
  364. hal_reo_shared_qaddr_write(soc->hal_soc,
  365. peer->peer_id,
  366. tid, peer->rx_tid[tid].hw_qdesc_paddr);
  367. }
  368. return QDF_STATUS_SUCCESS;
  369. }
#else
/**
 * dp_peer_rx_reorder_queue_setup_be() - Send reo queue setup wmi cmd to FW
 * @soc: DP Soc handle
 * @peer: dp peer to operate on
 * @tid: TID
 * @ba_window_size: BlockAck window size
 *
 * Non-MLO variant: a single WMI command for the peer itself.
 *
 * Return: 0 - success, others - failure
 */
static inline
QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
					     struct dp_peer *peer,
					     int tid,
					     uint32_t ba_window_size)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	if (!rx_tid->hw_qdesc_paddr)
		return QDF_STATUS_E_INVAL;

	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
		    soc->ctrl_psoc,
		    peer->vdev->pdev->pdev_id,
		    peer->vdev->vdev_id,
		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
		    1, ba_window_size)) {
			dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
				    soc, tid);
			return QDF_STATUS_E_FAILURE;
		}
	}

	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_FEATURE_11BE_MLO */
#ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
/**
 * dp_rx_prefetch_nbuf_data_be() - prefetch the next nbuf and its data
 * @nbuf: current nbuf being processed (unused)
 * @next: next nbuf in the list, to be warmed into cache
 *
 * Return: None
 */
static inline
void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
	if (next) {
		/* prefetch skb->next and first few bytes of skb->cb */
		qdf_prefetch(next);
		/* skb->cb spread across 2 cache lines hence below prefetch */
		qdf_prefetch(&next->_skb_refdst);
		qdf_prefetch(&next->len);
		qdf_prefetch(&next->protocol);
		qdf_prefetch(next->data);
		qdf_prefetch(next->data + 64);
		qdf_prefetch(next->data + 128);
	}
}
#else
/* nbuf/data prefetch disabled in this build: no-op stub */
static inline
void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
}
#endif
#ifdef QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH
/**
 * dp_rx_va_prefetch() - function to prefetch the SW desc
 * @last_prefetched_hw_desc: HW desc
 *
 * Return: prefetched Rx descriptor virtual address
 */
static inline
void *dp_rx_va_prefetch(void *last_prefetched_hw_desc)
{
	void *prefetch_desc;

	prefetch_desc = (void *)hal_rx_get_reo_desc_va(last_prefetched_hw_desc);
	qdf_prefetch(prefetch_desc);
	return prefetch_desc;
}
/**
 * dp_rx_prefetch_hw_sw_nbuf_32_byte_desc() - function to prefetch HW and SW
 *					      descriptors
 * @soc: DP soc context
 * @hal_soc: Handle to HAL Soc structure
 * @num_entries: valid number of HW descriptors
 * @hal_ring_hdl: Destination ring pointer
 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
 * @last_prefetched_sw_desc: input & output param of last prefetch SW desc
 *
 * Return: None
 */
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
				       hal_soc_handle_t hal_soc,
				       uint32_t num_entries,
				       hal_ring_handle_t hal_ring_hdl,
				       hal_ring_desc_t *last_prefetched_hw_desc,
				       struct dp_rx_desc **last_prefetched_sw_desc)
{
	if (*last_prefetched_sw_desc) {
		/* warm up the nbuf of the previously prefetched SW desc */
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_entries) {
		*last_prefetched_sw_desc =
			dp_rx_va_prefetch(*last_prefetched_hw_desc);

		/* step within the cached line when the desc is not 64-byte
		 * aligned; otherwise move to the next cached 32-byte desc
		 */
		if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
			*last_prefetched_hw_desc =
				hal_srng_dst_prefetch_next_cached_desc(hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
		else
			*last_prefetched_hw_desc =
				hal_srng_dst_get_next_32_byte_desc(hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
	}
}
#else
/* HW/SW desc prefetch disabled in this build: no-op stub */
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
				       hal_soc_handle_t hal_soc,
				       uint32_t num_entries,
				       hal_ring_handle_t hal_ring_hdl,
				       hal_ring_desc_t *last_prefetched_hw_desc,
				       struct dp_rx_desc **last_prefetched_sw_desc)
{
}
#endif
#ifdef CONFIG_WORD_BASED_TLV
/**
 * dp_rx_get_reo_qdesc_addr_be() - API to get qdesc address of reo
 *				   entrance ring desc
 * @hal_soc: Handle to HAL Soc structure
 * @dst_ring_desc: reo dest ring descriptor (used for Lithium DP)
 * @buf: pointer to the start of RX PKT TLV headers
 * @txrx_peer: pointer to txrx_peer
 * @tid: tid value
 *
 * Return: qdesc address in reo destination ring buffer
 */
static inline
uint64_t dp_rx_get_reo_qdesc_addr_be(hal_soc_handle_t hal_soc,
				     uint8_t *dst_ring_desc,
				     uint8_t *buf,
				     struct dp_txrx_peer *txrx_peer,
				     unsigned int tid)
{
	struct dp_peer *peer = NULL;
	uint64_t qdesc_addr = 0;

	if (hal_reo_shared_qaddr_is_enable(hal_soc)) {
		/* shared-qaddr targets: the peer id stands in for the
		 * qdesc address (LUT is indexed by peer id)
		 */
		qdesc_addr = (uint64_t)txrx_peer->peer_id;
	} else {
		/* take a temporary peer ref to read the per-tid qdesc */
		peer = dp_peer_get_ref_by_id(txrx_peer->vdev->pdev->soc,
					     txrx_peer->peer_id,
					     DP_MOD_ID_CONFIG);
		if (!peer)
			return 0;

		qdesc_addr = (uint64_t)peer->rx_tid[tid].hw_qdesc_paddr;
		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
	}
	return qdesc_addr;
}
#else
/* Full-TLV build: read the qdesc address straight from the ring/TLVs */
static inline
uint64_t dp_rx_get_reo_qdesc_addr_be(hal_soc_handle_t hal_soc,
				     uint8_t *dst_ring_desc,
				     uint8_t *buf,
				     struct dp_txrx_peer *txrx_peer,
				     unsigned int tid)
{
	return hal_rx_get_qdesc_addr(hal_soc, dst_ring_desc, buf);
}
#endif
/**
 * dp_rx_wbm_err_reap_desc_be() - Function to reap and replenish
 *				  WBM RX Error descriptors
 * @int_ctx: pointer to DP interrupt context
 * @soc: core DP main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, to be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 * @rx_bufs_used: No. of descriptors reaped
 *
 * This function implements the core Rx functionality like reap and
 * replenish the RX error ring Descriptors, and create a nbuf list
 * out of it. It also reads wbm error information from descriptors
 * and update the nbuf tlv area.
 *
 * Return: qdf_nbuf_t: head pointer to the nbuf list created
 */
qdf_nbuf_t
dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
			   hal_ring_handle_t hal_ring_hdl, uint32_t quota,
			   uint32_t *rx_bufs_used);
/**
 * dp_rx_null_q_desc_handle_be() - Function to handle NULL Queue
 *				   descriptor violation on either a
 *				   REO or WBM ring
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @txrx_peer: txrx peer handle
 * @is_reo_exception: flag to check if the error is from REO or WBM
 * @link_id: link Id on which the packet is received
 *
 * This function handles NULL queue descriptor violations arising out
 * a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is setup. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
 *	   if nbuf could not be handled or dropped.
 */
QDF_STATUS
dp_rx_null_q_desc_handle_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
			    uint8_t *rx_tlv_hdr, uint8_t pool_id,
			    struct dp_txrx_peer *txrx_peer,
			    bool is_reo_exception, uint8_t link_id);
#if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
/**
 * dp_rx_set_msdu_lmac_id() - cache the lmac id from peer metadata in the nbuf
 * @nbuf: rx buffer
 * @peer_mdata: peer metadata word from the Rx descriptor
 *
 * Return: None
 */
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
	uint8_t lmac_id;

	lmac_id = dp_rx_peer_metadata_lmac_id_get_be(peer_mdata);
	qdf_nbuf_set_lmac_id(nbuf, lmac_id);
}
#else
/* per-lmac pkt stats disabled: no-op stub */
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
}
#endif
#ifndef CONFIG_NBUF_AP_PLATFORM
/**
 * dp_rx_get_peer_id_be() - get the peer id cached in the nbuf cb
 * @nbuf: rx buffer
 *
 * Return: peer id
 */
static inline uint16_t
dp_rx_get_peer_id_be(qdf_nbuf_t nbuf)
{
	return QDF_NBUF_CB_RX_PEER_ID(nbuf);
}

/* Desc info is fully parsed into the nbuf cb by
 * dp_rx_copy_desc_info_in_nbuf_cb() on this platform,
 * so there is nothing to save here: no-op stub.
 */
static inline void
dp_rx_set_mpdu_msdu_desc_info_in_nbuf(qdf_nbuf_t nbuf,
				      uint32_t mpdu_desc_info,
				      uint32_t peer_mdata,
				      uint32_t msdu_desc_info)
{
}
/**
 * dp_rx_copy_desc_info_in_nbuf_cb() - parse REO ring desc info into nbuf cb
 * @soc: DP soc context
 * @ring_desc: REO destination ring descriptor
 * @nbuf: rx buffer whose cb fields are populated
 * @reo_ring_num: REO ring number the buffer was reaped from
 *
 * Return: pkt capture offload indication from peer metadata
 */
static inline uint8_t dp_rx_copy_desc_info_in_nbuf_cb(struct dp_soc *soc,
						      hal_ring_desc_t ring_desc,
						      qdf_nbuf_t nbuf,
						      uint8_t reo_ring_num)
{
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	uint8_t pkt_capture_offload = 0;
	uint32_t peer_mdata = 0;

	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));

	/* Get MPDU DESC info */
	hal_rx_mpdu_desc_info_get_be(ring_desc, &mpdu_desc_info);

	/* Get MSDU DESC info */
	hal_rx_msdu_desc_info_get_be(ring_desc, &msdu_desc_info);

	/* Set the end bit to identify the last buffer in MPDU */
	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
		qdf_nbuf_set_rx_retry_flag(nbuf, 1);

	if (qdf_unlikely(mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RAW_AMPDU))
		qdf_nbuf_set_raw_frame(nbuf, 1);

	peer_mdata = mpdu_desc_info.peer_meta_data;
	QDF_NBUF_CB_RX_PEER_ID(nbuf) =
		dp_rx_peer_metadata_peer_id_get_be(soc, peer_mdata);
	QDF_NBUF_CB_RX_VDEV_ID(nbuf) =
		dp_rx_peer_metadata_vdev_id_get_be(soc, peer_mdata);
	dp_rx_set_msdu_lmac_id(nbuf, peer_mdata);

	/* to indicate whether this msdu is rx offload */
	pkt_capture_offload =
		DP_PEER_METADATA_OFFLOAD_GET_BE(peer_mdata);

	/*
	 * save msdu flags first, last and continuation msdu in
	 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
	 * length to nbuf->cb. This ensures the info required for
	 * per pkt processing is always in the same cache line.
	 * This helps in improving throughput for smaller pkt
	 * sizes.
	 */
	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);

	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
		qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);

	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
		qdf_nbuf_set_da_mcbc(nbuf, 1);

	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
		qdf_nbuf_set_da_valid(nbuf, 1);

	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
		qdf_nbuf_set_sa_valid(nbuf, 1);

	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_INTRA_BSS)
		qdf_nbuf_set_intra_bss(nbuf, 1);

	if (qdf_likely(mpdu_desc_info.mpdu_flags &
		       HAL_MPDU_F_QOS_CONTROL_VALID))
		qdf_nbuf_set_tid_val(nbuf, mpdu_desc_info.tid);

	/* set sw exception */
	qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
			nbuf,
			hal_rx_sw_exception_get_be(ring_desc));

	QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_desc_info.msdu_len;
	QDF_NBUF_CB_RX_CTX_ID(nbuf) = reo_ring_num;

	return pkt_capture_offload;
}
/**
 * hal_rx_get_l3_pad_bytes_be() - get L3 header padding from the rx TLVs
 * @nbuf: rx buffer (unused in this variant)
 * @rx_tlv_hdr: start of rx TLV headers
 *
 * Return: number of L3 padding bytes
 */
static inline uint8_t hal_rx_get_l3_pad_bytes_be(qdf_nbuf_t nbuf,
						 uint8_t *rx_tlv_hdr)
{
	return HAL_RX_TLV_L3_HEADER_PADDING_GET(rx_tlv_hdr);
}
#else
/**
 * dp_rx_get_peer_id_be() - extract peer id from peer metadata in the nbuf cb
 * @nbuf: rx buffer
 *
 * NOTE(review): uses DP_BE_PEER_METADATA_PEER_ID_MASK/_SHIFT, which this
 * header only defines when DP_USE_REDUCED_PEER_ID_FIELD_WIDTH is unset --
 * confirm the two configs cannot be enabled together.
 *
 * Return: peer id
 */
static inline uint16_t
dp_rx_get_peer_id_be(qdf_nbuf_t nbuf)
{
	uint32_t peer_metadata = QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf);

	return ((peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
		DP_BE_PEER_METADATA_PEER_ID_SHIFT);
}

/**
 * dp_rx_set_mpdu_msdu_desc_info_in_nbuf() - cache raw REO desc info words
 *					     in the nbuf cb
 * @nbuf: rx buffer
 * @mpdu_desc_info: MPDU desc info word
 * @peer_mdata: peer metadata word
 * @msdu_desc_info: MSDU desc info word
 *
 * Return: None
 */
static inline void
dp_rx_set_mpdu_msdu_desc_info_in_nbuf(qdf_nbuf_t nbuf,
				      uint32_t mpdu_desc_info,
				      uint32_t peer_mdata,
				      uint32_t msdu_desc_info)
{
	QDF_NBUF_CB_RX_MPDU_DESC_INFO_1(nbuf) = mpdu_desc_info;
	QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf) = peer_mdata;
	QDF_NBUF_CB_RX_MSDU_DESC_INFO(nbuf) = msdu_desc_info;
}
/**
 * dp_rx_copy_desc_info_in_nbuf_cb() - stash raw REO desc info words in nbuf cb
 * @soc: DP soc context (unused in this variant)
 * @ring_desc: REO destination ring descriptor
 * @nbuf: rx buffer
 * @reo_ring_num: REO ring number (unused in this variant)
 *
 * Per-field parsing is deferred; only the raw desc info words are saved.
 *
 * Return: pkt capture offload indication (always 0 in this variant)
 */
static inline uint8_t dp_rx_copy_desc_info_in_nbuf_cb(struct dp_soc *soc,
						      hal_ring_desc_t ring_desc,
						      qdf_nbuf_t nbuf,
						      uint8_t reo_ring_num)
{
	uint32_t mpdu_desc_info = 0;
	uint32_t msdu_desc_info = 0;
	uint32_t peer_mdata = 0;

	/* get REO mpdu & msdu desc info */
	hal_rx_get_mpdu_msdu_desc_info_be(ring_desc,
					  &mpdu_desc_info,
					  &peer_mdata,
					  &msdu_desc_info);

	dp_rx_set_mpdu_msdu_desc_info_in_nbuf(nbuf,
					      mpdu_desc_info,
					      peer_mdata,
					      msdu_desc_info);

	return 0;
}
/**
 * hal_rx_get_l3_pad_bytes_be() - get L3 header padding from the nbuf cb
 * @nbuf: rx buffer
 * @rx_tlv_hdr: start of rx TLV headers (unused in this variant)
 *
 * Return: 2 if the L3 pad MSB is set in the nbuf cb, else 0
 */
static inline uint8_t hal_rx_get_l3_pad_bytes_be(qdf_nbuf_t nbuf,
						 uint8_t *rx_tlv_hdr)
{
	return QDF_NBUF_CB_RX_L3_PAD_MSB(nbuf) ? 2 : 0;
}
#endif
#endif