dp_be_rx.h 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913
  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #ifndef _DP_BE_RX_H_
  20. #define _DP_BE_RX_H_
  21. #include <dp_types.h>
  22. #include "dp_be.h"
  23. #include "dp_peer.h"
  24. #include <dp_rx.h>
  25. #include "hal_be_rx.h"
  26. #include "hal_be_rx_tlv.h"
  27. /*
  28. * dp_be_intrabss_params
  29. *
  30. * @dest_soc: dest soc to forward the packet to
  31. * @tx_vdev_id: vdev id retrieved from dest peer
  32. */
  33. struct dp_be_intrabss_params {
  34. struct dp_soc *dest_soc;
  35. uint8_t tx_vdev_id;
  36. };
  37. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  38. /**
  39. * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL
  40. * pkt with DA not equal to vdev mac addr, fwd is not allowed.
  41. * @soc: core txrx main context
  42. * @ta_txrx_peer: source peer entry
  43. * @rx_tlv_hdr: start address of rx tlvs
  44. * @nbuf: nbuf that has to be intrabss forwarded
  45. * @link_id: link id on which the packet is received
  46. *
  47. * Return: true if it is forwarded else false
  48. */
  49. bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
  50. struct dp_txrx_peer *ta_txrx_peer,
  51. uint8_t *rx_tlv_hdr,
  52. qdf_nbuf_t nbuf,
  53. uint8_t link_id);
  54. #endif
  55. /**
  56. * dp_rx_intrabss_mcast_handler_be() - intrabss mcast handler
  57. * @soc: core txrx main context
  58. * @ta_txrx_peer: source txrx_peer entry
  59. * @nbuf_copy: nbuf that has to be intrabss forwarded
  60. * @tid_stats: tid_stats structure
  61. * @link_id: link id on which the packet is received
  62. *
  63. * Return: true if it is forwarded else false
  64. */
  65. bool
  66. dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
  67. struct dp_txrx_peer *ta_txrx_peer,
  68. qdf_nbuf_t nbuf_copy,
  69. struct cdp_tid_rx_stats *tid_stats,
  70. uint8_t link_id);
  71. void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
  72. uint32_t *msg_word,
  73. void *rx_filter);
  74. /**
  75. * dp_rx_process_be() - Brain of the Rx processing functionality
  76. * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
  77. * @int_ctx: per interrupt context
  78. * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
  79. * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
  80. * @quota: No. of units (packets) that can be serviced in one shot.
  81. *
  82. * This function implements the core of Rx functionality. This is
  83. * expected to handle only non-error frames.
  84. *
  85. * Return: uint32_t: No. of elements processed
  86. */
  87. uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
  88. hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
  89. uint32_t quota);
  90. /**
  91. * dp_rx_chain_msdus_be() - Function to chain all msdus of a mpdu
  92. * to pdev invalid peer list
  93. *
  94. * @soc: core DP main context
  95. * @nbuf: Buffer pointer
  96. * @rx_tlv_hdr: start of rx tlv header
  97. * @mac_id: mac id
  98. *
  99. * Return: bool: true for last msdu of mpdu
  100. */
  101. bool dp_rx_chain_msdus_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
  102. uint8_t *rx_tlv_hdr, uint8_t mac_id);
  103. /**
  104. * dp_rx_desc_pool_init_be() - Initialize Rx Descriptor pool(s)
  105. * @soc: Handle to DP Soc structure
  106. * @rx_desc_pool: Rx descriptor pool handler
  107. * @pool_id: Rx descriptor pool ID
  108. *
  109. * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
  110. */
  111. QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
  112. struct rx_desc_pool *rx_desc_pool,
  113. uint32_t pool_id);
  114. /**
  115. * dp_rx_desc_pool_deinit_be() - De-initialize Rx Descriptor pool(s)
  116. * @soc: Handle to DP Soc structure
  117. * @rx_desc_pool: Rx descriptor pool handler
  118. * @pool_id: Rx descriptor pool ID
  119. *
  120. * Return: None
  121. */
  122. void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
  123. struct rx_desc_pool *rx_desc_pool,
  124. uint32_t pool_id);
  125. /**
  126. * dp_wbm_get_rx_desc_from_hal_desc_be() - Get corresponding Rx Desc
  127. * address from WBM ring Desc
  128. * @soc: Handle to DP Soc structure
  129. * @ring_desc: ring descriptor structure pointer
  130. * @r_rx_desc: pointer to a pointer of Rx Desc
  131. *
  132. * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
  133. */
  134. QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
  135. void *ring_desc,
  136. struct dp_rx_desc **r_rx_desc);
  137. /**
  138. * dp_rx_desc_cookie_2_va_be() - Convert RX Desc cookie ID to VA
  139. * @soc:Handle to DP Soc structure
  140. * @cookie: cookie used to lookup virtual address
  141. *
  142. * Return: Rx descriptor virtual address
  143. */
  144. struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
  145. uint32_t cookie);
  146. #if !defined(DP_FEATURE_HW_COOKIE_CONVERSION) || \
  147. defined(DP_HW_COOKIE_CONVERT_EXCEPTION)
  148. /**
  149. * dp_rx_desc_sw_cc_check() - check if RX desc VA is got correctly,
  150. * if not, do SW cookie conversion.
  151. * @soc:Handle to DP Soc structure
  152. * @rx_buf_cookie: RX desc cookie ID
  153. * @r_rx_desc: double pointer for RX desc
  154. *
  155. * Return: None
  156. */
  157. static inline void
  158. dp_rx_desc_sw_cc_check(struct dp_soc *soc,
  159. uint32_t rx_buf_cookie,
  160. struct dp_rx_desc **r_rx_desc)
  161. {
  162. if (qdf_unlikely(!(*r_rx_desc))) {
  163. *r_rx_desc = (struct dp_rx_desc *)
  164. dp_cc_desc_find(soc,
  165. rx_buf_cookie);
  166. }
  167. }
  168. #else
/* HW cookie conversion path: VA is always valid, no SW fallback needed */
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
		       uint32_t rx_buf_cookie,
		       struct dp_rx_desc **r_rx_desc)
{
}
#endif /* !DP_FEATURE_HW_COOKIE_CONVERSION || DP_HW_COOKIE_CONVERT_EXCEPTION */
  176. struct dp_rx_desc *dp_rx_desc_ppeds_cookie_2_va(struct dp_soc *soc,
  177. unsigned long cookie);
  178. #define DP_PEER_METADATA_OFFLOAD_GET_BE(_peer_metadata) (0)
  179. #define HTT_RX_PEER_META_DATA_FIELD_GET(_var, _field_s, _field_m) \
  180. (((_var) & (_field_m)) >> (_field_s))
  181. #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
  182. static inline uint16_t
  183. dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
  184. {
  185. uint8_t ml_peer_valid;
  186. uint16_t peer_id;
  187. peer_id = HTT_RX_PEER_META_DATA_FIELD_GET(peer_metadata,
  188. soc->htt_peer_id_s,
  189. soc->htt_peer_id_m);
  190. ml_peer_valid = HTT_RX_PEER_META_DATA_FIELD_GET(
  191. peer_metadata,
  192. soc->htt_mld_peer_valid_s,
  193. soc->htt_mld_peer_valid_m);
  194. return (peer_id | (ml_peer_valid << soc->peer_id_shift));
  195. }
  196. #else
  197. /* Combine ml_peer_valid and peer_id field */
  198. #define DP_BE_PEER_METADATA_PEER_ID_MASK 0x00003fff
  199. #define DP_BE_PEER_METADATA_PEER_ID_SHIFT 0
  200. static inline uint16_t
  201. dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
  202. {
  203. return ((peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
  204. DP_BE_PEER_METADATA_PEER_ID_SHIFT);
  205. }
  206. #endif
/* Extract the vdev id from peer metadata using the soc-programmed
 * HTT field position/mask.
 */
static inline uint16_t
dp_rx_peer_metadata_vdev_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	return HTT_RX_PEER_META_DATA_FIELD_GET(peer_metadata,
					       soc->htt_vdev_id_s,
					       soc->htt_vdev_id_m);
}
/* Extract the lmac id from a v1-format peer metadata word */
static inline uint8_t
dp_rx_peer_metadata_lmac_id_get_be(uint32_t peer_metadata)
{
	return HTT_RX_PEER_META_DATA_V1_LMAC_ID_GET(peer_metadata);
}
  219. #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
  220. /**
  221. * dp_rx_nf_process() - Near Full state handler for RX rings.
  222. * @int_ctx: interrupt context
  223. * @hal_ring_hdl: Rx ring handle
  224. * @reo_ring_num: RX ring number
  225. * @quota: Quota of work to be done
  226. *
  227. * Return: work done in the handler
  228. */
  229. uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
  230. hal_ring_handle_t hal_ring_hdl,
  231. uint8_t reo_ring_num,
  232. uint32_t quota);
  233. #else
/* Near-full IRQ feature disabled: no near-full handling, zero work done */
static inline
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota)
{
	return 0;
}
  242. #endif /*WLAN_FEATURE_NEAR_FULL_IRQ */
  243. #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
  244. struct dp_soc *
  245. dp_rx_replenish_soc_get(struct dp_soc *soc, uint8_t chip_id);
  246. struct dp_soc *
  247. dp_soc_get_by_idle_bm_id(struct dp_soc *soc, uint8_t idle_bm_id);
  248. uint8_t dp_soc_get_num_soc_be(struct dp_soc *soc);
  249. #else
/* Single-chip build: replenish always happens on the local soc */
static inline struct dp_soc *
dp_rx_replenish_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	return soc;
}
/* Single-chip build: exactly one DP soc exists */
static inline uint8_t
dp_soc_get_num_soc_be(struct dp_soc *soc)
{
	return 1;
}
  260. #endif
  261. #ifdef WLAN_FEATURE_11BE_MLO
  262. /**
  263. * dp_rx_mlo_igmp_handler() - Rx handler for Mcast packets
  264. * @soc: Handle to DP Soc structure
  265. * @vdev: DP vdev handle
  266. * @peer: DP peer handle
  267. * @nbuf: nbuf to be enqueued
  268. * @link_id: link id on which the packet is received
  269. *
  270. * Return: true when packet sent to stack, false failure
  271. */
  272. bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
  273. struct dp_vdev *vdev,
  274. struct dp_txrx_peer *peer,
  275. qdf_nbuf_t nbuf,
  276. uint8_t link_id);
  277. /**
  278. * dp_peer_rx_reorder_queue_setup_be() - Send reo queue setup wmi cmd to FW
  279. * per peer type
  280. * @soc: DP Soc handle
  281. * @peer: dp peer to operate on
  282. * @tid: TID
  283. * @ba_window_size: BlockAck window size
  284. *
  285. * Return: 0 - success, others - failure
  286. */
  287. static inline
  288. QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
  289. struct dp_peer *peer,
  290. int tid,
  291. uint32_t ba_window_size)
  292. {
  293. uint8_t i;
  294. struct dp_mld_link_peers link_peers_info;
  295. struct dp_peer *link_peer;
  296. struct dp_rx_tid *rx_tid;
  297. struct dp_soc *link_peer_soc;
  298. rx_tid = &peer->rx_tid[tid];
  299. if (!rx_tid->hw_qdesc_paddr)
  300. return QDF_STATUS_E_INVAL;
  301. if (!hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
  302. if (IS_MLO_DP_MLD_PEER(peer)) {
  303. /* get link peers with reference */
  304. dp_get_link_peers_ref_from_mld_peer(soc, peer,
  305. &link_peers_info,
  306. DP_MOD_ID_CDP);
  307. /* send WMI cmd to each link peers */
  308. for (i = 0; i < link_peers_info.num_links; i++) {
  309. link_peer = link_peers_info.link_peers[i];
  310. link_peer_soc = link_peer->vdev->pdev->soc;
  311. if (link_peer_soc->cdp_soc.ol_ops->
  312. peer_rx_reorder_queue_setup) {
  313. if (link_peer_soc->cdp_soc.ol_ops->
  314. peer_rx_reorder_queue_setup(
  315. link_peer_soc->ctrl_psoc,
  316. link_peer->vdev->pdev->pdev_id,
  317. link_peer->vdev->vdev_id,
  318. link_peer->mac_addr.raw,
  319. rx_tid->hw_qdesc_paddr,
  320. tid, tid,
  321. 1, ba_window_size)) {
  322. dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
  323. link_peer_soc, tid);
  324. return QDF_STATUS_E_FAILURE;
  325. }
  326. }
  327. }
  328. /* release link peers reference */
  329. dp_release_link_peers_ref(&link_peers_info,
  330. DP_MOD_ID_CDP);
  331. } else if (peer->peer_type == CDP_LINK_PEER_TYPE) {
  332. if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
  333. if (soc->cdp_soc.ol_ops->
  334. peer_rx_reorder_queue_setup(
  335. soc->ctrl_psoc,
  336. peer->vdev->pdev->pdev_id,
  337. peer->vdev->vdev_id,
  338. peer->mac_addr.raw,
  339. rx_tid->hw_qdesc_paddr,
  340. tid, tid,
  341. 1, ba_window_size)) {
  342. dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
  343. soc, tid);
  344. return QDF_STATUS_E_FAILURE;
  345. }
  346. }
  347. } else {
  348. dp_peer_err("invalid peer type %d", peer->peer_type);
  349. return QDF_STATUS_E_FAILURE;
  350. }
  351. } else {
  352. /* Some BE targets dont require WMI and use shared
  353. * table managed by host for storing Reo queue ref structs
  354. */
  355. if (IS_MLO_DP_LINK_PEER(peer) ||
  356. peer->peer_id == HTT_INVALID_PEER) {
  357. /* Return if this is for MLD link peer and table
  358. * is not used in MLD link peer case as MLD peer's
  359. * qref is written to LUT in peer setup or peer map.
  360. * At this point peer setup for link peer is called
  361. * before peer map, hence peer id is not assigned.
  362. * This could happen if peer_setup is called before
  363. * host receives HTT peer map. In this case return
  364. * success with no op and let peer map handle
  365. * writing the reo_qref to LUT.
  366. */
  367. dp_peer_debug("Invalid peer id for dp_peer:%pK", peer);
  368. return QDF_STATUS_SUCCESS;
  369. }
  370. hal_reo_shared_qaddr_write(soc->hal_soc,
  371. peer->peer_id,
  372. tid, peer->rx_tid[tid].hw_qdesc_paddr);
  373. }
  374. return QDF_STATUS_SUCCESS;
  375. }
  376. #else
  377. static inline
  378. QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
  379. struct dp_peer *peer,
  380. int tid,
  381. uint32_t ba_window_size)
  382. {
  383. struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
  384. if (!rx_tid->hw_qdesc_paddr)
  385. return QDF_STATUS_E_INVAL;
  386. if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
  387. if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
  388. soc->ctrl_psoc,
  389. peer->vdev->pdev->pdev_id,
  390. peer->vdev->vdev_id,
  391. peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
  392. 1, ba_window_size)) {
  393. dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
  394. soc, tid);
  395. return QDF_STATUS_E_FAILURE;
  396. }
  397. }
  398. return QDF_STATUS_SUCCESS;
  399. }
  400. #endif /* WLAN_FEATURE_11BE_MLO */
  401. #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
  402. static inline
  403. void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
  404. {
  405. if (next) {
  406. /* prefetch skb->next and first few bytes of skb->cb */
  407. qdf_prefetch(next);
  408. /* skb->cb spread across 2 cache lines hence below prefetch */
  409. qdf_prefetch(&next->_skb_refdst);
  410. qdf_prefetch(&next->protocol);
  411. qdf_prefetch(&next->data);
  412. qdf_prefetch(next->data);
  413. qdf_prefetch(next->data + 64);
  414. }
  415. }
  416. #else
/* nbuf/data prefetch feature disabled: no-op */
static inline
void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
}
  421. #endif
  422. #ifdef QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH
  423. /**
  424. * dp_rx_va_prefetch() - function to prefetch the SW desc
  425. * @last_prefetched_hw_desc: HW desc
  426. *
  427. * Return: prefetched Rx descriptor virtual address
  428. */
  429. static inline
  430. void *dp_rx_va_prefetch(void *last_prefetched_hw_desc)
  431. {
  432. void *prefetch_desc;
  433. prefetch_desc = (void *)hal_rx_get_reo_desc_va(last_prefetched_hw_desc);
  434. qdf_prefetch(prefetch_desc);
  435. return prefetch_desc;
  436. }
/**
 * dp_rx_prefetch_hw_sw_nbuf_32_byte_desc() - function to prefetch HW and SW
 *                                            descriptors
 * @soc: DP soc context
 * @hal_soc: Handle to HAL Soc structure
 * @num_entries: valid number of HW descriptors
 * @hal_ring_hdl: Destination ring pointer
 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
 * @last_prefetched_sw_desc: input & output param of last prefetch SW desc
 *
 * Return: None
 */
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
				       hal_soc_handle_t hal_soc,
				       uint32_t num_entries,
				       hal_ring_handle_t hal_ring_hdl,
				       hal_ring_desc_t *last_prefetched_hw_desc,
				       struct dp_rx_desc **last_prefetched_sw_desc)
{
	/* prefetch the first two cache lines of the nbuf attached to the
	 * SW descriptor prefetched on the previous iteration
	 */
	if (*last_prefetched_sw_desc) {
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_entries) {
		/* prefetch the SW desc VA held in the current HW desc */
		*last_prefetched_sw_desc =
			dp_rx_va_prefetch(*last_prefetched_hw_desc);

		/* advance to the next 32-byte HW entry; the HAL helper is
		 * chosen by the 64-byte alignment of the current desc
		 * (low 6 address bits non-zero -> prefetch the next cached
		 * desc as well)
		 */
		if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
			*last_prefetched_hw_desc =
				hal_srng_dst_prefetch_next_cached_desc(hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
		else
			*last_prefetched_hw_desc =
				hal_srng_dst_get_next_32_byte_desc(hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
	}
}
  476. #else
/* HW/SW desc prefetch feature disabled: no-op */
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
				       hal_soc_handle_t hal_soc,
				       uint32_t num_entries,
				       hal_ring_handle_t hal_ring_hdl,
				       hal_ring_desc_t *last_prefetched_hw_desc,
				       struct dp_rx_desc **last_prefetched_sw_desc)
{
}
  486. #endif
  487. #ifdef CONFIG_WORD_BASED_TLV
  488. /**
  489. * dp_rx_get_reo_qdesc_addr_be(): API to get qdesc address of reo
  490. * entrance ring desc
  491. *
  492. * @hal_soc: Handle to HAL Soc structure
  493. * @dst_ring_desc: reo dest ring descriptor (used for Lithium DP)
  494. * @buf: pointer to the start of RX PKT TLV headers
  495. * @txrx_peer: pointer to txrx_peer
  496. * @tid: tid value
  497. *
  498. * Return: qdesc address in reo destination ring buffer
  499. */
  500. static inline
  501. uint64_t dp_rx_get_reo_qdesc_addr_be(hal_soc_handle_t hal_soc,
  502. uint8_t *dst_ring_desc,
  503. uint8_t *buf,
  504. struct dp_txrx_peer *txrx_peer,
  505. unsigned int tid)
  506. {
  507. struct dp_peer *peer = NULL;
  508. uint64_t qdesc_addr = 0;
  509. if (hal_reo_shared_qaddr_is_enable(hal_soc)) {
  510. qdesc_addr = (uint64_t)txrx_peer->peer_id;
  511. } else {
  512. peer = dp_peer_get_ref_by_id(txrx_peer->vdev->pdev->soc,
  513. txrx_peer->peer_id,
  514. DP_MOD_ID_CONFIG);
  515. if (!peer)
  516. return 0;
  517. qdesc_addr = (uint64_t)peer->rx_tid[tid].hw_qdesc_paddr;
  518. dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
  519. }
  520. return qdesc_addr;
  521. }
  522. #else
/* Non word-based TLV: HAL parses the qdesc address from the ring desc/TLVs */
static inline
uint64_t dp_rx_get_reo_qdesc_addr_be(hal_soc_handle_t hal_soc,
				     uint8_t *dst_ring_desc,
				     uint8_t *buf,
				     struct dp_txrx_peer *txrx_peer,
				     unsigned int tid)
{
	return hal_rx_get_qdesc_addr(hal_soc, dst_ring_desc, buf);
}
  532. #endif
  533. /**
  534. * dp_rx_wbm_err_reap_desc_be() - Function to reap and replenish
  535. * WBM RX Error descriptors
  536. *
  537. * @int_ctx: pointer to DP interrupt context
  538. * @soc: core DP main context
  539. * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, to be serviced
  540. * @quota: No. of units (packets) that can be serviced in one shot.
  541. * @rx_bufs_used: No. of descriptors reaped
  542. *
  543. * This function implements the core Rx functionality like reap and
  544. * replenish the RX error ring Descriptors, and create a nbuf list
  545. * out of it. It also reads wbm error information from descriptors
  546. * and update the nbuf tlv area.
  547. *
  548. * Return: qdf_nbuf_t: head pointer to the nbuf list created
  549. */
  550. qdf_nbuf_t
  551. dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
  552. hal_ring_handle_t hal_ring_hdl, uint32_t quota,
  553. uint32_t *rx_bufs_used);
  554. /**
  555. * dp_rx_null_q_desc_handle_be() - Function to handle NULL Queue
  556. * descriptor violation on either a
  557. * REO or WBM ring
  558. *
  559. * @soc: core DP main context
  560. * @nbuf: buffer pointer
  561. * @rx_tlv_hdr: start of rx tlv header
  562. * @pool_id: mac id
  563. * @txrx_peer: txrx peer handle
  564. * @is_reo_exception: flag to check if the error is from REO or WBM
  565. * @link_id: link Id on which the packet is received
  566. *
  567. * This function handles NULL queue descriptor violations arising out
  568. * a missing REO queue for a given peer or a given TID. This typically
  569. * may happen if a packet is received on a QOS enabled TID before the
  570. * ADDBA negotiation for that TID, when the TID queue is setup. Or
  571. * it may also happen for MC/BC frames if they are not routed to the
  572. * non-QOS TID queue, in the absence of any other default TID queue.
  573. * This error can show up both in a REO destination or WBM release ring.
  574. *
  575. * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
  576. * if nbuf could not be handled or dropped.
  577. */
  578. QDF_STATUS
  579. dp_rx_null_q_desc_handle_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
  580. uint8_t *rx_tlv_hdr, uint8_t pool_id,
  581. struct dp_txrx_peer *txrx_peer,
  582. bool is_reo_exception, uint8_t link_id);
  583. #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
  584. static inline void
  585. dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
  586. {
  587. uint8_t lmac_id;
  588. lmac_id = dp_rx_peer_metadata_lmac_id_get_be(peer_mdata);
  589. qdf_nbuf_set_lmac_id(nbuf, lmac_id);
  590. }
  591. #else
/* per-lmac pkt stats disabled: nothing to record on the nbuf */
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
}
  596. #endif
  597. #ifndef CONFIG_NBUF_AP_PLATFORM
  598. #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
  599. static inline uint8_t
  600. dp_rx_peer_mdata_link_id_get_be(uint32_t peer_mdata)
  601. {
  602. uint8_t link_id;
  603. link_id = HTT_RX_PEER_META_DATA_V1A_LOGICAL_LINK_ID_GET(peer_mdata) + 1;
  604. if (link_id > DP_MAX_MLO_LINKS)
  605. link_id = 0;
  606. return link_id;
  607. }
  608. #else
/* MLO link stats disabled: no logical link id carried in metadata */
static inline uint8_t
dp_rx_peer_mdata_link_id_get_be(uint32_t peer_metadata)
{
	return 0;
}
  614. #endif /* DP_MLO_LINK_STATS_SUPPORT */
/* Parse the 802.11 MPDU sequence number from the RX TLVs into nbuf->cb */
static inline void
dp_rx_set_mpdu_seq_number_be(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	QDF_NBUF_CB_RX_MPDU_SEQ_NUM(nbuf) =
			hal_rx_mpdu_sequence_number_get_be(rx_tlv_hdr);
}
  621. static inline void
  622. dp_rx_set_link_id_be(qdf_nbuf_t nbuf, uint32_t peer_mdata)
  623. {
  624. uint8_t logical_link_id;
  625. logical_link_id = dp_rx_peer_mdata_link_id_get_be(peer_mdata);
  626. QDF_NBUF_CB_RX_LOGICAL_LINK_ID(nbuf) = logical_link_id;
  627. }
/* Peer id was already parsed into nbuf->cb on the reap path */
static inline uint16_t
dp_rx_get_peer_id_be(qdf_nbuf_t nbuf)
{
	return QDF_NBUF_CB_RX_PEER_ID(nbuf);
}
/* No-op on this platform: desc info is parsed immediately in
 * dp_rx_copy_desc_info_in_nbuf_cb() instead of being cached raw in nbuf->cb.
 */
static inline void
dp_rx_set_mpdu_msdu_desc_info_in_nbuf(qdf_nbuf_t nbuf,
				      uint32_t mpdu_desc_info,
				      uint32_t peer_mdata,
				      uint32_t msdu_desc_info)
{
}
/**
 * dp_rx_copy_desc_info_in_nbuf_cb() - parse REO ring desc info into nbuf cb
 * @soc: DP soc context
 * @ring_desc: REO destination ring descriptor
 * @nbuf: rx buffer whose cb fields are populated
 * @reo_ring_num: REO ring number, recorded as the RX context id
 *
 * Return: non-zero when the peer metadata marks this msdu as rx offload
 *	   (pkt capture offload indication)
 */
static inline uint8_t dp_rx_copy_desc_info_in_nbuf_cb(struct dp_soc *soc,
						      hal_ring_desc_t ring_desc,
						      qdf_nbuf_t nbuf,
						      uint8_t reo_ring_num)
{
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	uint8_t pkt_capture_offload = 0;
	uint32_t peer_mdata = 0;

	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));

	/* Get MPDU DESC info */
	hal_rx_mpdu_desc_info_get_be(ring_desc, &mpdu_desc_info);

	/* Get MSDU DESC info */
	hal_rx_msdu_desc_info_get_be(ring_desc, &msdu_desc_info);

	/* Set the end bit to identify the last buffer in MPDU */
	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
		qdf_nbuf_set_rx_retry_flag(nbuf, 1);

	if (qdf_unlikely(mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RAW_AMPDU))
		qdf_nbuf_set_raw_frame(nbuf, 1);

	/* peer id / vdev id / lmac id / link id all derive from the
	 * peer metadata carried in the MPDU desc info
	 */
	peer_mdata = mpdu_desc_info.peer_meta_data;
	QDF_NBUF_CB_RX_PEER_ID(nbuf) =
		dp_rx_peer_metadata_peer_id_get_be(soc, peer_mdata);
	QDF_NBUF_CB_RX_VDEV_ID(nbuf) =
		dp_rx_peer_metadata_vdev_id_get_be(soc, peer_mdata);
	dp_rx_set_msdu_lmac_id(nbuf, peer_mdata);
	dp_rx_set_link_id_be(nbuf, peer_mdata);

	/* to indicate whether this msdu is rx offload */
	pkt_capture_offload =
		DP_PEER_METADATA_OFFLOAD_GET_BE(peer_mdata);

	/*
	 * save msdu flags first, last and continuation msdu in
	 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
	 * length to nbuf->cb. This ensures the info required for
	 * per pkt processing is always in the same cache line.
	 * This helps in improving throughput for smaller pkt
	 * sizes.
	 */
	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);

	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
		qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);

	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
		qdf_nbuf_set_da_mcbc(nbuf, 1);

	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
		qdf_nbuf_set_da_valid(nbuf, 1);

	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
		qdf_nbuf_set_sa_valid(nbuf, 1);

	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_INTRA_BSS)
		qdf_nbuf_set_intra_bss(nbuf, 1);

	if (qdf_likely(mpdu_desc_info.mpdu_flags &
		       HAL_MPDU_F_QOS_CONTROL_VALID))
		qdf_nbuf_set_tid_val(nbuf, mpdu_desc_info.tid);

	/* set sw exception */
	qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
			nbuf,
			hal_rx_sw_exception_get_be(ring_desc));

	QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_desc_info.msdu_len;
	QDF_NBUF_CB_RX_CTX_ID(nbuf) = reo_ring_num;

	return pkt_capture_offload;
}
/* L3 header padding is parsed directly out of the RX pkt TLVs */
static inline uint8_t hal_rx_get_l3_pad_bytes_be(qdf_nbuf_t nbuf,
						 uint8_t *rx_tlv_hdr)
{
	return HAL_RX_TLV_L3_HEADER_PADDING_GET(rx_tlv_hdr);
}
/* WBM error path: msdu-continuation flag comes straight from the ring desc */
static inline uint8_t
dp_rx_wbm_err_msdu_continuation_get(struct dp_soc *soc,
				    hal_ring_desc_t ring_desc,
				    qdf_nbuf_t nbuf)
{
	return hal_rx_wbm_err_msdu_continuation_get(soc->hal_soc,
						    ring_desc);
}
  716. #else
/* no-op on this platform (CONFIG_NBUF_AP_PLATFORM) */
static inline void
dp_rx_set_link_id_be(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
}
/* no-op on this platform (CONFIG_NBUF_AP_PLATFORM) */
static inline void
dp_rx_set_mpdu_seq_number_be(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
}
  725. static inline uint16_t
  726. dp_rx_get_peer_id_be(qdf_nbuf_t nbuf)
  727. {
  728. uint32_t peer_metadata = QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf);
  729. return ((peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
  730. DP_BE_PEER_METADATA_PEER_ID_SHIFT);
  731. }
/* Cache the raw REO desc words in nbuf->cb for deferred per-pkt parsing */
static inline void
dp_rx_set_mpdu_msdu_desc_info_in_nbuf(qdf_nbuf_t nbuf,
				      uint32_t mpdu_desc_info,
				      uint32_t peer_mdata,
				      uint32_t msdu_desc_info)
{
	QDF_NBUF_CB_RX_MPDU_DESC_INFO_1(nbuf) = mpdu_desc_info;
	QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf) = peer_mdata;
	QDF_NBUF_CB_RX_MSDU_DESC_INFO(nbuf) = msdu_desc_info;
}
  742. static inline uint8_t dp_rx_copy_desc_info_in_nbuf_cb(struct dp_soc *soc,
  743. hal_ring_desc_t ring_desc,
  744. qdf_nbuf_t nbuf,
  745. uint8_t reo_ring_num)
  746. {
  747. uint32_t mpdu_desc_info = 0;
  748. uint32_t msdu_desc_info = 0;
  749. uint32_t peer_mdata = 0;
  750. /* get REO mpdu & msdu desc info */
  751. hal_rx_get_mpdu_msdu_desc_info_be(ring_desc,
  752. &mpdu_desc_info,
  753. &peer_mdata,
  754. &msdu_desc_info);
  755. dp_rx_set_mpdu_msdu_desc_info_in_nbuf(nbuf,
  756. mpdu_desc_info,
  757. peer_mdata,
  758. msdu_desc_info);
  759. return 0;
  760. }
/* L3 pad was recorded in nbuf->cb: MSB set means 2 pad bytes, else 0 */
static inline uint8_t hal_rx_get_l3_pad_bytes_be(qdf_nbuf_t nbuf,
						 uint8_t *rx_tlv_hdr)
{
	return QDF_NBUF_CB_RX_L3_PAD_MSB(nbuf) ? 2 : 0;
}
/* continuation state was already captured in the nbuf control block */
static inline uint8_t
dp_rx_wbm_err_msdu_continuation_get(struct dp_soc *soc,
				    hal_ring_desc_t ring_desc,
				    qdf_nbuf_t nbuf)
{
	return qdf_nbuf_is_rx_chfrag_cont(nbuf);
}
  773. #endif /* CONFIG_NBUF_AP_PLATFORM */
/**
 * dp_rx_wbm_err_copy_desc_info_in_nbuf(): API to copy WBM dest ring
 * descriptor information in nbuf CB/TLV
 *
 * @soc: pointer to Soc structure
 * @ring_desc: wbm dest ring descriptor
 * @nbuf: nbuf to save descriptor information
 * @pool_id: pool id part of wbm error info
 *
 * Return: wbm error information details
 */
static inline uint32_t
dp_rx_wbm_err_copy_desc_info_in_nbuf(struct dp_soc *soc,
				     hal_ring_desc_t ring_desc,
				     qdf_nbuf_t nbuf,
				     uint8_t pool_id)
{
	uint32_t mpdu_desc_info = 0;
	uint32_t msdu_desc_info = 0;
	uint32_t peer_mdata = 0;
	union hal_wbm_err_info_u wbm_err = { 0 };

	/* get WBM mpdu & msdu desc info */
	hal_rx_wbm_err_mpdu_msdu_info_get_be(ring_desc,
					     &wbm_err.info,
					     &mpdu_desc_info,
					     &msdu_desc_info,
					     &peer_mdata);

	/* pool id is supplied by the caller, patch it into the error info
	 * before it is cached in the nbuf
	 */
	wbm_err.info_bit.pool_id = pool_id;

	/* stash raw desc words + error info in nbuf for later parsing */
	dp_rx_set_mpdu_msdu_desc_info_in_nbuf(nbuf,
					      mpdu_desc_info,
					      peer_mdata,
					      msdu_desc_info);
	dp_rx_set_wbm_err_info_in_nbuf(soc, nbuf, wbm_err);

	return wbm_err.info;
}
  809. #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
  810. struct dp_soc *
  811. dp_get_soc_by_chip_id_be(struct dp_soc *soc, uint8_t chip_id);
  812. #else
/* Single-chip build: any chip id maps to the local soc */
static inline struct dp_soc *
dp_get_soc_by_chip_id_be(struct dp_soc *soc, uint8_t chip_id)
{
	return soc;
}
  818. #endif
  819. #endif