dp_be_rx.h 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012
  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #ifndef _DP_BE_RX_H_
  20. #define _DP_BE_RX_H_
  21. #include <dp_types.h>
  22. #include "dp_be.h"
  23. #include "dp_peer.h"
  24. #include <dp_rx.h>
  25. #include "hal_be_rx.h"
  26. #include "hal_be_rx_tlv.h"
  27. /*
  28. * dp_be_intrabss_params
  29. *
  30. * @dest_soc: dest soc to forward the packet to
  31. * @tx_vdev_id: vdev id retrieved from dest peer
  32. */
  33. struct dp_be_intrabss_params {
  34. struct dp_soc *dest_soc;
  35. uint8_t tx_vdev_id;
  36. };
#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL
 *			     pkt with DA not equal to vdev mac addr,
 *			     fwd is not allowed.
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @link_id: link id on which the packet is received
 *
 * Return: true if it is forwarded else false
 */
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
			   struct dp_txrx_peer *ta_txrx_peer,
			   uint8_t *rx_tlv_hdr,
			   qdf_nbuf_t nbuf,
			   uint8_t link_id);
#endif
  55. /**
  56. * dp_rx_intrabss_mcast_handler_be() - intrabss mcast handler
  57. * @soc: core txrx main context
  58. * @ta_txrx_peer: source txrx_peer entry
  59. * @nbuf_copy: nbuf that has to be intrabss forwarded
  60. * @tid_stats: tid_stats structure
  61. * @link_id: link id on which the packet is received
  62. *
  63. * Return: true if it is forwarded else false
  64. */
  65. bool
  66. dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
  67. struct dp_txrx_peer *ta_txrx_peer,
  68. qdf_nbuf_t nbuf_copy,
  69. struct cdp_tid_rx_stats *tid_stats,
  70. uint8_t link_id);
  71. void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
  72. uint32_t *msg_word,
  73. void *rx_filter);
  74. /**
  75. * dp_rx_process_be() - Brain of the Rx processing functionality
  76. * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
  77. * @int_ctx: per interrupt context
  78. * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
  79. * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
  80. * @quota: No. of units (packets) that can be serviced in one shot.
  81. *
  82. * This function implements the core of Rx functionality. This is
  83. * expected to handle only non-error frames.
  84. *
  85. * Return: uint32_t: No. of elements processed
  86. */
  87. uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
  88. hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
  89. uint32_t quota);
  90. /**
  91. * dp_rx_desc_pool_init_be() - Initialize Rx Descriptor pool(s)
  92. * @soc: Handle to DP Soc structure
  93. * @rx_desc_pool: Rx descriptor pool handler
  94. * @pool_id: Rx descriptor pool ID
  95. *
  96. * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
  97. */
  98. QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
  99. struct rx_desc_pool *rx_desc_pool,
  100. uint32_t pool_id);
  101. /**
  102. * dp_rx_desc_pool_deinit_be() - De-initialize Rx Descriptor pool(s)
  103. * @soc: Handle to DP Soc structure
  104. * @rx_desc_pool: Rx descriptor pool handler
  105. * @pool_id: Rx descriptor pool ID
  106. *
  107. * Return: None
  108. */
  109. void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
  110. struct rx_desc_pool *rx_desc_pool,
  111. uint32_t pool_id);
  112. /**
  113. * dp_wbm_get_rx_desc_from_hal_desc_be() - Get corresponding Rx Desc
  114. * address from WBM ring Desc
  115. * @soc: Handle to DP Soc structure
  116. * @ring_desc: ring descriptor structure pointer
  117. * @r_rx_desc: pointer to a pointer of Rx Desc
  118. *
  119. * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
  120. */
  121. QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
  122. void *ring_desc,
  123. struct dp_rx_desc **r_rx_desc);
  124. /**
  125. * dp_rx_desc_cookie_2_va_be() - Convert RX Desc cookie ID to VA
  126. * @soc:Handle to DP Soc structure
  127. * @cookie: cookie used to lookup virtual address
  128. *
  129. * Return: Rx descriptor virtual address
  130. */
  131. struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
  132. uint32_t cookie);
#if !defined(DP_FEATURE_HW_COOKIE_CONVERSION) || \
	defined(DP_HW_COOKIE_CONVERT_EXCEPTION)
/**
 * dp_rx_desc_sw_cc_check() - check if RX desc VA is got correctly,
 *			      if not, do SW cookie conversion.
 * @soc: Handle to DP Soc structure
 * @rx_buf_cookie: RX desc cookie ID
 * @r_rx_desc: double pointer for RX desc
 *
 * Return: None
 */
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
		       uint32_t rx_buf_cookie,
		       struct dp_rx_desc **r_rx_desc)
{
	if (qdf_unlikely(!(*r_rx_desc))) {
		*r_rx_desc = (struct dp_rx_desc *)
				dp_cc_desc_find(soc,
						rx_buf_cookie);
	}
}
#else
/* HW cookie conversion always yields a valid VA here: nothing to fix up. */
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
		       uint32_t rx_buf_cookie,
		       struct dp_rx_desc **r_rx_desc)
{
}
#endif /* !DP_FEATURE_HW_COOKIE_CONVERSION || DP_HW_COOKIE_CONVERT_EXCEPTION */
struct dp_rx_desc *dp_rx_desc_ppeds_cookie_2_va(struct dp_soc *soc,
						unsigned long cookie);

/* BE peer metadata does not encode an offload indication: always 0. */
#define DP_PEER_METADATA_OFFLOAD_GET_BE(_peer_metadata)		(0)

/* Extract a bit-field from peer metadata given its shift and mask. */
#define HTT_RX_PEER_META_DATA_FIELD_GET(_var, _field_s, _field_m) \
	(((_var) & (_field_m)) >> (_field_s))
#ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
/**
 * dp_rx_peer_metadata_peer_id_get_be() - extract peer id (with the ML-peer
 *					  indication folded in) from metadata
 * @soc: Handle to DP Soc structure; carries the HTT-negotiated field
 *	 shifts/masks used for the extraction
 * @peer_metadata: peer metadata word from the RX descriptor
 *
 * Return: peer id with the ml_peer_valid bit placed at soc->peer_id_shift
 */
static inline uint16_t
dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	uint8_t ml_peer_valid;
	uint16_t peer_id;

	peer_id = HTT_RX_PEER_META_DATA_FIELD_GET(peer_metadata,
						  soc->htt_peer_id_s,
						  soc->htt_peer_id_m);
	ml_peer_valid = HTT_RX_PEER_META_DATA_FIELD_GET(
				peer_metadata,
				soc->htt_mld_peer_valid_s,
				soc->htt_mld_peer_valid_m);

	return (peer_id | (ml_peer_valid << soc->peer_id_shift));
}
#else
/* Combine ml_peer_valid and peer_id field */
#define DP_BE_PEER_METADATA_PEER_ID_MASK	0x00003fff
#define DP_BE_PEER_METADATA_PEER_ID_SHIFT	0

/**
 * dp_rx_peer_metadata_peer_id_get_be() - extract the combined
 *					  ml_peer_valid + peer id field
 * @soc: Handle to DP Soc structure (unused in this variant)
 * @peer_metadata: peer metadata word from the RX descriptor
 *
 * Return: low 14 bits of the metadata (combined peer id field)
 */
static inline uint16_t
dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	return ((peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
		DP_BE_PEER_METADATA_PEER_ID_SHIFT);
}
#endif
/**
 * dp_rx_peer_metadata_vdev_id_get_be() - extract vdev id from peer metadata
 * @soc: Handle to DP Soc structure; carries the HTT-negotiated vdev id
 *	 shift/mask
 * @peer_metadata: peer metadata word from the RX descriptor
 *
 * Return: vdev id field
 */
static inline uint16_t
dp_rx_peer_metadata_vdev_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	return HTT_RX_PEER_META_DATA_FIELD_GET(peer_metadata,
					       soc->htt_vdev_id_s,
					       soc->htt_vdev_id_m);
}

/**
 * dp_rx_peer_metadata_lmac_id_get_be() - extract lmac id from peer metadata
 * @peer_metadata: peer metadata word from the RX descriptor
 *
 * Return: lmac id field (V1 metadata layout)
 */
static inline uint8_t
dp_rx_peer_metadata_lmac_id_get_be(uint32_t peer_metadata)
{
	return HTT_RX_PEER_META_DATA_V1_LMAC_ID_GET(peer_metadata);
}
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_rx_nf_process() - Near Full state handler for RX rings.
 * @int_ctx: interrupt context
 * @hal_ring_hdl: Rx ring handle
 * @reo_ring_num: RX ring number
 * @quota: Quota of work to be done
 *
 * Return: work done in the handler
 */
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota);
#else
/* Near-full IRQ handling disabled: report no work done. */
static inline
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota)
{
	return 0;
}
#endif /* WLAN_FEATURE_NEAR_FULL_IRQ */
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/* Multi-chip MLO: chip-id / idle-bm-id based dp_soc lookups (see .c). */
struct dp_soc *
dp_rx_replenish_soc_get(struct dp_soc *soc, uint8_t chip_id);

struct dp_soc *
dp_soc_get_by_idle_bm_id(struct dp_soc *soc, uint8_t idle_bm_id);

uint8_t dp_soc_get_num_soc_be(struct dp_soc *soc);
#else
/* Single-chip build: the local soc is the only soc. */
static inline struct dp_soc *
dp_rx_replenish_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	return soc;
}

static inline uint8_t
dp_soc_get_num_soc_be(struct dp_soc *soc)
{
	return 1;
}
#endif
  248. static inline QDF_STATUS
  249. dp_peer_rx_reorder_q_setup_per_tid(struct dp_peer *peer,
  250. uint32_t tid_bitmap,
  251. uint32_t ba_window_size)
  252. {
  253. int tid;
  254. struct dp_rx_tid *rx_tid;
  255. struct dp_soc *soc = peer->vdev->pdev->soc;
  256. if (!soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
  257. dp_peer_debug("%pK: rx_reorder_queue_setup NULL bitmap 0x%x",
  258. soc, tid_bitmap);
  259. return QDF_STATUS_SUCCESS;
  260. }
  261. for (tid = 0; tid < DP_MAX_TIDS; tid++) {
  262. if (!(BIT(tid) & tid_bitmap))
  263. continue;
  264. rx_tid = &peer->rx_tid[tid];
  265. if (!rx_tid->hw_qdesc_paddr) {
  266. tid_bitmap &= ~BIT(tid);
  267. continue;
  268. }
  269. if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
  270. soc->ctrl_psoc,
  271. peer->vdev->pdev->pdev_id,
  272. peer->vdev->vdev_id,
  273. peer->mac_addr.raw,
  274. rx_tid->hw_qdesc_paddr,
  275. tid, tid,
  276. 1, ba_window_size)) {
  277. dp_peer_err("%pK: Fail to send reo q to FW. tid %d",
  278. soc, tid);
  279. return QDF_STATUS_E_FAILURE;
  280. }
  281. }
  282. if (!tid_bitmap) {
  283. dp_peer_err("tid_bitmap=0. All tids setup fail");
  284. return QDF_STATUS_E_FAILURE;
  285. }
  286. return QDF_STATUS_SUCCESS;
  287. }
  288. static inline QDF_STATUS
  289. dp_peer_multi_tid_params_setup(struct dp_peer *peer,
  290. uint32_t tid_bitmap,
  291. uint32_t ba_window_size,
  292. struct multi_rx_reorder_queue_setup_params *tid_params)
  293. {
  294. struct dp_rx_tid *rx_tid;
  295. int tid;
  296. tid_params->peer_macaddr = peer->mac_addr.raw;
  297. tid_params->tid_bitmap = tid_bitmap;
  298. tid_params->vdev_id = peer->vdev->vdev_id;
  299. for (tid = 0; tid < DP_MAX_TIDS; tid++) {
  300. if (!(BIT(tid) & tid_bitmap))
  301. continue;
  302. rx_tid = &peer->rx_tid[tid];
  303. if (!rx_tid->hw_qdesc_paddr) {
  304. tid_params->tid_bitmap &= ~BIT(tid);
  305. continue;
  306. }
  307. tid_params->tid_num++;
  308. tid_params->queue_params_list[tid].hw_qdesc_paddr =
  309. rx_tid->hw_qdesc_paddr;
  310. tid_params->queue_params_list[tid].queue_no = tid;
  311. tid_params->queue_params_list[tid].ba_window_size_valid = 1;
  312. tid_params->queue_params_list[tid].ba_window_size =
  313. ba_window_size;
  314. }
  315. if (!tid_params->tid_bitmap) {
  316. dp_peer_err("tid_bitmap=0. All tids setup fail");
  317. return QDF_STATUS_E_FAILURE;
  318. }
  319. return QDF_STATUS_SUCCESS;
  320. }
  321. static inline QDF_STATUS
  322. dp_peer_rx_reorder_multi_q_setup(struct dp_peer *peer,
  323. uint32_t tid_bitmap,
  324. uint32_t ba_window_size)
  325. {
  326. QDF_STATUS status;
  327. struct dp_soc *soc = peer->vdev->pdev->soc;
  328. struct multi_rx_reorder_queue_setup_params tid_params = {0};
  329. if (!soc->cdp_soc.ol_ops->peer_multi_rx_reorder_queue_setup) {
  330. dp_peer_debug("%pK: callback NULL", soc);
  331. return QDF_STATUS_SUCCESS;
  332. }
  333. status = dp_peer_multi_tid_params_setup(peer, tid_bitmap,
  334. ba_window_size,
  335. &tid_params);
  336. if (qdf_unlikely(QDF_IS_STATUS_ERROR(status)))
  337. return status;
  338. if (soc->cdp_soc.ol_ops->peer_multi_rx_reorder_queue_setup(
  339. soc->ctrl_psoc,
  340. peer->vdev->pdev->pdev_id,
  341. &tid_params)) {
  342. dp_peer_err("%pK: multi_reorder_q_setup fail. tid_bitmap 0x%x",
  343. soc, tid_bitmap);
  344. return QDF_STATUS_E_FAILURE;
  345. }
  346. return QDF_STATUS_SUCCESS;
  347. }
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_igmp_handler() - Rx handler for Mcast packets
 * @soc: Handle to DP Soc structure
 * @vdev: DP vdev handle
 * @peer: DP peer handle
 * @nbuf: nbuf to be enqueued
 * @link_id: link id on which the packet is received
 *
 * Return: true when packet sent to stack, false failure
 */
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *peer,
			    qdf_nbuf_t nbuf,
			    uint8_t link_id);
  364. /**
  365. * dp_peer_rx_reorder_queue_setup_be() - Send reo queue
  366. * setup wmi cmd to FW per peer type
  367. * @soc: DP Soc handle
  368. * @peer: dp peer to operate on
  369. * @tid_bitmap: TIDs to be set up
  370. * @ba_window_size: BlockAck window size
  371. *
  372. * Return: 0 - success, others - failure
  373. */
  374. static inline
  375. QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
  376. struct dp_peer *peer,
  377. uint32_t tid_bitmap,
  378. uint32_t ba_window_size)
  379. {
  380. uint8_t i;
  381. struct dp_mld_link_peers link_peers_info;
  382. struct dp_peer *link_peer;
  383. struct dp_rx_tid *rx_tid;
  384. int tid;
  385. QDF_STATUS status;
  386. if (hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
  387. /* Some BE targets dont require WMI and use shared
  388. * table managed by host for storing Reo queue ref structs
  389. */
  390. if (IS_MLO_DP_LINK_PEER(peer) ||
  391. peer->peer_id == HTT_INVALID_PEER) {
  392. /* Return if this is for MLD link peer and table
  393. * is not used in MLD link peer case as MLD peer's
  394. * qref is written to LUT in peer setup or peer map.
  395. * At this point peer setup for link peer is called
  396. * before peer map, hence peer id is not assigned.
  397. * This could happen if peer_setup is called before
  398. * host receives HTT peer map. In this case return
  399. * success with no op and let peer map handle
  400. * writing the reo_qref to LUT.
  401. */
  402. dp_peer_debug("Invalid peer id for dp_peer:%pK", peer);
  403. return QDF_STATUS_SUCCESS;
  404. }
  405. for (tid = 0; tid < DP_MAX_TIDS; tid++) {
  406. if (!((1 << tid) & tid_bitmap))
  407. continue;
  408. rx_tid = &peer->rx_tid[tid];
  409. if (!rx_tid->hw_qdesc_paddr) {
  410. tid_bitmap &= ~BIT(tid);
  411. continue;
  412. }
  413. hal_reo_shared_qaddr_write(soc->hal_soc,
  414. peer->peer_id,
  415. tid, peer->rx_tid[tid].
  416. hw_qdesc_paddr);
  417. if (!tid_bitmap) {
  418. dp_peer_err("tid_bitmap=0. All tids setup fail");
  419. return QDF_STATUS_E_FAILURE;
  420. }
  421. }
  422. return QDF_STATUS_SUCCESS;
  423. }
  424. /* when (!hal_reo_shared_qaddr_is_enable(soc->hal_soc)) is true: */
  425. if (IS_MLO_DP_MLD_PEER(peer)) {
  426. /* get link peers with reference */
  427. dp_get_link_peers_ref_from_mld_peer(soc, peer,
  428. &link_peers_info,
  429. DP_MOD_ID_CDP);
  430. /* send WMI cmd to each link peers */
  431. for (i = 0; i < link_peers_info.num_links; i++) {
  432. link_peer = link_peers_info.link_peers[i];
  433. if (soc->features.multi_rx_reorder_q_setup_support)
  434. status = dp_peer_rx_reorder_multi_q_setup(
  435. link_peer, tid_bitmap, ba_window_size);
  436. else
  437. status = dp_peer_rx_reorder_q_setup_per_tid(
  438. link_peer,
  439. tid_bitmap,
  440. ba_window_size);
  441. if (QDF_IS_STATUS_ERROR(status)) {
  442. dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
  443. return status;
  444. }
  445. }
  446. /* release link peers reference */
  447. dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
  448. } else if (peer->peer_type == CDP_LINK_PEER_TYPE) {
  449. if (soc->features.multi_rx_reorder_q_setup_support)
  450. return dp_peer_rx_reorder_multi_q_setup(peer,
  451. tid_bitmap,
  452. ba_window_size);
  453. else
  454. return dp_peer_rx_reorder_q_setup_per_tid(peer,
  455. tid_bitmap,
  456. ba_window_size);
  457. } else {
  458. dp_peer_err("invalid peer type %d", peer->peer_type);
  459. return QDF_STATUS_E_FAILURE;
  460. }
  461. return QDF_STATUS_SUCCESS;
  462. }
#else
/* Non-MLO build: no link peers to fan out to; set up this peer directly. */
static inline
QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
					     struct dp_peer *peer,
					     uint32_t tid_bitmap,
					     uint32_t ba_window_size)
{
	if (soc->features.multi_rx_reorder_q_setup_support)
		return dp_peer_rx_reorder_multi_q_setup(peer,
							tid_bitmap,
							ba_window_size);
	else
		return dp_peer_rx_reorder_q_setup_per_tid(peer,
							  tid_bitmap,
							  ba_window_size);
}
#endif /* WLAN_FEATURE_11BE_MLO */
#ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
/**
 * dp_rx_prefetch_nbuf_data_be() - prefetch the next nbuf's cb and payload
 * @nbuf: current nbuf (unused; kept for API symmetry)
 * @next: next nbuf in the list, whose fields are warmed into cache
 *
 * Return: None
 */
static inline
void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
	if (next) {
		/* prefetch skb->next and first few bytes of skb->cb */
		qdf_prefetch(next);
		/* skb->cb spread across 2 cache lines hence below prefetch */
		qdf_prefetch(&next->_skb_refdst);
		qdf_prefetch(&next->protocol);
		qdf_prefetch(&next->data);
		qdf_prefetch(next->data);
		qdf_prefetch(next->data + 64);
	}
}
#else
/* Prefetch feature disabled: no-op. */
static inline
void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
}
#endif
  501. #ifdef QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH
  502. /**
  503. * dp_rx_va_prefetch() - function to prefetch the SW desc
  504. * @last_prefetched_hw_desc: HW desc
  505. *
  506. * Return: prefetched Rx descriptor virtual address
  507. */
  508. static inline
  509. void *dp_rx_va_prefetch(void *last_prefetched_hw_desc)
  510. {
  511. void *prefetch_desc;
  512. prefetch_desc = (void *)hal_rx_get_reo_desc_va(last_prefetched_hw_desc);
  513. qdf_prefetch(prefetch_desc);
  514. return prefetch_desc;
  515. }
  516. /**
  517. * dp_rx_prefetch_hw_sw_nbuf_32_byte_desc() - function to prefetch HW and SW
  518. * descriptors
  519. * @soc: DP soc context
  520. * @hal_soc: Handle to HAL Soc structure
  521. * @num_entries: valid number of HW descriptors
  522. * @hal_ring_hdl: Destination ring pointer
  523. * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
  524. * @last_prefetched_sw_desc: input & output param of last prefetch SW desc
  525. *
  526. * Return: None
  527. */
  528. static inline void
  529. dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
  530. hal_soc_handle_t hal_soc,
  531. uint32_t num_entries,
  532. hal_ring_handle_t hal_ring_hdl,
  533. hal_ring_desc_t *last_prefetched_hw_desc,
  534. struct dp_rx_desc **last_prefetched_sw_desc)
  535. {
  536. if (*last_prefetched_sw_desc) {
  537. qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
  538. qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
  539. }
  540. if (num_entries) {
  541. *last_prefetched_sw_desc =
  542. dp_rx_va_prefetch(*last_prefetched_hw_desc);
  543. if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
  544. *last_prefetched_hw_desc =
  545. hal_srng_dst_prefetch_next_cached_desc(hal_soc,
  546. hal_ring_hdl,
  547. (uint8_t *)*last_prefetched_hw_desc);
  548. else
  549. *last_prefetched_hw_desc =
  550. hal_srng_dst_get_next_32_byte_desc(hal_soc,
  551. hal_ring_hdl,
  552. (uint8_t *)*last_prefetched_hw_desc);
  553. }
  554. }
#else
/* HW/SW descriptor prefetch disabled: no-op. */
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
				       hal_soc_handle_t hal_soc,
				       uint32_t num_entries,
				       hal_ring_handle_t hal_ring_hdl,
				       hal_ring_desc_t *last_prefetched_hw_desc,
				       struct dp_rx_desc **last_prefetched_sw_desc)
{
}
#endif
#ifdef CONFIG_WORD_BASED_TLV
/**
 * dp_rx_get_reo_qdesc_addr_be() - API to get qdesc address of reo
 *				   entrance ring desc
 * @hal_soc: Handle to HAL Soc structure
 * @dst_ring_desc: reo dest ring descriptor (used for Lithium DP)
 * @buf: pointer to the start of RX PKT TLV headers
 * @txrx_peer: pointer to txrx_peer
 * @tid: tid value
 *
 * Return: qdesc address in reo destination ring buffer
 */
static inline
uint64_t dp_rx_get_reo_qdesc_addr_be(hal_soc_handle_t hal_soc,
				     uint8_t *dst_ring_desc,
				     uint8_t *buf,
				     struct dp_txrx_peer *txrx_peer,
				     unsigned int tid)
{
	struct dp_peer *peer = NULL;
	uint64_t qdesc_addr = 0;

	if (hal_reo_shared_qaddr_is_enable(hal_soc)) {
		/* shared qref table path: the peer id stands in for the
		 * qdesc address
		 */
		qdesc_addr = (uint64_t)txrx_peer->peer_id;
	} else {
		/* take a temporary peer ref to read the per-tid hw qdesc
		 * paddr; returns 0 if the peer cannot be referenced
		 */
		peer = dp_peer_get_ref_by_id(txrx_peer->vdev->pdev->soc,
					     txrx_peer->peer_id,
					     DP_MOD_ID_CONFIG);
		if (!peer)
			return 0;

		qdesc_addr = (uint64_t)peer->rx_tid[tid].hw_qdesc_paddr;
		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
	}
	return qdesc_addr;
}
#else
/* Full-TLV build: the qdesc address is read directly from the HAL. */
static inline
uint64_t dp_rx_get_reo_qdesc_addr_be(hal_soc_handle_t hal_soc,
				     uint8_t *dst_ring_desc,
				     uint8_t *buf,
				     struct dp_txrx_peer *txrx_peer,
				     unsigned int tid)
{
	return hal_rx_get_qdesc_addr(hal_soc, dst_ring_desc, buf);
}
#endif
  612. /**
  613. * dp_rx_wbm_err_reap_desc_be() - Function to reap and replenish
  614. * WBM RX Error descriptors
  615. *
  616. * @int_ctx: pointer to DP interrupt context
  617. * @soc: core DP main context
  618. * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, to be serviced
  619. * @quota: No. of units (packets) that can be serviced in one shot.
  620. * @rx_bufs_used: No. of descriptors reaped
  621. *
  622. * This function implements the core Rx functionality like reap and
  623. * replenish the RX error ring Descriptors, and create a nbuf list
  624. * out of it. It also reads wbm error information from descriptors
  625. * and update the nbuf tlv area.
  626. *
  627. * Return: qdf_nbuf_t: head pointer to the nbuf list created
  628. */
  629. qdf_nbuf_t
  630. dp_rx_wbm_err_reap_desc_be(struct dp_intr *int_ctx, struct dp_soc *soc,
  631. hal_ring_handle_t hal_ring_hdl, uint32_t quota,
  632. uint32_t *rx_bufs_used);
  633. /**
  634. * dp_rx_null_q_desc_handle_be() - Function to handle NULL Queue
  635. * descriptor violation on either a
  636. * REO or WBM ring
  637. *
  638. * @soc: core DP main context
  639. * @nbuf: buffer pointer
  640. * @rx_tlv_hdr: start of rx tlv header
  641. * @pool_id: mac id
  642. * @txrx_peer: txrx peer handle
  643. * @is_reo_exception: flag to check if the error is from REO or WBM
  644. * @link_id: link Id on which the packet is received
  645. *
  646. * This function handles NULL queue descriptor violations arising out
  647. * a missing REO queue for a given peer or a given TID. This typically
  648. * may happen if a packet is received on a QOS enabled TID before the
  649. * ADDBA negotiation for that TID, when the TID queue is setup. Or
  650. * it may also happen for MC/BC frames if they are not routed to the
  651. * non-QOS TID queue, in the absence of any other default TID queue.
  652. * This error can show up both in a REO destination or WBM release ring.
  653. *
  654. * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
  655. * if nbuf could not be handled or dropped.
  656. */
  657. QDF_STATUS
  658. dp_rx_null_q_desc_handle_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
  659. uint8_t *rx_tlv_hdr, uint8_t pool_id,
  660. struct dp_txrx_peer *txrx_peer,
  661. bool is_reo_exception, uint8_t link_id);
#if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
/**
 * dp_rx_set_msdu_lmac_id() - tag nbuf with the lmac id from peer metadata
 * @nbuf: rx buffer
 * @peer_mdata: peer metadata word from the RX descriptor
 *
 * Return: None
 */
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
	uint8_t lmac_id;

	lmac_id = dp_rx_peer_metadata_lmac_id_get_be(peer_mdata);
	qdf_nbuf_set_lmac_id(nbuf, lmac_id);
}
#else
/* Per-lmac packet stats disabled: no tagging needed. */
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
}
#endif
#ifndef CONFIG_NBUF_AP_PLATFORM
#if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
/**
 * dp_rx_peer_mdata_link_id_get_be() - extract 1-based logical link id
 *				       from peer metadata
 * @peer_mdata: peer metadata word from the RX descriptor (V1A layout)
 *
 * Return: logical link id + 1, or 0 when the result exceeds
 *	   DP_MAX_MLO_LINKS
 */
static inline uint8_t
dp_rx_peer_mdata_link_id_get_be(uint32_t peer_mdata)
{
	uint8_t link_id;

	link_id = HTT_RX_PEER_META_DATA_V1A_LOGICAL_LINK_ID_GET(peer_mdata) + 1;
	if (link_id > DP_MAX_MLO_LINKS)
		link_id = 0;

	return link_id;
}
#else
/* Link stats disabled: no link id available in metadata. */
static inline uint8_t
dp_rx_peer_mdata_link_id_get_be(uint32_t peer_metadata)
{
	return 0;
}
#endif /* WLAN_FEATURE_11BE_MLO && DP_MLO_LINK_STATS_SUPPORT */
/* Cache the MPDU sequence number from the RX TLVs in nbuf cb. */
static inline void
dp_rx_set_mpdu_seq_number_be(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	QDF_NBUF_CB_RX_MPDU_SEQ_NUM(nbuf) =
		hal_rx_mpdu_sequence_number_get_be(rx_tlv_hdr);
}

/* Cache the (1-based) logical link id from peer metadata in nbuf cb. */
static inline void
dp_rx_set_link_id_be(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
	uint8_t logical_link_id;

	logical_link_id = dp_rx_peer_mdata_link_id_get_be(peer_mdata);
	QDF_NBUF_CB_RX_LOGICAL_LINK_ID(nbuf) = logical_link_id;
}

/* Peer id was already parsed into nbuf cb on this platform. */
static inline uint16_t
dp_rx_get_peer_id_be(qdf_nbuf_t nbuf)
{
	return QDF_NBUF_CB_RX_PEER_ID(nbuf);
}
/* No-op on this platform: descriptor info is fully parsed into individual
 * nbuf cb fields by dp_rx_copy_desc_info_in_nbuf_cb() rather than stashed
 * as raw words (contrast with the CONFIG_NBUF_AP_PLATFORM variant).
 */
static inline void
dp_rx_set_mpdu_msdu_desc_info_in_nbuf(qdf_nbuf_t nbuf,
				      uint32_t mpdu_desc_info,
				      uint32_t peer_mdata,
				      uint32_t msdu_desc_info)
{
}
/**
 * dp_rx_copy_desc_info_in_nbuf_cb() - cache REO ring descriptor fields
 *				       into the nbuf control block
 * @soc: DP soc handle
 * @ring_desc: REO destination ring descriptor
 * @nbuf: rx buffer to tag
 * @reo_ring_num: REO ring number, saved as the RX context id
 *
 * Return: non-zero when peer metadata marks this msdu as rx-offloaded
 *	   (packet-capture offload); always 0 for BE metadata as
 *	   currently defined
 */
static inline uint8_t dp_rx_copy_desc_info_in_nbuf_cb(struct dp_soc *soc,
						      hal_ring_desc_t ring_desc,
						      qdf_nbuf_t nbuf,
						      uint8_t reo_ring_num)
{
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_rx_msdu_desc_info msdu_desc_info;
	uint8_t pkt_capture_offload = 0;
	uint32_t peer_mdata = 0;

	qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
	qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));

	/* Get MPDU DESC info */
	hal_rx_mpdu_desc_info_get_be(ring_desc, &mpdu_desc_info);

	/* Get MSDU DESC info */
	hal_rx_msdu_desc_info_get_be(ring_desc, &msdu_desc_info);

	/* Set the end bit to identify the last buffer in MPDU */
	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
		qdf_nbuf_set_rx_retry_flag(nbuf, 1);

	if (qdf_unlikely(mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RAW_AMPDU))
		qdf_nbuf_set_raw_frame(nbuf, 1);

	peer_mdata = mpdu_desc_info.peer_meta_data;
	QDF_NBUF_CB_RX_PEER_ID(nbuf) =
		dp_rx_peer_metadata_peer_id_get_be(soc, peer_mdata);
	QDF_NBUF_CB_RX_VDEV_ID(nbuf) =
		dp_rx_peer_metadata_vdev_id_get_be(soc, peer_mdata);
	dp_rx_set_msdu_lmac_id(nbuf, peer_mdata);
	dp_rx_set_link_id_be(nbuf, peer_mdata);

	/* to indicate whether this msdu is rx offload */
	pkt_capture_offload =
		DP_PEER_METADATA_OFFLOAD_GET_BE(peer_mdata);

	/*
	 * save msdu flags first, last and continuation msdu in
	 * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
	 * length to nbuf->cb. This ensures the info required for
	 * per pkt processing is always in the same cache line.
	 * This helps in improving throughput for smaller pkt
	 * sizes.
	 */
	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);

	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
		qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);

	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
		qdf_nbuf_set_da_mcbc(nbuf, 1);

	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
		qdf_nbuf_set_da_valid(nbuf, 1);

	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
		qdf_nbuf_set_sa_valid(nbuf, 1);

	if (msdu_desc_info.msdu_flags & HAL_MSDU_F_INTRA_BSS)
		qdf_nbuf_set_intra_bss(nbuf, 1);

	if (qdf_likely(mpdu_desc_info.mpdu_flags &
		       HAL_MPDU_F_QOS_CONTROL_VALID))
		qdf_nbuf_set_tid_val(nbuf, mpdu_desc_info.tid);

	/* set sw exception */
	qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
			nbuf,
			hal_rx_sw_exception_get_be(ring_desc));

	QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_desc_info.msdu_len;
	QDF_NBUF_CB_RX_CTX_ID(nbuf) = reo_ring_num;

	return pkt_capture_offload;
}
/* L3 header padding is read directly from the RX TLVs on this platform. */
static inline uint8_t hal_rx_get_l3_pad_bytes_be(qdf_nbuf_t nbuf,
						 uint8_t *rx_tlv_hdr)
{
	return HAL_RX_TLV_L3_HEADER_PADDING_GET(rx_tlv_hdr);
}

/* WBM msdu-continuation flag is read from the ring descriptor here. */
static inline uint8_t
dp_rx_wbm_err_msdu_continuation_get(struct dp_soc *soc,
				    hal_ring_desc_t ring_desc,
				    qdf_nbuf_t nbuf)
{
	return hal_rx_wbm_err_msdu_continuation_get(soc->hal_soc,
						    ring_desc);
}
#else
/* AP platform: no-op, raw metadata is stashed in cb instead. */
static inline void
dp_rx_set_link_id_be(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
}

/* AP platform: no-op, sequence number is not cached here. */
static inline void
dp_rx_set_mpdu_seq_number_be(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
}

/* Derive the peer id from the raw peer metadata word stashed in cb by
 * dp_rx_set_mpdu_msdu_desc_info_in_nbuf().
 * NOTE(review): uses DP_BE_PEER_METADATA_PEER_ID_MASK, which this header
 * only defines when DP_USE_REDUCED_PEER_ID_FIELD_WIDTH is NOT set —
 * confirm the two configs are mutually exclusive.
 */
static inline uint16_t
dp_rx_get_peer_id_be(qdf_nbuf_t nbuf)
{
	uint32_t peer_metadata = QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf);

	return ((peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
		DP_BE_PEER_METADATA_PEER_ID_SHIFT);
}

/* Stash the raw descriptor info words in nbuf cb for deferred parsing. */
static inline void
dp_rx_set_mpdu_msdu_desc_info_in_nbuf(qdf_nbuf_t nbuf,
				      uint32_t mpdu_desc_info,
				      uint32_t peer_mdata,
				      uint32_t msdu_desc_info)
{
	QDF_NBUF_CB_RX_MPDU_DESC_INFO_1(nbuf) = mpdu_desc_info;
	QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf) = peer_mdata;
	QDF_NBUF_CB_RX_MSDU_DESC_INFO(nbuf) = msdu_desc_info;
}
/* AP platform: copy the raw REO desc info words into nbuf cb and defer
 * flag/id parsing; rx-offload detection is not done here (returns 0).
 */
static inline uint8_t dp_rx_copy_desc_info_in_nbuf_cb(struct dp_soc *soc,
						      hal_ring_desc_t ring_desc,
						      qdf_nbuf_t nbuf,
						      uint8_t reo_ring_num)
{
	uint32_t mpdu_desc_info = 0;
	uint32_t msdu_desc_info = 0;
	uint32_t peer_mdata = 0;

	/* get REO mpdu & msdu desc info */
	hal_rx_get_mpdu_msdu_desc_info_be(ring_desc,
					  &mpdu_desc_info,
					  &peer_mdata,
					  &msdu_desc_info);

	dp_rx_set_mpdu_msdu_desc_info_in_nbuf(nbuf,
					      mpdu_desc_info,
					      peer_mdata,
					      msdu_desc_info);

	return 0;
}
/* AP platform: L3 padding comes from the cb flag (MSB set => 2 bytes). */
static inline uint8_t hal_rx_get_l3_pad_bytes_be(qdf_nbuf_t nbuf,
						 uint8_t *rx_tlv_hdr)
{
	return QDF_NBUF_CB_RX_L3_PAD_MSB(nbuf) ? 2 : 0;
}

/* AP platform: continuation flag was already cached in nbuf cb. */
static inline uint8_t
dp_rx_wbm_err_msdu_continuation_get(struct dp_soc *soc,
				    hal_ring_desc_t ring_desc,
				    qdf_nbuf_t nbuf)
{
	return qdf_nbuf_is_rx_chfrag_cont(nbuf);
}
#endif /* CONFIG_NBUF_AP_PLATFORM */
  853. /**
  854. * dp_rx_wbm_err_copy_desc_info_in_nbuf(): API to copy WBM dest ring
  855. * descriptor information in nbuf CB/TLV
  856. *
  857. * @soc: pointer to Soc structure
  858. * @ring_desc: wbm dest ring descriptor
  859. * @nbuf: nbuf to save descriptor information
  860. * @pool_id: pool id part of wbm error info
  861. *
  862. * Return: wbm error information details
  863. */
  864. static inline uint32_t
  865. dp_rx_wbm_err_copy_desc_info_in_nbuf(struct dp_soc *soc,
  866. hal_ring_desc_t ring_desc,
  867. qdf_nbuf_t nbuf,
  868. uint8_t pool_id)
  869. {
  870. uint32_t mpdu_desc_info = 0;
  871. uint32_t msdu_desc_info = 0;
  872. uint32_t peer_mdata = 0;
  873. union hal_wbm_err_info_u wbm_err = { 0 };
  874. /* get WBM mpdu & msdu desc info */
  875. hal_rx_wbm_err_mpdu_msdu_info_get_be(ring_desc,
  876. &wbm_err.info,
  877. &mpdu_desc_info,
  878. &msdu_desc_info,
  879. &peer_mdata);
  880. wbm_err.info_bit.pool_id = pool_id;
  881. dp_rx_set_mpdu_msdu_desc_info_in_nbuf(nbuf,
  882. mpdu_desc_info,
  883. peer_mdata,
  884. msdu_desc_info);
  885. dp_rx_set_wbm_err_info_in_nbuf(soc, nbuf, wbm_err);
  886. return wbm_err.info;
  887. }
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/* Look up the dp_soc for a given MLO chip id (defined in .c). */
struct dp_soc *
dp_get_soc_by_chip_id_be(struct dp_soc *soc, uint8_t chip_id);
#else
/* Single-chip build: chip id is ignored, the local soc is returned. */
static inline struct dp_soc *
dp_get_soc_by_chip_id_be(struct dp_soc *soc, uint8_t chip_id)
{
	return soc;
}
#endif
  898. #endif