
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_BE_RX_H_
#define _DP_BE_RX_H_

#include <dp_types.h>
#include "dp_be.h"
#include "dp_peer.h"
#include <dp_rx.h>
#include "hal_be_rx.h"

/**
 * struct dp_be_intrabss_params - parameters for BE intra-BSS forwarding
 * @dest_soc: destination soc to forward the packet to
 * @tx_vdev_id: vdev id retrieved from the destination peer
 */
struct dp_be_intrabss_params {
	struct dp_soc *dest_soc;
	uint8_t tx_vdev_id;
};
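
/*
 * Illustrative initialization (a sketch, not part of this header):
 * the forwarding path fills these params from the destination peer
 * before handing the frame to the TX path. "da_peer" below is a
 * hypothetical local variable standing in for the resolved DA peer.
 *
 *	struct dp_be_intrabss_params params = {0};
 *
 *	params.dest_soc = da_peer->vdev->pdev->soc;
 *	params.tx_vdev_id = da_peer->vdev->vdev_id;
 */
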
#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_rx_intrabss_fwd_be() - API for intra-BSS forwarding. Forwarding is
 *			     not allowed for an EAPOL packet whose DA does
 *			     not match the vdev MAC address.
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if the packet is forwarded, else false
 */
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
			   struct dp_txrx_peer *ta_txrx_peer,
			   uint8_t *rx_tlv_hdr,
			   qdf_nbuf_t nbuf,
			   struct hal_rx_msdu_metadata msdu_metadata);
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
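
/*
 * Illustrative call site (a sketch under assumed local names, not part of
 * this header): the per-MSDU RX loop typically skips local delivery of the
 * nbuf when the intra-BSS forward succeeds.
 *
 *	if (dp_rx_intrabss_fwd_be(soc, txrx_peer, rx_tlv_hdr, nbuf,
 *				  msdu_metadata))
 *		continue;
 */
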
/**
 * dp_rx_intrabss_mcast_handler_be() - intra-BSS mcast handler
 * @soc: core txrx main context
 * @ta_txrx_peer: source txrx_peer entry
 * @nbuf_copy: nbuf that has to be intrabss forwarded
 * @tid_stats: tid_stats structure
 *
 * Return: true if the packet is forwarded, else false
 */
bool
dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
				struct dp_txrx_peer *ta_txrx_peer,
				qdf_nbuf_t nbuf_copy,
				struct cdp_tid_rx_stats *tid_stats);

/**
 * dp_rx_word_mask_subscribe_be() - program the rx TLV word mask subscription
 *				    into the HTT message word
 * @soc: core txrx main context
 * @msg_word: HTT message word to be updated
 * @rx_filter: rx filter configuration
 *
 * Return: None
 */
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
				  uint32_t *msg_word,
				  void *rx_filter);

/**
 * dp_rx_process_be() - BE-specific Rx processing for a REO destination ring
 * @int_ctx: interrupt context
 * @hal_ring_hdl: REO destination ring handle
 * @reo_ring_num: REO destination ring number
 * @quota: quota of work to be done
 *
 * Return: number of descriptors processed
 */
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
			  uint32_t quota);

/**
 * dp_rx_chain_msdus_be() - Function to chain all msdus of a mpdu
 *			    to the pdev invalid peer list
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for the last msdu of an mpdu
 */
bool dp_rx_chain_msdus_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
			  uint8_t *rx_tlv_hdr, uint8_t mac_id);

/**
 * dp_rx_desc_pool_init_be() - Initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t pool_id);

/**
 * dp_rx_desc_pool_deinit_be() - De-initialize Rx Descriptor pool(s)
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: None
 */
void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id);

/**
 * dp_wbm_get_rx_desc_from_hal_desc_be() - Get the corresponding Rx Desc
 *					   address from a WBM ring Desc
 * @soc: Handle to DP Soc structure
 * @ring_desc: ring descriptor structure pointer
 * @r_rx_desc: pointer to a pointer of Rx Desc
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
					       void *ring_desc,
					       struct dp_rx_desc **r_rx_desc);

/**
 * dp_rx_desc_cookie_2_va_be() - Convert an RX Desc cookie ID to a VA
 * @soc: Handle to DP Soc structure
 * @cookie: cookie used to look up the virtual address
 *
 * Return: Rx descriptor virtual address
 */
struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
					     uint32_t cookie);

#if !defined(DP_FEATURE_HW_COOKIE_CONVERSION) || \
	defined(DP_HW_COOKIE_CONVERT_EXCEPTION)
/**
 * dp_rx_desc_sw_cc_check() - check whether the RX desc VA was obtained
 *			      correctly; if not, do SW cookie conversion
 * @soc: Handle to DP Soc structure
 * @rx_buf_cookie: RX desc cookie ID
 * @r_rx_desc: double pointer for RX desc
 *
 * Return: None
 */
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
		       uint32_t rx_buf_cookie,
		       struct dp_rx_desc **r_rx_desc)
{
	if (qdf_unlikely(!(*r_rx_desc))) {
		*r_rx_desc = (struct dp_rx_desc *)
				dp_cc_desc_find(soc, rx_buf_cookie);
	}
}
#else
static inline void
dp_rx_desc_sw_cc_check(struct dp_soc *soc,
		       uint32_t rx_buf_cookie,
		       struct dp_rx_desc **r_rx_desc)
{
}
#endif /* !DP_FEATURE_HW_COOKIE_CONVERSION || DP_HW_COOKIE_CONVERT_EXCEPTION */
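
/*
 * Illustrative usage (sketch): a caller that looked up the SW descriptor
 * from a cookie can fall back to SW cookie conversion when the VA lookup
 * returned NULL.
 *
 *	struct dp_rx_desc *rx_desc;
 *
 *	rx_desc = dp_rx_desc_cookie_2_va_be(soc, rx_buf_cookie);
 *	dp_rx_desc_sw_cc_check(soc, rx_buf_cookie, &rx_desc);
 */
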
#define DP_PEER_METADATA_OFFLOAD_GET_BE(_peer_metadata)		(0)

#ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
static inline uint16_t
dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	struct htt_rx_peer_metadata_v1 *metadata =
			(struct htt_rx_peer_metadata_v1 *)&peer_metadata;
	uint16_t peer_id;

	peer_id = metadata->peer_id |
		  (metadata->ml_peer_valid << soc->peer_id_shift);

	return peer_id;
}
#else
/* Combine the ml_peer_valid and peer_id fields */
#define DP_BE_PEER_METADATA_PEER_ID_MASK	0x00003fff
#define DP_BE_PEER_METADATA_PEER_ID_SHIFT	0

static inline uint16_t
dp_rx_peer_metadata_peer_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	return ((peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
		DP_BE_PEER_METADATA_PEER_ID_SHIFT);
}
#endif
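
/*
 * Worked example for the combined-field fallback above (illustrative):
 * peer_metadata = 0x00002005 gives
 *	(0x00002005 & 0x00003fff) >> 0 = 0x2005,
 * i.e. a 13-bit peer_id of 5 with the ml_peer_valid bit (bit 13) carried
 * along in the returned value.
 */
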
static inline uint16_t
dp_rx_peer_metadata_vdev_id_get_be(struct dp_soc *soc, uint32_t peer_metadata)
{
	struct htt_rx_peer_metadata_v1 *metadata =
			(struct htt_rx_peer_metadata_v1 *)&peer_metadata;

	return metadata->vdev_id;
}

static inline uint8_t
dp_rx_peer_metadata_lmac_id_get_be(uint32_t peer_metadata)
{
	return HTT_RX_PEER_META_DATA_V1_LMAC_ID_GET(peer_metadata);
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_rx_nf_process() - Near-full state handler for RX rings
 * @int_ctx: interrupt context
 * @hal_ring_hdl: Rx ring handle
 * @reo_ring_num: RX ring number
 * @quota: quota of work to be done
 *
 * Return: work done in the handler
 */
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota);
#else
static inline
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
			  hal_ring_handle_t hal_ring_hdl,
			  uint8_t reo_ring_num,
			  uint32_t quota)
{
	return 0;
}
#endif /* WLAN_FEATURE_NEAR_FULL_IRQ */
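
/*
 * Illustrative dispatch (a sketch under assumed local names): when
 * near-full IRQs are enabled, the interrupt handler can service a ring
 * that reported a near-full condition with dp_rx_nf_process() and
 * otherwise fall back to the regular dp_rx_process_be() reap.
 *
 *	if (ring_near_full)
 *		work = dp_rx_nf_process(int_ctx, hal_ring_hdl,
 *					reo_ring_num, remaining_quota);
 *	else
 *		work = dp_rx_process_be(int_ctx, hal_ring_hdl,
 *					reo_ring_num, remaining_quota);
 */
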
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
struct dp_soc *
dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id);

struct dp_soc *
dp_soc_get_by_idle_bm_id(struct dp_soc *soc, uint8_t idle_bm_id);

uint8_t dp_soc_get_num_soc_be(struct dp_soc *soc);
#else
static inline struct dp_soc *
dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	return soc;
}

static inline uint8_t
dp_soc_get_num_soc_be(struct dp_soc *soc)
{
	return 1;
}
#endif
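
/*
 * Illustrative usage (a sketch with assumed locals): on multi-chip MLO
 * targets, replenish paths resolve which soc owns a buffer from the chip
 * id carried in the descriptor, then replenish into that soc's pool.
 *
 *	struct dp_soc *replenish_soc;
 *
 *	replenish_soc = dp_rx_replensih_soc_get(soc, chip_id);
 */
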
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_igmp_handler() - Rx handler for mcast packets
 * @soc: Handle to DP Soc structure
 * @vdev: DP vdev handle
 * @peer: DP peer handle
 * @nbuf: nbuf to be enqueued
 *
 * Return: true when the packet is sent to the stack, false on failure
 */
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *peer,
			    qdf_nbuf_t nbuf);

/**
 * dp_peer_rx_reorder_queue_setup_be() - Send the reo queue setup wmi cmd
 *					 to FW per peer type
 * @soc: DP Soc handle
 * @peer: dp peer to operate on
 * @tid: TID
 * @ba_window_size: BlockAck window size
 *
 * Return: 0 - success, others - failure
 */
static inline
QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
					     struct dp_peer *peer,
					     int tid,
					     uint32_t ba_window_size)
{
	uint8_t i;
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *link_peer;
	struct dp_rx_tid *rx_tid;
	struct dp_soc *link_peer_soc;

	rx_tid = &peer->rx_tid[tid];
	if (!rx_tid->hw_qdesc_paddr)
		return QDF_STATUS_E_INVAL;

	if (!hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
		if (IS_MLO_DP_MLD_PEER(peer)) {
			/* get link peers with reference */
			dp_get_link_peers_ref_from_mld_peer(soc, peer,
							    &link_peers_info,
							    DP_MOD_ID_CDP);
			/* send the WMI cmd to each link peer */
			for (i = 0; i < link_peers_info.num_links; i++) {
				link_peer = link_peers_info.link_peers[i];
				link_peer_soc = link_peer->vdev->pdev->soc;
				if (link_peer_soc->cdp_soc.ol_ops->
				    peer_rx_reorder_queue_setup &&
				    link_peer_soc->cdp_soc.ol_ops->
				    peer_rx_reorder_queue_setup(
						link_peer_soc->ctrl_psoc,
						link_peer->vdev->pdev->pdev_id,
						link_peer->vdev->vdev_id,
						link_peer->mac_addr.raw,
						rx_tid->hw_qdesc_paddr,
						tid, tid,
						1, ba_window_size)) {
					dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
						    link_peer_soc, tid);
					/* release the link peer references
					 * taken above before bailing out
					 */
					dp_release_link_peers_ref(
							&link_peers_info,
							DP_MOD_ID_CDP);
					return QDF_STATUS_E_FAILURE;
				}
			}
			/* release link peers reference */
			dp_release_link_peers_ref(&link_peers_info,
						  DP_MOD_ID_CDP);
		} else if (peer->peer_type == CDP_LINK_PEER_TYPE) {
			if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup &&
			    soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
					soc->ctrl_psoc,
					peer->vdev->pdev->pdev_id,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					rx_tid->hw_qdesc_paddr,
					tid, tid,
					1, ba_window_size)) {
				dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
					    soc, tid);
				return QDF_STATUS_E_FAILURE;
			}
		} else {
			dp_peer_err("invalid peer type %d", peer->peer_type);
			return QDF_STATUS_E_FAILURE;
		}
	} else {
		/* Some BE targets don't require WMI and instead use a
		 * host-managed shared table to store the REO queue
		 * reference structs.
		 */
		if (IS_MLO_DP_LINK_PEER(peer) ||
		    peer->peer_id == HTT_INVALID_PEER) {
			/* Return early for an MLD link peer: the table is
			 * not used in that case, as the MLD peer's qref is
			 * written to the LUT in peer setup or peer map.
			 * At this point, peer setup for a link peer runs
			 * before peer map, so the peer id is not yet
			 * assigned; this happens when peer_setup is called
			 * before the host receives the HTT peer map. Return
			 * success with no op and let peer map handle writing
			 * the reo_qref to the LUT.
			 */
			dp_peer_debug("Invalid peer id for dp_peer:%pK", peer);
			return QDF_STATUS_SUCCESS;
		}

		hal_reo_shared_qaddr_write(soc->hal_soc,
					   peer->peer_id,
					   tid,
					   peer->rx_tid[tid].hw_qdesc_paddr);
	}

	return QDF_STATUS_SUCCESS;
}
#else
static inline
QDF_STATUS dp_peer_rx_reorder_queue_setup_be(struct dp_soc *soc,
					     struct dp_peer *peer,
					     int tid,
					     uint32_t ba_window_size)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

	if (!rx_tid->hw_qdesc_paddr)
		return QDF_STATUS_E_INVAL;

	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
				soc->ctrl_psoc,
				peer->vdev->pdev->pdev_id,
				peer->vdev->vdev_id,
				peer->mac_addr.raw, rx_tid->hw_qdesc_paddr,
				tid, tid, 1, ba_window_size)) {
			dp_peer_err("%pK: Failed to send reo queue setup to FW - tid %d\n",
				    soc, tid);
			return QDF_STATUS_E_FAILURE;
		}
	}

	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_FEATURE_11BE_MLO */
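
/*
 * Illustrative call site (sketch): once rx_tid->hw_qdesc_paddr has been
 * allocated for a TID, per-TID setup would invoke the helper and treat a
 * non-success status as a setup failure.
 *
 *	QDF_STATUS status;
 *
 *	status = dp_peer_rx_reorder_queue_setup_be(soc, peer, tid,
 *						   ba_window_size);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		dp_peer_err("reorder queue setup failed for tid %d", tid);
 */
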
#ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
static inline
void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
	if (next) {
		/* prefetch skb->next and the first few bytes of skb->cb */
		qdf_prefetch(next);
		/* skb->cb is spread across 2 cache lines, hence the
		 * additional prefetches below
		 */
		qdf_prefetch(&next->_skb_refdst);
		qdf_prefetch(&next->len);
		qdf_prefetch(&next->protocol);
		qdf_prefetch(next->data);
		qdf_prefetch(next->data + 64);
		qdf_prefetch(next->data + 128);
	}
}
#else
static inline
void dp_rx_prefetch_nbuf_data_be(qdf_nbuf_t nbuf, qdf_nbuf_t next)
{
}
#endif
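
/*
 * Illustrative usage (sketch): the RX reap loop prefetches the next nbuf
 * while processing the current one, so the skb fields and the first
 * payload cache lines are warm by the time they are touched.
 *
 *	next = qdf_nbuf_next(nbuf);
 *	dp_rx_prefetch_nbuf_data_be(nbuf, next);
 */
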
#ifdef QCA_DP_RX_HW_SW_NBUF_DESC_PREFETCH
/**
 * dp_rx_va_prefetch() - function to prefetch the SW desc
 * @last_prefetched_hw_desc: HW desc from which the SW desc VA is fetched
 *
 * Return: prefetched Rx descriptor virtual address
 */
static inline
void *dp_rx_va_prefetch(void *last_prefetched_hw_desc)
{
	void *prefetch_desc;

	prefetch_desc = (void *)hal_rx_get_reo_desc_va(last_prefetched_hw_desc);
	qdf_prefetch(prefetch_desc);
	return prefetch_desc;
}

/**
 * dp_rx_prefetch_hw_sw_nbuf_32_byte_desc() - function to prefetch the HW
 *					      and SW descriptors
 * @soc: Handle to DP Soc structure
 * @hal_soc: Handle to HAL Soc structure
 * @num_entries: valid number of HW descriptors
 * @hal_ring_hdl: destination ring pointer
 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
 * @last_prefetched_sw_desc: input & output param of last prefetched SW desc
 *
 * Return: None
 */
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
				       hal_soc_handle_t hal_soc,
				       uint32_t num_entries,
				       hal_ring_handle_t hal_ring_hdl,
				       hal_ring_desc_t *last_prefetched_hw_desc,
				       struct dp_rx_desc **last_prefetched_sw_desc)
{
	if (*last_prefetched_sw_desc) {
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_entries) {
		*last_prefetched_sw_desc =
			dp_rx_va_prefetch(*last_prefetched_hw_desc);

		if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
			*last_prefetched_hw_desc =
				hal_srng_dst_prefetch_next_cached_desc(hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
		else
			*last_prefetched_hw_desc =
				hal_srng_dst_get_next_32_byte_desc(hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
	}
}
#else
static inline void
dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(struct dp_soc *soc,
				       hal_soc_handle_t hal_soc,
				       uint32_t num_entries,
				       hal_ring_handle_t hal_ring_hdl,
				       hal_ring_desc_t *last_prefetched_hw_desc,
				       struct dp_rx_desc **last_prefetched_sw_desc)
{
}
#endif
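
/*
 * Illustrative usage (sketch under assumed loop-local names): inside the
 * REO destination reap loop, each iteration prefetches the upcoming HW
 * descriptor and its SW descriptor/nbuf ahead of use.
 *
 *	dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(soc, hal_soc, num_pending,
 *					       hal_ring_hdl,
 *					       &last_prefetched_hw_desc,
 *					       &last_prefetched_sw_desc);
 */
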
#ifdef CONFIG_WORD_BASED_TLV
/**
 * dp_rx_get_reo_qdesc_addr_be() - API to get the qdesc address of the reo
 *				   entrance ring desc
 * @hal_soc: Handle to HAL Soc structure
 * @dst_ring_desc: reo dest ring descriptor (used for Lithium DP)
 * @buf: pointer to the start of RX PKT TLV headers
 * @txrx_peer: pointer to txrx_peer
 * @tid: tid value
 *
 * Return: qdesc address in reo destination ring buffer
 */
static inline
uint64_t dp_rx_get_reo_qdesc_addr_be(hal_soc_handle_t hal_soc,
				     uint8_t *dst_ring_desc,
				     uint8_t *buf,
				     struct dp_txrx_peer *txrx_peer,
				     unsigned int tid)
{
	struct dp_peer *peer = NULL;
	uint64_t qdesc_addr = 0;

	if (hal_reo_shared_qaddr_is_enable(hal_soc)) {
		/* With the shared qref table, the peer id stands in for
		 * the qdesc address.
		 */
		qdesc_addr = (uint64_t)txrx_peer->peer_id;
	} else {
		peer = dp_peer_get_ref_by_id(txrx_peer->vdev->pdev->soc,
					     txrx_peer->peer_id,
					     DP_MOD_ID_CONFIG);
		if (!peer)
			return 0;

		qdesc_addr = (uint64_t)peer->rx_tid[tid].hw_qdesc_paddr;
		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
	}
	return qdesc_addr;
}
#else
static inline
uint64_t dp_rx_get_reo_qdesc_addr_be(hal_soc_handle_t hal_soc,
				     uint8_t *dst_ring_desc,
				     uint8_t *buf,
				     struct dp_txrx_peer *txrx_peer,
				     unsigned int tid)
{
	return hal_rx_get_qdesc_addr(hal_soc, dst_ring_desc, buf);
}
#endif /* CONFIG_WORD_BASED_TLV */
#endif /* _DP_BE_RX_H_ */