/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "cdp_txrx_cmn_struct.h"
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_be_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_be_rx.h"
#include "hal_api.h"
#include "hal_be_api.h"
#include "qdf_nbuf.h"
#include "hal_be_rx_tlv.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"

#ifndef AST_OFFLOAD_ENABLE
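/**
 * dp_rx_wds_learn() - Wrapper for WDS source port learning
 * @soc: DP soc handle
 * @vdev: vdev on which the packet was received
 * @rx_tlv_hdr: start address of rx tlvs
 * @txrx_peer: transmitter (ta) txrx peer handle
 * @nbuf: RX packet buffer
 * @msdu_metadata: MSDU metadata parsed from the rx tlvs
 *
 * Return: void
 */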
static void
dp_rx_wds_learn(struct dp_soc *soc,
        struct dp_vdev *vdev,
        uint8_t *rx_tlv_hdr,
        struct dp_txrx_peer *txrx_peer,
        qdf_nbuf_t nbuf,
        struct hal_rx_msdu_metadata msdu_metadata)
{
    /* WDS Source Port Learning */
    if (qdf_likely(vdev->wds_enabled))
        dp_rx_wds_srcport_learn(soc,
                    rx_tlv_hdr,
                    txrx_peer,
                    nbuf,
                    msdu_metadata);
}
#else
#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_wds_ext_peer_learn_be() - function to send event to control
 * path on receiving 1st 4-address frame from backhaul.
 * @soc: DP soc
 * @ta_txrx_peer: WDS repeater txrx peer
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: RX packet buffer
 *
 * Return: void
 */
static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
                        struct dp_txrx_peer *ta_txrx_peer,
                        uint8_t *rx_tlv_hdr,
                        qdf_nbuf_t nbuf)
{
    uint8_t wds_ext_src_mac[QDF_MAC_ADDR_SIZE];
    struct dp_peer *ta_base_peer;

    /* Instead of checking whether addr4 is valid in the per-packet
     * path, check the init bit, which is set on reception of the
     * first addr4-valid packet.
     */
    if (!ta_txrx_peer->vdev->wds_ext_enabled ||
        qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
                &ta_txrx_peer->wds_ext.init))
        return;

    if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
        hal_rx_get_mpdu_mac_ad4_valid_be(rx_tlv_hdr)) {
        qdf_atomic_test_and_set_bit(WDS_EXT_PEER_INIT_BIT,
                        &ta_txrx_peer->wds_ext.init);

        ta_base_peer = dp_peer_get_ref_by_id(soc, ta_txrx_peer->peer_id,
                             DP_MOD_ID_RX);
        if (!ta_base_peer)
            return;

        qdf_mem_copy(wds_ext_src_mac, &ta_base_peer->mac_addr.raw[0],
                 QDF_MAC_ADDR_SIZE);
        dp_peer_unref_delete(ta_base_peer, DP_MOD_ID_RX);

        soc->cdp_soc.ol_ops->rx_wds_ext_peer_learn(
                        soc->ctrl_psoc,
                        ta_txrx_peer->peer_id,
                        ta_txrx_peer->vdev->vdev_id,
                        wds_ext_src_mac);
    }
}
#else
static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
                        struct dp_txrx_peer *ta_txrx_peer,
                        uint8_t *rx_tlv_hdr,
                        qdf_nbuf_t nbuf)
{
}
#endif
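/**
 * dp_rx_wds_learn() - WDS learn wrapper for the AST-offload case; host
 * side source-port learning is skipped and only the WDS-extended peer
 * learn event is generated.
 * @soc: DP soc handle
 * @vdev: vdev on which the packet was received
 * @rx_tlv_hdr: start address of rx tlvs
 * @ta_txrx_peer: transmitter txrx peer handle
 * @nbuf: RX packet buffer
 * @msdu_metadata: MSDU metadata parsed from the rx tlvs
 *
 * Return: void
 */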
static void
dp_rx_wds_learn(struct dp_soc *soc,
        struct dp_vdev *vdev,
        uint8_t *rx_tlv_hdr,
        struct dp_txrx_peer *ta_txrx_peer,
        qdf_nbuf_t nbuf,
        struct hal_rx_msdu_metadata msdu_metadata)
{
    dp_wds_ext_peer_learn_be(soc, ta_txrx_peer, rx_tlv_hdr, nbuf);
}
#endif

#if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
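/**
 * dp_rx_set_msdu_lmac_id() - save the lmac_id carried in the peer
 * metadata into nbuf cb, for per-LMAC packet stats
 * @nbuf: RX packet buffer
 * @peer_mdata: peer metadata from the REO ring descriptor
 *
 * Return: void
 */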
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
    uint8_t lmac_id;

    lmac_id = dp_rx_peer_metadata_lmac_id_get_be(peer_mdata);
    qdf_nbuf_set_lmac_id(nbuf, lmac_id);
}
#else
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
}
#endif
/**
 * dp_rx_process_be() - Brain of the Rx processing functionality
 *	Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @int_ctx: per interrupt context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
              hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
              uint32_t quota)
{
    hal_ring_desc_t ring_desc;
    hal_ring_desc_t last_prefetched_hw_desc;
    hal_soc_handle_t hal_soc;
    struct dp_rx_desc *rx_desc = NULL;
    struct dp_rx_desc *last_prefetched_sw_desc = NULL;
    qdf_nbuf_t nbuf, next;
    bool near_full;
    union dp_rx_desc_list_elem_t *head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
    union dp_rx_desc_list_elem_t *tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
    uint32_t num_pending = 0;
    uint32_t rx_bufs_used = 0, rx_buf_cookie;
    uint16_t msdu_len = 0;
    uint16_t peer_id;
    uint8_t vdev_id;
    struct dp_txrx_peer *txrx_peer;
    dp_txrx_ref_handle txrx_ref_handle = NULL;
    struct dp_vdev *vdev;
    uint32_t pkt_len = 0;
    struct hal_rx_mpdu_desc_info mpdu_desc_info;
    struct hal_rx_msdu_desc_info msdu_desc_info;
    enum hal_reo_error_status error;
    uint32_t peer_mdata;
    uint8_t *rx_tlv_hdr;
    uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
    uint8_t mac_id = 0;
    struct dp_pdev *rx_pdev;
    bool enh_flag;
    struct dp_srng *dp_rxdma_srng;
    struct rx_desc_pool *rx_desc_pool;
    struct dp_soc *soc = int_ctx->soc;
    struct cdp_tid_rx_stats *tid_stats;
    qdf_nbuf_t nbuf_head;
    qdf_nbuf_t nbuf_tail;
    qdf_nbuf_t deliver_list_head;
    qdf_nbuf_t deliver_list_tail;
    uint32_t num_rx_bufs_reaped = 0;
    uint32_t intr_id;
    struct hif_opaque_softc *scn;
    int32_t tid = 0;
    bool is_prev_msdu_last = true;
    uint32_t num_entries_avail = 0;
    uint32_t rx_ol_pkt_cnt = 0;
    uint32_t num_entries = 0;
    struct hal_rx_msdu_metadata msdu_metadata;
    QDF_STATUS status;
    qdf_nbuf_t ebuf_head;
    qdf_nbuf_t ebuf_tail;
    uint8_t pkt_capture_offload = 0;
    struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
    int max_reap_limit, ring_near_full;
    struct dp_soc *replenish_soc;
    uint8_t chip_id;
    uint64_t current_time = 0;
    uint32_t old_tid;
    uint32_t peer_ext_stats;
    uint32_t dsf;

    DP_HIST_INIT();

    qdf_assert_always(soc && hal_ring_hdl);
    hal_soc = soc->hal_soc;
    qdf_assert_always(hal_soc);

    scn = soc->hif_handle;
    intr_id = int_ctx->dp_intr_id;
    num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
    dp_runtime_pm_mark_last_busy(soc);
more_data:
    /* reset local variables here to be re-used in the function */
    nbuf_head = NULL;
    nbuf_tail = NULL;
    deliver_list_head = NULL;
    deliver_list_tail = NULL;
    txrx_peer = NULL;
    vdev = NULL;
    num_rx_bufs_reaped = 0;
    ebuf_head = NULL;
    ebuf_tail = NULL;
    ring_near_full = 0;
    max_reap_limit = dp_rx_get_loop_pkt_limit(soc);

    qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
    qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
    qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
    qdf_mem_zero(head, sizeof(head));
    qdf_mem_zero(tail, sizeof(tail));
    old_tid = 0xff;
    dsf = 0;
    peer_ext_stats = 0;
    rx_pdev = NULL;
    tid_stats = NULL;

    dp_pkt_get_timestamp(&current_time);

    ring_near_full = _dp_srng_test_and_update_nf_params(soc, rx_ring,
                                &max_reap_limit);

    peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
    if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
        /*
         * Need API to convert from hal_ring pointer to
         * Ring Type / Ring Id combo
         */
        DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
              FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
        goto done;
    }

    hal_srng_update_ring_usage_wm_no_lock(soc->hal_soc, hal_ring_hdl);

    if (!num_pending)
        num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);

    if (num_pending > quota)
        num_pending = quota;

    dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);
    last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
                                hal_ring_hdl,
                                num_pending);
    /*
     * start reaping the buffers from reo ring and queue
     * them in per vdev queue.
     * Process the received pkts in a different per vdev loop.
     */
    while (qdf_likely(num_pending)) {
        ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);

        if (qdf_unlikely(!ring_desc))
            break;

        error = HAL_RX_ERROR_STATUS_GET(ring_desc);

        if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
            dp_rx_err("%pK: HAL RING 0x%pK:error %d",
                  soc, hal_ring_hdl, error);
            DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
                     1);
            /* Don't know how to deal with this -- assert */
            qdf_assert(0);
        }

        dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
        rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
        status = dp_rx_cookie_check_and_invalidate(ring_desc);
        if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
            DP_STATS_INC(soc, rx.err.stale_cookie, 1);
            break;
        }

        rx_desc = (struct dp_rx_desc *)
                hal_rx_get_reo_desc_va(ring_desc);
        dp_rx_desc_sw_cc_check(soc, rx_buf_cookie, &rx_desc);

        status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
                       ring_desc, rx_desc);
        if (QDF_IS_STATUS_ERROR(status)) {
            if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
                qdf_assert_always(!rx_desc->unmapped);
                dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
                rx_desc->unmapped = 1;
                dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
                                rx_desc->pool_id);
                dp_rx_add_to_free_desc_list(
                    &head[rx_desc->chip_id][rx_desc->pool_id],
                    &tail[rx_desc->chip_id][rx_desc->pool_id],
                    rx_desc);
            }
            hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
            continue;
        }
        /*
         * This is an unlikely scenario: the host is reaping a
         * descriptor that it already reaped a while ago but has
         * not yet replenished back to HW. In this case the host
         * dumps the last 128 descriptors, including the software
         * descriptor rx_desc, and asserts.
         */
        if (qdf_unlikely(!rx_desc->in_use)) {
            DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
            dp_info_rl("Reaping rx_desc not in use!");
            dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
                           ring_desc, rx_desc);
            /* ignore duplicate RX desc and continue to process */
            /* Pop out the descriptor */
            hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
            continue;
        }
        status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
        if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
            DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
            dp_info_rl("Nbuf sanity check failure!");
            dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
                           ring_desc, rx_desc);
            rx_desc->in_err_state = 1;
            hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
            continue;
        }

        if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
            dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
            DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
            dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
                           ring_desc, rx_desc);
        }

        /* Get MPDU DESC info */
        hal_rx_mpdu_desc_info_get_be(ring_desc, &mpdu_desc_info);

        /* Get MSDU DESC info */
        hal_rx_msdu_desc_info_get_be(ring_desc, &msdu_desc_info);

        /* Set the end bit to identify the last buffer in MPDU */
        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
            qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
        if (qdf_unlikely(msdu_desc_info.msdu_flags &
                 HAL_MSDU_F_MSDU_CONTINUATION)) {
            /* In dp_rx_sg_create() the end bit must not be set
             * until the last buffer; since the continuation bit
             * is set here, this is not the last buffer.
             */
            qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 0);

            /* The previous msdu had the end bit set, so the
             * current one starts a new MPDU.
             */
            if (is_prev_msdu_last) {
                /* Get number of entries available in HW ring */
                num_entries_avail =
                    hal_srng_dst_num_valid(hal_soc,
                                   hal_ring_hdl, 1);

                /* For a new MPDU, check whether the complete
                 * MPDU can be read by comparing the number of
                 * buffers available against the number needed
                 * to reap this MPDU.
                 */
                if ((msdu_desc_info.msdu_len /
                     (RX_DATA_BUFFER_SIZE -
                      soc->rx_pkt_tlv_size) + 1) >
                    num_pending) {
                    DP_STATS_INC(soc,
                             rx.msdu_scatter_wait_break,
                             1);
                    dp_rx_cookie_reset_invalid_bit(
                                ring_desc);
                    /* As we are going to break out of the
                     * loop because of unavailability of
                     * descs to form complete SG, we need to
                     * reset the TP in the REO destination
                     * ring.
                     */
                    hal_srng_dst_dec_tp(hal_soc,
                                hal_ring_hdl);
                    break;
                }
                is_prev_msdu_last = false;
            }
        }
        if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
            qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);

        if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
                 HAL_MPDU_F_RAW_AMPDU))
            qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);

        if (!is_prev_msdu_last &&
            !(msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION))
            is_prev_msdu_last = true;

        rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
        peer_mdata = mpdu_desc_info.peer_meta_data;
        QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
            dp_rx_peer_metadata_peer_id_get_be(soc, peer_mdata);
        QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
            dp_rx_peer_metadata_vdev_id_get_be(soc, peer_mdata);
        dp_rx_set_msdu_lmac_id(rx_desc->nbuf, peer_mdata);

        /* to indicate whether this msdu is rx offload */
        pkt_capture_offload =
            DP_PEER_METADATA_OFFLOAD_GET_BE(peer_mdata);

        /*
         * Save the first/last/continuation msdu flags in nbuf->cb,
         * along with mcbc, is_da_valid, is_sa_valid and the msdu
         * length. This keeps the info required for per-pkt
         * processing in the same cache line, which improves
         * throughput for smaller pkt sizes.
         */
        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
            qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);

        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
            qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);

        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
            qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);

        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
            qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);

        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
            qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);

        if (msdu_desc_info.msdu_flags & HAL_MSDU_F_INTRA_BSS)
            qdf_nbuf_set_intra_bss(rx_desc->nbuf, 1);

        if (qdf_likely(mpdu_desc_info.mpdu_flags &
                   HAL_MPDU_F_QOS_CONTROL_VALID))
            qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);

        /* set sw exception */
        qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
                rx_desc->nbuf,
                hal_rx_sw_exception_get_be(ring_desc));
        QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;
        QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;

        /*
         * The unmap is done after the scattered-msdu wait/break
         * logic above, to avoid a double skb unmap.
         */
        dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
        rx_desc->unmapped = 1;
        DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
                   ebuf_tail, rx_desc);

        quota -= 1;
        num_pending -= 1;

        dp_rx_add_to_free_desc_list
            (&head[rx_desc->chip_id][rx_desc->pool_id],
             &tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);
        num_rx_bufs_reaped++;

        dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(soc, hal_soc,
                               num_pending,
                               hal_ring_hdl,
                               &last_prefetched_hw_desc,
                               &last_prefetched_sw_desc);

        /*
         * In the scatter case, allow the break only once the
         * complete msdu has been received.
         */
        if (is_prev_msdu_last &&
            dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
                          max_reap_limit))
            break;
    }
done:
    dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
    qdf_dsb();

    dp_rx_per_core_stats_update(soc, reo_ring_num, num_rx_bufs_reaped);

    for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
        for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
            /*
             * continue with next mac_id if no pkts were reaped
             * from that pool
             */
            if (!rx_bufs_reaped[chip_id][mac_id])
                continue;

            replenish_soc = dp_rx_replensih_soc_get(soc, chip_id);
            dp_rxdma_srng =
                &replenish_soc->rx_refill_buf_ring[mac_id];
            rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];

            dp_rx_buffers_replenish_simple(replenish_soc, mac_id,
                               dp_rxdma_srng,
                               rx_desc_pool,
                               rx_bufs_reaped[chip_id][mac_id],
                               &head[chip_id][mac_id],
                               &tail[chip_id][mac_id]);
        }
    }
    /* Peer can be NULL in case of LFR */
    if (qdf_likely(txrx_peer))
        vdev = NULL;

    /*
     * BIG loop: each nbuf is dequeued from the global queue,
     * processed and queued back on a per-vdev basis. These nbufs
     * are sent to the stack when we run out of nbufs, or when a
     * newly dequeued nbuf belongs to a different vdev than the
     * previous one.
     */
    nbuf = nbuf_head;
    while (nbuf) {
        next = nbuf->next;
        dp_rx_prefetch_nbuf_data_be(nbuf, next);

        if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
            nbuf = next;
            DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
            continue;
        }

        rx_tlv_hdr = qdf_nbuf_data(nbuf);
        vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
        peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);

        if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
                    peer_id, vdev_id)) {
            dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
                           deliver_list_head,
                           deliver_list_tail);
            deliver_list_head = NULL;
            deliver_list_tail = NULL;
        }

        /* Get TID from struct cb->tid_val, save to tid */
        tid = qdf_nbuf_get_tid_val(nbuf);
        if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) {
            DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1);
            dp_rx_nbuf_free(nbuf);
            nbuf = next;
            continue;
        }
        if (qdf_unlikely(!txrx_peer)) {
            txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
                                 peer_id,
                                 &txrx_ref_handle,
                                 pkt_capture_offload,
                                 &vdev,
                                 &rx_pdev, &dsf,
                                 &old_tid);
            if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
                nbuf = next;
                continue;
            }
            enh_flag = rx_pdev->enhanced_stats_en;
        } else if (txrx_peer && txrx_peer->peer_id != peer_id) {
            dp_txrx_peer_unref_delete(txrx_ref_handle,
                          DP_MOD_ID_RX);

            txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
                                 peer_id,
                                 &txrx_ref_handle,
                                 pkt_capture_offload,
                                 &vdev,
                                 &rx_pdev, &dsf,
                                 &old_tid);
            if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
                nbuf = next;
                continue;
            }
            enh_flag = rx_pdev->enhanced_stats_en;
        }

        if (txrx_peer) {
            QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
            qdf_dp_trace_set_track(nbuf, QDF_RX);
            QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
            QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
                QDF_NBUF_RX_PKT_DATA_TRACK;
        }
        rx_bufs_used++;

        /* When hlos tid override is enabled, save the tid in
         * skb->priority.
         */
        if (qdf_unlikely(vdev->skip_sw_tid_classification &
                 DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
            qdf_nbuf_set_priority(nbuf, tid);

        DP_RX_TID_SAVE(nbuf, tid);
        if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
            dp_rx_pkt_tracepoints_enabled())
            qdf_nbuf_set_timestamp(nbuf);

        if (qdf_likely(old_tid != tid)) {
            tid_stats =
                &rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
            old_tid = tid;
        }

        /*
         * Check if DMA completed -- msdu_done is the last bit
         * to be written
         */
        if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
                 !hal_rx_tlv_msdu_done_get_be(rx_tlv_hdr))) {
            dp_err("MSDU DONE failure");
            DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
            hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
                         QDF_TRACE_LEVEL_INFO);
            tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
            dp_rx_nbuf_free(nbuf);
            qdf_assert(0);
            nbuf = next;
            continue;
        }
        DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
        /*
         * First IF condition:
         * 802.11 fragmented pkts are reinjected to the REO
         * HW block as SG pkts; for these pkts we only need to
         * pull the RX TLVs header length.
         * Second IF condition:
         * The below condition happens when an MSDU is spread
         * across multiple buffers. This can happen in two cases:
         * 1. The nbuf size is smaller than the received msdu,
         *    e.g. the nbuf size was set to 2048 during
         *    nbuf_alloc but a 2304-byte msdu was received, so
         *    the msdu is spread across 2 nbufs.
         *
         * 2. AMSDUs when RAW mode is enabled,
         *    e.g. the 1st MSDU is in the 1st nbuf, the 2nd MSDU
         *    is spread across the 1st and 2nd nbufs, and the
         *    last MSDU is spread across the 2nd and 3rd nbufs.
         *
         * For these scenarios, create a skb frag_list and
         * append the buffers until the last MSDU of the AMSDU.
         * Third condition:
         * This is the most likely case: we receive 802.3 pkts
         * decapsulated by HW; here we need to set the pkt length.
         */
        hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr,
                               &msdu_metadata);
        if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
            bool is_mcbc, is_sa_vld, is_da_vld;

            is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
                                 rx_tlv_hdr);
            is_sa_vld =
                hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
                                rx_tlv_hdr);
            is_da_vld =
                hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
                                rx_tlv_hdr);

            qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
            qdf_nbuf_set_da_valid(nbuf, is_da_vld);
            qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
            qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
        } else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
            msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
            nbuf = dp_rx_sg_create(soc, nbuf);
            next = nbuf->next;

            if (qdf_nbuf_is_raw_frame(nbuf)) {
                DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
                DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
                                  rx.raw, 1,
                                  msdu_len);
            } else {
                DP_STATS_INC(soc, rx.err.scatter_msdu, 1);

                if (!dp_rx_is_sg_supported()) {
                    dp_rx_nbuf_free(nbuf);
                    dp_info_rl("sg msdu len %d, dropped",
                           msdu_len);
                    nbuf = next;
                    continue;
                }
            }
        } else {
            msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
            pkt_len = msdu_len +
                  msdu_metadata.l3_hdr_pad +
                  soc->rx_pkt_tlv_size;

            qdf_nbuf_set_pktlen(nbuf, pkt_len);
            dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
        }
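        /*
         * Worked example with assumed sizes (illustrative only):
         * a 1400-byte msdu with 2 bytes of l3_hdr_pad and a
         * 128-byte rx_pkt_tlv_size gives pkt_len = 1530 above,
         * and dp_rx_skip_tlvs() then pulls the TLV area plus the
         * pad so qdf_nbuf_data() points at the ethernet header.
         */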
        dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);

        if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
            dp_rx_err("%pK: Policy Check Drop pkt", soc);
            DP_PEER_PER_PKT_STATS_INC(txrx_peer,
                          rx.policy_check_drop, 1);
            tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
            /* Drop & free packet */
            dp_rx_nbuf_free(nbuf);
            /* Statistics */
            nbuf = next;
            continue;
        }

        /*
         * Drop non-EAPOL frames from unauthorized peer.
         */
        if (qdf_likely(txrx_peer) &&
            qdf_unlikely(!txrx_peer->authorize) &&
            !qdf_nbuf_is_raw_frame(nbuf)) {
            bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
                    qdf_nbuf_is_ipv4_wapi_pkt(nbuf);

            if (!is_eapol) {
                DP_PEER_PER_PKT_STATS_INC(txrx_peer,
                              rx.peer_unauth_rx_pkt_drop,
                              1);
                dp_rx_nbuf_free(nbuf);
                nbuf = next;
                continue;
            }
        }
        dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);

        if (qdf_unlikely(!rx_pdev->rx_fast_flag)) {
            /*
             * process frame for multipass phase processing
             */
            if (qdf_unlikely(vdev->multipass_en)) {
                if (dp_rx_multipass_process(txrx_peer, nbuf,
                                tid) == false) {
                    DP_PEER_PER_PKT_STATS_INC
                        (txrx_peer,
                         rx.multipass_rx_pkt_drop, 1);
                    dp_rx_nbuf_free(nbuf);
                    nbuf = next;
                    continue;
                }
            }

            if (qdf_unlikely(txrx_peer &&
                     (txrx_peer->nawds_enabled) &&
                     (qdf_nbuf_is_da_mcbc(nbuf)) &&
                     (hal_rx_get_mpdu_mac_ad4_valid_be
                        (rx_tlv_hdr) == false))) {
                tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
                DP_PEER_PER_PKT_STATS_INC(txrx_peer,
                              rx.nawds_mcast_drop,
                              1);
                dp_rx_nbuf_free(nbuf);
                nbuf = next;
                continue;
            }

            /* Update the protocol tag in SKB based on CCE
             * metadata.
             */
            dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
                          reo_ring_num, false, true);

            /* Update the flow tag in SKB based on FSE metadata */
            dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
                          true);

            if (qdf_likely(vdev->rx_decap_type ==
                       htt_cmn_pkt_type_ethernet) &&
                qdf_likely(!vdev->mesh_vdev)) {
                dp_rx_wds_learn(soc, vdev,
                        rx_tlv_hdr,
                        txrx_peer,
                        nbuf,
                        msdu_metadata);
            }

            if (qdf_unlikely(vdev->mesh_vdev)) {
                if (dp_rx_filter_mesh_packets(vdev, nbuf,
                                  rx_tlv_hdr)
                        == QDF_STATUS_SUCCESS) {
                    dp_rx_info("%pK: mesh pkt filtered",
                           soc);
                    tid_stats->fail_cnt[MESH_FILTER_DROP]++;
                    DP_STATS_INC(vdev->pdev,
                             dropped.mesh_filter, 1);
                    dp_rx_nbuf_free(nbuf);
                    nbuf = next;
                    continue;
                }
                dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
                              txrx_peer);
            }
        }
        dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
                    reo_ring_num, tid_stats);

        if (qdf_likely(vdev->rx_decap_type ==
                   htt_cmn_pkt_type_ethernet) &&
            qdf_likely(!vdev->mesh_vdev)) {
            /* Intrabss-fwd */
            if (dp_rx_check_ap_bridge(vdev))
                if (dp_rx_intrabss_fwd_be(soc, txrx_peer,
                              rx_tlv_hdr,
                              nbuf,
                              msdu_metadata)) {
                    nbuf = next;
                    tid_stats->intrabss_cnt++;
                    continue; /* Get next desc */
                }
        }

        dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

        dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
                             nbuf);

        dp_rx_update_stats(soc, nbuf);

        dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
                     current_time, nbuf);

        DP_RX_LIST_APPEND(deliver_list_head,
                  deliver_list_tail,
                  nbuf);

        DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
                      QDF_NBUF_CB_RX_PKT_LEN(nbuf),
                      enh_flag);
        if (qdf_unlikely(txrx_peer->in_twt))
            DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
                              rx.to_stack_twt, 1,
                              QDF_NBUF_CB_RX_PKT_LEN(nbuf));

        tid_stats->delivered_to_stack++;
        nbuf = next;
    }
    DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
                   pkt_capture_offload,
                   deliver_list_head,
                   deliver_list_tail);

    if (qdf_likely(txrx_peer))
        dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);

    /*
     * If we are processing in near-full condition, there are
     * three scenarios:
     * 1) Ring entries have reached a critical state
     * 2) Ring entries are still near the high threshold
     * 3) Ring entries are below the safe level
     *
     * One more loop will move the state to normal processing and yield.
     */
    if (ring_near_full && quota)
        goto more_data;
    if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
        if (quota) {
            num_pending =
                dp_rx_srng_get_num_pending(hal_soc,
                               hal_ring_hdl,
                               num_entries,
                               &near_full);
            if (num_pending) {
                DP_STATS_INC(soc, rx.hp_oos2, 1);

                if (!hif_exec_should_yield(scn, intr_id))
                    goto more_data;

                if (qdf_unlikely(near_full)) {
                    DP_STATS_INC(soc, rx.near_full, 1);
                    goto more_data;
                }
            }
        }

        if (vdev && vdev->osif_fisa_flush)
            vdev->osif_fisa_flush(soc, reo_ring_num);

        if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
            vdev->osif_gro_flush(vdev->osif_vdev,
                         reo_ring_num);
        }
    }

    /* Update histogram statistics by looping through pdevs */
    DP_RX_HIST_STATS_PER_PDEV();

    return rx_bufs_used; /* Assume no scale factor for now */
}
#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_pool_init_be_cc() - initialize the RX desc pool for cookie
 *	conversion
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */
static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
               struct rx_desc_pool *rx_desc_pool,
               uint32_t pool_id)
{
    struct dp_hw_cookie_conversion_t *cc_ctx;
    struct dp_soc_be *be_soc;
    union dp_rx_desc_list_elem_t *rx_desc_elem;
    struct dp_spt_page_desc *page_desc;
    uint32_t ppt_idx = 0;
    uint32_t avail_entry_index = 0;

    if (!rx_desc_pool->pool_size) {
        dp_err("desc_num 0 !!");
        return QDF_STATUS_E_FAILURE;
    }

    be_soc = dp_get_be_soc_from_dp_soc(soc);
    cc_ctx = &be_soc->rx_cc_ctx[pool_id];

    page_desc = &cc_ctx->page_desc_base[0];
    rx_desc_elem = rx_desc_pool->freelist;
    while (rx_desc_elem) {
        if (avail_entry_index == 0) {
            if (ppt_idx >= cc_ctx->total_page_num) {
                dp_alert("insufficient secondary page tables");
                qdf_assert_always(0);
            }
            page_desc = &cc_ctx->page_desc_base[ppt_idx++];
        }

        /* Store each RX desc VA in the SPT pages and get the
         * corresponding cookie ID.
         */
        DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
                     avail_entry_index,
                     &rx_desc_elem->rx_desc);
        rx_desc_elem->rx_desc.cookie =
            dp_cc_desc_id_generate(page_desc->ppt_index,
                           avail_entry_index);
        rx_desc_elem->rx_desc.chip_id = dp_mlo_get_chip_id(soc);
        rx_desc_elem->rx_desc.pool_id = pool_id;
        rx_desc_elem->rx_desc.in_use = 0;
        rx_desc_elem = rx_desc_elem->next;

        avail_entry_index = (avail_entry_index + 1) &
                    DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
    }

    return QDF_STATUS_SUCCESS;
}
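/*
 * Cookie layout sketch (illustrative, not authoritative): the generated
 * cookie packs the secondary page table (SPT) index in its upper bits
 * and the entry index within that page in its lower bits, roughly
 *
 *     cookie = (ppt_index << page_shift) | avail_entry_index;
 *
 * so dp_cc_desc_find() can index page_desc_base[] and the page entries
 * directly to recover the descriptor VA.
 */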
#else
static QDF_STATUS
dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
               struct rx_desc_pool *rx_desc_pool,
               uint32_t pool_id)
{
    struct dp_hw_cookie_conversion_t *cc_ctx;
    struct dp_soc_be *be_soc;
    struct dp_spt_page_desc *page_desc;
    uint32_t ppt_idx = 0;
    uint32_t avail_entry_index = 0;
    int i = 0;

    if (!rx_desc_pool->pool_size) {
        dp_err("desc_num 0 !!");
        return QDF_STATUS_E_FAILURE;
    }

    be_soc = dp_get_be_soc_from_dp_soc(soc);
    cc_ctx = &be_soc->rx_cc_ctx[pool_id];

    page_desc = &cc_ctx->page_desc_base[0];
    for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
        if (i == rx_desc_pool->pool_size - 1)
            rx_desc_pool->array[i].next = NULL;
        else
            rx_desc_pool->array[i].next =
                &rx_desc_pool->array[i + 1];

        if (avail_entry_index == 0) {
            if (ppt_idx >= cc_ctx->total_page_num) {
                dp_alert("insufficient secondary page tables");
                qdf_assert_always(0);
            }
            page_desc = &cc_ctx->page_desc_base[ppt_idx++];
        }

        /* Store each RX desc VA in the SPT pages and get the
         * corresponding cookie ID.
         */
        DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
                     avail_entry_index,
                     &rx_desc_pool->array[i].rx_desc);
        rx_desc_pool->array[i].rx_desc.cookie =
            dp_cc_desc_id_generate(page_desc->ppt_index,
                           avail_entry_index);
        rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
        rx_desc_pool->array[i].rx_desc.in_use = 0;
        rx_desc_pool->array[i].rx_desc.chip_id =
            dp_mlo_get_chip_id(soc);

        avail_entry_index = (avail_entry_index + 1) &
                    DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
    }

    return QDF_STATUS_SUCCESS;
}
#endif
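/**
 * dp_rx_desc_pool_deinit_be_cc() - de-initialize the cookie conversion
 *	context for a RX desc pool by zeroing out its SPT pages
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: void
 */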
static void
dp_rx_desc_pool_deinit_be_cc(struct dp_soc *soc,
                 struct rx_desc_pool *rx_desc_pool,
                 uint32_t pool_id)
{
    struct dp_spt_page_desc *page_desc;
    struct dp_soc_be *be_soc;
    int i = 0;
    struct dp_hw_cookie_conversion_t *cc_ctx;

    be_soc = dp_get_be_soc_from_dp_soc(soc);
    cc_ctx = &be_soc->rx_cc_ctx[pool_id];

    for (i = 0; i < cc_ctx->total_page_num; i++) {
        page_desc = &cc_ctx->page_desc_base[i];
        qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
    }
}
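/**
 * dp_rx_desc_pool_init_be() - initialize a RX desc pool; the regular RX
 *	buffer pool takes the HW cookie conversion init path, all other
 *	pool types use the generic init
 * @soc: Handle to DP Soc structure
 * @rx_desc_pool: Rx descriptor pool handler
 * @pool_id: Rx descriptor pool ID
 *
 * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
 */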
QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
                   struct rx_desc_pool *rx_desc_pool,
                   uint32_t pool_id)
{
    QDF_STATUS status = QDF_STATUS_SUCCESS;

    /* Only the regular RX buffer desc pool uses HW cookie conversion */
    if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE) {
        dp_info("rx_desc_buf pool init");
        status = dp_rx_desc_pool_init_be_cc(soc,
                            rx_desc_pool,
                            pool_id);
    } else {
        dp_info("non_rx_desc_buf_pool init");
        status = dp_rx_desc_pool_init_generic(soc, rx_desc_pool,
                              pool_id);
    }

    return status;
}

void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
                   struct rx_desc_pool *rx_desc_pool,
                   uint32_t pool_id)
{
    if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE)
        dp_rx_desc_pool_deinit_be_cc(soc, rx_desc_pool, pool_id);
}
#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
                           void *ring_desc,
                           struct dp_rx_desc **r_rx_desc)
{
    if (hal_rx_wbm_get_cookie_convert_done(ring_desc)) {
        /* HW cookie conversion done */
        *r_rx_desc = (struct dp_rx_desc *)
                hal_rx_wbm_get_desc_va(ring_desc);
    } else {
        /* SW does the cookie conversion */
        uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);

        *r_rx_desc = (struct dp_rx_desc *)
                dp_cc_desc_find(soc, cookie);
    }

    return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
                           void *ring_desc,
                           struct dp_rx_desc **r_rx_desc)
{
    *r_rx_desc = (struct dp_rx_desc *)
            hal_rx_wbm_get_desc_va(ring_desc);

    return QDF_STATUS_SUCCESS;
}
#endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
#else
QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
                           void *ring_desc,
                           struct dp_rx_desc **r_rx_desc)
{
    /* SW does the cookie conversion */
    uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);

    *r_rx_desc = (struct dp_rx_desc *)
            dp_cc_desc_find(soc, cookie);

    return QDF_STATUS_SUCCESS;
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
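/**
 * dp_rx_desc_cookie_2_va_be() - convert an RX SW cookie to the RX
 *	descriptor virtual address via the cookie conversion table
 * @soc: DP soc handle
 * @cookie: cookie carried in the ring/buffer descriptor
 *
 * Return: pointer to the struct dp_rx_desc for @cookie
 */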
struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
                         uint32_t cookie)
{
    return (struct dp_rx_desc *)dp_cc_desc_find(soc, cookie);
}

#if defined(WLAN_FEATURE_11BE_MLO)
#if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
#define DP_RANDOM_MAC_ID_BIT_MASK	0xC0
#define DP_RANDOM_MAC_OFFSET	1
#define DP_MAC_LOCAL_ADMBIT_MASK	0x2
#define DP_MAC_LOCAL_ADMBIT_OFFSET	0
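/**
 * dp_rx_dummy_src_mac() - overwrite the ethernet source address with a
 *	locally administered variant of the vdev MLD MAC, so that the
 *	copy delivered on the mcast primary vdev is not mistaken for a
 *	self-sourced frame
 * @vdev: DP vdev handle
 * @nbuf: RX packet buffer
 *
 * Return: void
 */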
static inline void dp_rx_dummy_src_mac(struct dp_vdev *vdev,
                       qdf_nbuf_t nbuf)
{
    uint8_t random_mac[QDF_MAC_ADDR_SIZE] = {0};
    qdf_ether_header_t *eh =
            (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

    qdf_mem_copy(random_mac, &vdev->mld_mac_addr.raw[0], QDF_MAC_ADDR_SIZE);
    random_mac[DP_MAC_LOCAL_ADMBIT_OFFSET] =
        random_mac[DP_MAC_LOCAL_ADMBIT_OFFSET] |
        DP_MAC_LOCAL_ADMBIT_MASK;
    random_mac[DP_RANDOM_MAC_OFFSET] =
        random_mac[DP_RANDOM_MAC_OFFSET] ^ DP_RANDOM_MAC_ID_BIT_MASK;

    qdf_mem_copy(&eh->ether_shost[0], random_mac, QDF_MAC_ADDR_SIZE);
}

#ifdef QCA_SUPPORT_WDS_EXTENDED
static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
{
    return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
}
#else
static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
{
    return false;
}
#endif
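/**
 * dp_rx_mlo_igmp_handler() - handle IGMP/MLD packets received on an MLO
 *	vdev: deliver them on the link vdev they arrived on, or re-route
 *	them to the mcast primary vdev where applicable
 * @soc: DP soc handle
 * @vdev: DP vdev on which the packet was received
 * @peer: txrx peer handle
 * @nbuf: IGMP packet buffer
 *
 * Return: true if the packet was consumed here, false otherwise
 */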
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
                struct dp_vdev *vdev,
                struct dp_txrx_peer *peer,
                qdf_nbuf_t nbuf)
{
    struct dp_vdev *mcast_primary_vdev = NULL;
    struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
    struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
    uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
    struct cdp_tid_rx_stats *tid_stats = &peer->vdev->pdev->stats.
                    tid_stats.tid_rx_wbm_stats[0][tid];

    if (!(qdf_nbuf_is_ipv4_igmp_pkt(nbuf) ||
          qdf_nbuf_is_ipv6_igmp_pkt(nbuf)))
        return false;

    if (qdf_unlikely(vdev->multipass_en)) {
        if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
            DP_PEER_PER_PKT_STATS_INC(peer,
                          rx.multipass_rx_pkt_drop, 1);
            return false;
        }
    }

    if (!peer->bss_peer) {
        if (dp_rx_intrabss_mcbc_fwd(soc, peer, NULL, nbuf, tid_stats))
            dp_rx_err("forwarding failed");
    }

    /*
     * In the case of ME6, backhaul WDS and NAWDS, send the igmp pkt
     * on the same link on which it was received, as these features
     * use peer-based tcl metadata.
     */
    qdf_nbuf_set_next(nbuf, NULL);

    if (vdev->mcast_enhancement_en || be_vdev->mcast_primary ||
        peer->nawds_enabled)
        goto send_pkt;

    if (qdf_unlikely(dp_rx_mlo_igmp_wds_ext_handler(peer)))
        goto send_pkt;

    mcast_primary_vdev = dp_mlo_get_mcast_primary_vdev(be_soc, be_vdev,
                               DP_MOD_ID_RX);
    if (!mcast_primary_vdev) {
        dp_rx_debug("Non mlo vdev");
        goto send_pkt;
    }

    if (qdf_unlikely(vdev->wrap_vdev)) {
        /* For a qwrap repeater, send the original packet on the
         * interface on which it was received, and a copy with a
         * dummy source MAC on the mcast primary interface.
         */
        qdf_nbuf_t nbuf_copy;

        nbuf_copy = qdf_nbuf_copy(nbuf);
        if (qdf_likely(nbuf_copy))
            dp_rx_deliver_to_stack(soc, vdev, peer, nbuf_copy,
                           NULL);
    }

    dp_rx_dummy_src_mac(vdev, nbuf);
    dp_rx_deliver_to_stack(mcast_primary_vdev->pdev->soc,
                   mcast_primary_vdev,
                   peer,
                   nbuf,
                   NULL);
    dp_vdev_unref_delete(mcast_primary_vdev->pdev->soc,
                 mcast_primary_vdev,
                 DP_MOD_ID_RX);
    return true;
send_pkt:
    dp_rx_deliver_to_stack(be_vdev->vdev.pdev->soc,
                   &be_vdev->vdev,
                   peer,
                   nbuf,
                   NULL);
    return true;
}
#else
bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
                struct dp_vdev *vdev,
                struct dp_txrx_peer *peer,
                qdf_nbuf_t nbuf)
{
    return false;
}
#endif
#endif

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
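/**
 * dp_rx_nf_process() - near-full interrupt handler for the REO dest
 *	ring; it only flags the ring as near-full so that the regular Rx
 *	context drains it with an elevated reap limit
 * @int_ctx: interrupt context
 * @hal_ring_hdl: REO destination ring handle
 * @reo_ring_num: REO ring number
 * @quota: quota for this processing round
 *
 * Return: work done (0 when the ring is below the near-full threshold)
 */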
uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
              hal_ring_handle_t hal_ring_hdl,
              uint8_t reo_ring_num,
              uint32_t quota)
{
    struct dp_soc *soc = int_ctx->soc;
    struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
    uint32_t work_done = 0;

    if (dp_srng_get_near_full_level(soc, rx_ring) <
            DP_SRNG_THRESH_NEAR_FULL)
        return 0;

    qdf_atomic_set(&rx_ring->near_full, 1);
    work_done++;

    return work_done;
}
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_intrabss_fwd_mlo_allow() - check if MLO forwarding is allowed
 * @ta_peer: transmitter peer handle
 * @da_peer: destination peer handle
 *
 * Return: true - MLO forwarding case, false: not
 */
static inline bool
dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
                 struct dp_txrx_peer *da_peer)
{
    /* TA peer and DA peer's vdev should be partner MLO vdevs */
    if (dp_peer_find_mac_addr_cmp(&ta_peer->vdev->mld_mac_addr,
                      &da_peer->vdev->mld_mac_addr))
        return false;

    return true;
}
#else
static inline bool
dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
                 struct dp_txrx_peer *da_peer)
{
    return false;
}
#endif
#ifdef INTRA_BSS_FWD_OFFLOAD
/**
 * dp_rx_intrabss_ucast_check_be() - Check if intrabss is allowed
 *	for unicast frame
 * @nbuf: RX packet buffer
 * @ta_peer: transmitter DP peer handle
 * @rx_tlv_hdr: Rx TLV header
 * @msdu_metadata: MSDU meta data info
 * @params: params to be filled in
 *
 * Return: true - intrabss allowed
 *	   false - not allow
 */
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
                  struct dp_txrx_peer *ta_peer,
                  uint8_t *rx_tlv_hdr,
                  struct hal_rx_msdu_metadata *msdu_metadata,
                  struct dp_be_intrabss_params *params)
{
    uint8_t dest_chip_id, dest_chip_pmac_id;
    struct dp_vdev_be *be_vdev =
        dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
    struct dp_soc_be *be_soc =
        dp_get_be_soc_from_dp_soc(params->dest_soc);

    if (!qdf_nbuf_is_intra_bss(nbuf))
        return false;

    hal_rx_tlv_get_dest_chip_pmac_id(rx_tlv_hdr,
                     &dest_chip_id,
                     &dest_chip_pmac_id);
    qdf_assert_always(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1));

    if (dest_chip_id == be_soc->mlo_chip_id) {
        /* TODO: adding to self list is better */
        params->tx_vdev_id = ta_peer->vdev->vdev_id;
        return true;
    }

    params->dest_soc =
        dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
                          dest_chip_id);
    if (!params->dest_soc)
        return false;

    params->tx_vdev_id =
        be_vdev->partner_vdev_list[dest_chip_id][dest_chip_pmac_id];

    return true;
}
#else
#ifdef WLAN_MLO_MULTI_CHIP
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
                  struct dp_txrx_peer *ta_peer,
                  uint8_t *rx_tlv_hdr,
                  struct hal_rx_msdu_metadata *msdu_metadata,
                  struct dp_be_intrabss_params *params)
{
    uint16_t da_peer_id;
    struct dp_txrx_peer *da_peer;
    bool ret = false;
    uint8_t dest_chip_id;
    dp_txrx_ref_handle txrx_ref_handle = NULL;
    struct dp_vdev_be *be_vdev =
        dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
    struct dp_soc_be *be_soc =
        dp_get_be_soc_from_dp_soc(params->dest_soc);

    if (!(qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf)))
        return false;

    dest_chip_id = HAL_RX_DEST_CHIP_ID_GET(msdu_metadata);
    qdf_assert_always(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1));
    da_peer_id = HAL_RX_PEER_ID_GET(msdu_metadata);

    /* use dest chip id when TA is MLD peer and DA is legacy */
    if (be_soc->mlo_enabled &&
        ta_peer->mld_peer &&
        !(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
        /* validate chip_id, get a ref, and re-assign soc */
        params->dest_soc =
            dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
                              dest_chip_id);
        if (!params->dest_soc)
            return false;

        da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
                             da_peer_id,
                             &txrx_ref_handle,
                             DP_MOD_ID_RX);
        if (!da_peer)
            return false;
    } else {
        da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
                             da_peer_id,
                             &txrx_ref_handle,
                             DP_MOD_ID_RX);
        if (!da_peer)
            return false;

        params->dest_soc = da_peer->vdev->pdev->soc;
        if (!params->dest_soc)
            goto rel_da_peer;
    }

    params->tx_vdev_id = da_peer->vdev->vdev_id;

    /* If the source or destination peer is in the isolation list,
     * don't forward; instead push to the bridge stack.
     */
    if (dp_get_peer_isolation(ta_peer) ||
        dp_get_peer_isolation(da_peer)) {
        ret = false;
        goto rel_da_peer;
    }

    if (da_peer->bss_peer || (da_peer == ta_peer)) {
        ret = false;
        goto rel_da_peer;
    }

    /* Same vdev, support Intra-BSS */
    if (da_peer->vdev == ta_peer->vdev) {
        ret = true;
        goto rel_da_peer;
    }

    /* MLO specific Intra-BSS check */
    if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
        /* use dest chip id for legacy dest peer */
        if (!(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
            if (!(be_vdev->partner_vdev_list[dest_chip_id][0] ==
                  params->tx_vdev_id) &&
                !(be_vdev->partner_vdev_list[dest_chip_id][1] ==
                  params->tx_vdev_id)) {
                /*dp_soc_unref_delete(soc);*/
                goto rel_da_peer;
            }
        }
        ret = true;
    }

rel_da_peer:
    dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
    return ret;
}
#else
static bool
dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
                  struct dp_txrx_peer *ta_peer,
                  uint8_t *rx_tlv_hdr,
                  struct hal_rx_msdu_metadata *msdu_metadata,
                  struct dp_be_intrabss_params *params)
{
    uint16_t da_peer_id;
    struct dp_txrx_peer *da_peer;
    bool ret = false;
    dp_txrx_ref_handle txrx_ref_handle = NULL;

    if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
        return false;

    da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
                        params->dest_soc,
                        msdu_metadata->da_idx);

    da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
                         &txrx_ref_handle, DP_MOD_ID_RX);
    if (!da_peer)
        return false;

    params->tx_vdev_id = da_peer->vdev->vdev_id;

    /* If the source or destination peer is in the isolation list,
     * don't forward; instead push to the bridge stack.
     */
    if (dp_get_peer_isolation(ta_peer) ||
        dp_get_peer_isolation(da_peer))
        goto rel_da_peer;

    if (da_peer->bss_peer || da_peer == ta_peer)
        goto rel_da_peer;

    /* Same vdev, support Intra-BSS */
    if (da_peer->vdev == ta_peer->vdev) {
        ret = true;
        goto rel_da_peer;
    }

    /* MLO specific Intra-BSS check */
    if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
        ret = true;
        goto rel_da_peer;
    }

rel_da_peer:
    dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
    return ret;
}
#endif /* WLAN_MLO_MULTI_CHIP */
#endif /* INTRA_BSS_FWD_OFFLOAD */

#if defined(QCA_MONITOR_2_0_SUPPORT) || defined(CONFIG_WORD_BASED_TLV)
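/**
 * dp_rx_word_mask_subscribe_be() - fill the rx_mpdu_start and
 *	rx_msdu_end word masks into the HTT RX ring selection cfg message
 * @soc: DP soc handle
 * @msg_word: pointer to the current HTT message word
 * @rx_filter: HTT RX ring TLV filter
 *
 * Return: void
 */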
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
                  uint32_t *msg_word,
                  void *rx_filter)
{
    struct htt_rx_ring_tlv_filter *tlv_filter =
                (struct htt_rx_ring_tlv_filter *)rx_filter;

    if (!msg_word || !tlv_filter)
        return;

    /* if word mask is zero, FW will set the default values */
    if (!(tlv_filter->rx_mpdu_start_wmask > 0 &&
          tlv_filter->rx_msdu_end_wmask > 0)) {
        msg_word += 4;
        *msg_word = 0;
        goto config_mon;
    }

    HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACTION_ENABLE_SET(*msg_word, 1);

    /* word 14 */
    msg_word += 3;
    *msg_word = 0;
    HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_WORD_MASK_SET(
                *msg_word,
                tlv_filter->rx_mpdu_start_wmask);

    /* word 15 */
    msg_word++;
    *msg_word = 0;
    HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_WORD_MASK_SET(
                *msg_word,
                tlv_filter->rx_msdu_end_wmask);
config_mon:
    msg_word--;
    dp_mon_rx_wmask_subscribe(soc, msg_word, tlv_filter);
}
#else
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
                  uint32_t *msg_word,
                  void *rx_filter)
{
}
#endif
/*
 * dp_rx_intrabss_handle_nawds_be() - Forward mcbc intrabss pkts in nawds case
 * @soc: core txrx main context
 * @ta_txrx_peer: source txrx_peer entry
 * @nbuf_copy: nbuf that has to be intrabss forwarded
 * @tid_stats: tid_stats structure
 *
 * Return: true if it is forwarded else false
 */
bool
dp_rx_intrabss_handle_nawds_be(struct dp_soc *soc,
                   struct dp_txrx_peer *ta_txrx_peer,
                   qdf_nbuf_t nbuf_copy,
                   struct cdp_tid_rx_stats *tid_stats)
{
    if (qdf_unlikely(ta_txrx_peer->vdev->nawds_enabled)) {
        struct cdp_tx_exception_metadata tx_exc_metadata = {0};
        uint16_t len = QDF_NBUF_CB_RX_PKT_LEN(nbuf_copy);

        tx_exc_metadata.peer_id = ta_txrx_peer->peer_id;
        tx_exc_metadata.is_intrabss_fwd = 1;
        tx_exc_metadata.tid = HTT_TX_EXT_TID_INVALID;

        if (dp_tx_send_exception((struct cdp_soc_t *)soc,
                     ta_txrx_peer->vdev->vdev_id,
                     nbuf_copy,
                     &tx_exc_metadata)) {
            DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
                              rx.intra_bss.fail, 1,
                              len);
            tid_stats->fail_cnt[INTRABSS_DROP]++;
            qdf_nbuf_free(nbuf_copy);
        } else {
            DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
                              rx.intra_bss.pkts, 1,
                              len);
            tid_stats->intrabss_cnt++;
        }
        return true;
    }
    return false;
}
/*
 * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For an EAPOL
 * pkt with DA not equal to the vdev mac addr, fwd is not allowed.
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if it is forwarded else false
 */
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
               uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
               struct hal_rx_msdu_metadata msdu_metadata)
{
    uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
    uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
    struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
                    tid_stats.tid_rx_stats[ring_id][tid];
    bool ret = false;
    struct dp_be_intrabss_params params;

    /* If it is a broadcast pkt (e.g. ARP) and it is not its own
     * source, then clone the pkt and send the cloned pkt for
     * intra BSS forwarding and the original pkt up the network stack.
     * Note: how do we handle multicast pkts? Do we forward
     * all multicast pkts as is, or let a higher layer module
     * like igmpsnoop decide whether to forward or not with
     * Mcast enhancement.
     */
    if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer) {
        return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
                           nbuf, tid_stats);
    }

    if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
                        nbuf))
        return true;

    params.dest_soc = soc;
    if (dp_rx_intrabss_ucast_check_be(nbuf, ta_peer, rx_tlv_hdr,
                      &msdu_metadata, &params)) {
        ret = dp_rx_intrabss_ucast_fwd(params.dest_soc, ta_peer,
                           params.tx_vdev_id,
                           rx_tlv_hdr, nbuf, tid_stats);
    }

    return ret;
}
#endif
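/**
 * dp_rx_chain_msdus_be() - chain msdus received from an invalid peer
 *	into the per-pdev invalid peer list until the full MPDU has
 *	been received
 * @soc: DP soc handle
 * @nbuf: RX packet buffer
 * @rx_tlv_hdr: start address of rx tlvs
 * @mac_id: LMAC id of the pdev the buffer belongs to
 *
 * Return: true once the last buffer of the MPDU has been chained
 */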
bool dp_rx_chain_msdus_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
              uint8_t *rx_tlv_hdr, uint8_t mac_id)
{
    bool mpdu_done = false;
    qdf_nbuf_t curr_nbuf = NULL;
    qdf_nbuf_t tmp_nbuf = NULL;

    struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

    if (!dp_pdev) {
        dp_rx_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
        return mpdu_done;
    }

    /* If the invalid peer SG list has reached its maximum size, free
     * the buffers in the list and treat the current buffer as the
     * start of a new list.
     *
     * The current logic to detect the last buffer from attn_tlv is
     * not reliable in the OFDMA UL scenario, hence the max-buffers
     * check is added to avoid the list piling up.
     */
    if (!dp_pdev->first_nbuf ||
        (dp_pdev->invalid_peer_head_msdu &&
         QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
         (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
        qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
        dp_pdev->first_nbuf = true;

        /* If the newly received nbuf is the first msdu of an
         * amsdu and there are msdus in the invalid peer msdu
         * list, free all the msdus of that list.
         * This scenario can happen when a new a-msdu starts
         * arriving before the previous a-msdu has been
         * completely received.
         */
        curr_nbuf = dp_pdev->invalid_peer_head_msdu;
        while (curr_nbuf) {
            tmp_nbuf = curr_nbuf->next;
            dp_rx_nbuf_free(curr_nbuf);
            curr_nbuf = tmp_nbuf;
        }

        dp_pdev->invalid_peer_head_msdu = NULL;
        dp_pdev->invalid_peer_tail_msdu = NULL;

        dp_monitor_get_mpdu_status(dp_pdev, soc, rx_tlv_hdr);
    }

    if (qdf_nbuf_is_rx_chfrag_end(nbuf) &&
        hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
        qdf_assert_always(dp_pdev->first_nbuf);
        dp_pdev->first_nbuf = false;
        mpdu_done = true;
    }

    /*
     * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
     * should be NULL here; the check is kept for debugging in case
     * of corner cases.
     */
    DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
                    dp_pdev->invalid_peer_tail_msdu);

    DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
              dp_pdev->invalid_peer_tail_msdu,
              nbuf);

    return mpdu_done;
}