dp_be_rx.c

  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #include "cdp_txrx_cmn_struct.h"
  20. #include "hal_hw_headers.h"
  21. #include "dp_types.h"
  22. #include "dp_rx.h"
  23. #include "dp_tx.h"
  24. #include "dp_be_rx.h"
  25. #include "dp_peer.h"
  26. #include "hal_rx.h"
  27. #include "hal_be_rx.h"
  28. #include "hal_api.h"
  29. #include "hal_be_api.h"
  30. #include "qdf_nbuf.h"
  31. #include "hal_be_rx_tlv.h"
  32. #ifdef MESH_MODE_SUPPORT
  33. #include "if_meta_hdr.h"
  34. #endif
  35. #include "dp_internal.h"
  36. #include "dp_ipa.h"
  37. #ifdef FEATURE_WDS
  38. #include "dp_txrx_wds.h"
  39. #endif
  40. #include "dp_hist.h"
  41. #include "dp_rx_buffer_pool.h"
  42. #ifdef WLAN_SUPPORT_RX_FLOW_TAG
  43. static inline void
  44. dp_rx_update_flow_info(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
  45. {
  46. qdf_nbuf_set_rx_flow_idx_invalid(nbuf,
  47. hal_rx_msdu_flow_idx_invalid_be(rx_tlv_hdr));
  48. qdf_nbuf_set_rx_flow_idx_timeout(nbuf,
  49. hal_rx_msdu_flow_idx_timeout_be(rx_tlv_hdr));
  50. }
  51. #else
  52. static inline void
  53. dp_rx_update_flow_info(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
  54. {
  55. }
  56. #endif
  57. #ifndef AST_OFFLOAD_ENABLE
  58. static void
  59. dp_rx_wds_learn(struct dp_soc *soc,
  60. struct dp_vdev *vdev,
  61. uint8_t *rx_tlv_hdr,
  62. struct dp_txrx_peer *txrx_peer,
  63. qdf_nbuf_t nbuf,
  64. struct hal_rx_msdu_metadata msdu_metadata)
  65. {
  66. /* WDS Source Port Learning */
  67. if (qdf_likely(vdev->wds_enabled))
  68. dp_rx_wds_srcport_learn(soc,
  69. rx_tlv_hdr,
  70. txrx_peer,
  71. nbuf,
  72. msdu_metadata);
  73. }
  74. #else
  75. #ifdef QCA_SUPPORT_WDS_EXTENDED
  76. /**
  77. * dp_wds_ext_peer_learn_be() - function to send event to control
  78. * path on receiving 1st 4-address frame from backhaul.
  79. * @soc: DP soc
  80. * @ta_txrx_peer: WDS repeater txrx peer
  81. * @rx_tlv_hdr : start address of rx tlvs
  82. * @nbuf: RX packet buffer
  83. *
  84. * Return: void
  85. */
  86. static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
  87. struct dp_txrx_peer *ta_txrx_peer,
  88. uint8_t *rx_tlv_hdr,
  89. qdf_nbuf_t nbuf)
  90. {
  91. uint8_t wds_ext_src_mac[QDF_MAC_ADDR_SIZE];
  92. struct dp_peer *ta_base_peer;
  93. /* instead of checking whether addr4 is valid in the per-packet path,
  94. * check the init bit, which is set on reception of the
  95. * first addr4-valid packet.
  96. */
  97. if (!ta_txrx_peer->vdev->wds_ext_enabled ||
  98. qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
  99. &ta_txrx_peer->wds_ext.init))
  100. return;
  101. if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
  102. hal_rx_get_mpdu_mac_ad4_valid_be(rx_tlv_hdr)) {
  103. qdf_atomic_test_and_set_bit(WDS_EXT_PEER_INIT_BIT,
  104. &ta_txrx_peer->wds_ext.init);
  105. ta_base_peer = dp_peer_get_ref_by_id(soc, ta_txrx_peer->peer_id,
  106. DP_MOD_ID_RX);
  107. if (!ta_base_peer)
  108. return;
  109. qdf_mem_copy(wds_ext_src_mac, &ta_base_peer->mac_addr.raw[0],
  110. QDF_MAC_ADDR_SIZE);
  111. dp_peer_unref_delete(ta_base_peer, DP_MOD_ID_RX);
  112. soc->cdp_soc.ol_ops->rx_wds_ext_peer_learn(
  113. soc->ctrl_psoc,
  114. ta_txrx_peer->peer_id,
  115. ta_txrx_peer->vdev->vdev_id,
  116. wds_ext_src_mac);
  117. }
  118. }
  119. #else
  120. static inline void dp_wds_ext_peer_learn_be(struct dp_soc *soc,
  121. struct dp_txrx_peer *ta_txrx_peer,
  122. uint8_t *rx_tlv_hdr,
  123. qdf_nbuf_t nbuf)
  124. {
  125. }
  126. #endif
  127. static void
  128. dp_rx_wds_learn(struct dp_soc *soc,
  129. struct dp_vdev *vdev,
  130. uint8_t *rx_tlv_hdr,
  131. struct dp_txrx_peer *ta_txrx_peer,
  132. qdf_nbuf_t nbuf,
  133. struct hal_rx_msdu_metadata msdu_metadata)
  134. {
  135. dp_wds_ext_peer_learn_be(soc, ta_txrx_peer, rx_tlv_hdr, nbuf);
  136. }
  137. #endif
  138. #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
  139. static inline void
  140. dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
  141. {
  142. uint8_t lmac_id;
  143. lmac_id = dp_rx_peer_metadata_lmac_id_get_be(peer_mdata);
  144. qdf_nbuf_set_lmac_id(nbuf, lmac_id);
  145. }
  146. #else
  147. static inline void
  148. dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
  149. {
  150. }
  151. #endif
  152. /**
  153. * dp_rx_process_be() - Brain of the Rx processing functionality
  154. * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
  155. * @int_ctx: per interrupt context
  156. * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
  157. * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
  158. * @quota: No. of units (packets) that can be serviced in one shot.
  159. *
  160. * This function implements the core of Rx functionality. This is
  161. * expected to handle only non-error frames.
  162. *
  163. * Return: uint32_t: No. of elements processed
  164. */
  165. uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
  166. hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
  167. uint32_t quota)
  168. {
  169. hal_ring_desc_t ring_desc;
  170. hal_ring_desc_t last_prefetched_hw_desc;
  171. hal_soc_handle_t hal_soc;
  172. struct dp_rx_desc *rx_desc = NULL;
  173. struct dp_rx_desc *last_prefetched_sw_desc = NULL;
  174. qdf_nbuf_t nbuf, next;
  175. bool near_full;
  176. union dp_rx_desc_list_elem_t *head[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
  177. union dp_rx_desc_list_elem_t *tail[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
  178. uint32_t num_pending = 0;
  179. uint32_t rx_bufs_used = 0, rx_buf_cookie;
  180. uint16_t msdu_len = 0;
  181. uint16_t peer_id;
  182. uint8_t vdev_id;
  183. struct dp_txrx_peer *txrx_peer;
  184. dp_txrx_ref_handle txrx_ref_handle = NULL;
  185. struct dp_vdev *vdev;
  186. uint32_t pkt_len = 0;
  187. struct hal_rx_mpdu_desc_info mpdu_desc_info;
  188. struct hal_rx_msdu_desc_info msdu_desc_info;
  189. enum hal_reo_error_status error;
  190. uint32_t peer_mdata;
  191. uint8_t *rx_tlv_hdr;
  192. uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
  193. uint8_t mac_id = 0;
  194. struct dp_pdev *rx_pdev;
  195. bool enh_flag;
  196. struct dp_srng *dp_rxdma_srng;
  197. struct rx_desc_pool *rx_desc_pool;
  198. struct dp_soc *soc = int_ctx->soc;
  199. struct cdp_tid_rx_stats *tid_stats;
  200. qdf_nbuf_t nbuf_head;
  201. qdf_nbuf_t nbuf_tail;
  202. qdf_nbuf_t deliver_list_head;
  203. qdf_nbuf_t deliver_list_tail;
  204. uint32_t num_rx_bufs_reaped = 0;
  205. uint32_t intr_id;
  206. struct hif_opaque_softc *scn;
  207. int32_t tid = 0;
  208. bool is_prev_msdu_last = true;
  209. uint32_t num_entries_avail = 0;
  210. uint32_t rx_ol_pkt_cnt = 0;
  211. uint32_t num_entries = 0;
  212. struct hal_rx_msdu_metadata msdu_metadata;
  213. QDF_STATUS status;
  214. qdf_nbuf_t ebuf_head;
  215. qdf_nbuf_t ebuf_tail;
  216. uint8_t pkt_capture_offload = 0;
  217. struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
  218. int max_reap_limit, ring_near_full;
  219. struct dp_soc *replenish_soc;
  220. uint8_t chip_id;
  221. uint64_t current_time = 0;
  222. uint32_t old_tid;
  223. uint32_t peer_ext_stats;
  224. uint32_t dsf;
  225. DP_HIST_INIT();
  226. qdf_assert_always(soc && hal_ring_hdl);
  227. hal_soc = soc->hal_soc;
  228. qdf_assert_always(hal_soc);
  229. scn = soc->hif_handle;
  230. intr_id = int_ctx->dp_intr_id;
  231. num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
  232. dp_runtime_pm_mark_last_busy(soc);
  233. more_data:
  234. /* reset local variables here to be re-used in the function */
  235. nbuf_head = NULL;
  236. nbuf_tail = NULL;
  237. deliver_list_head = NULL;
  238. deliver_list_tail = NULL;
  239. txrx_peer = NULL;
  240. vdev = NULL;
  241. num_rx_bufs_reaped = 0;
  242. ebuf_head = NULL;
  243. ebuf_tail = NULL;
  244. ring_near_full = 0;
  245. max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
  246. qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
  247. qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
  248. qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
  249. qdf_mem_zero(head, sizeof(head));
  250. qdf_mem_zero(tail, sizeof(tail));
  251. old_tid = 0xff;
  252. dsf = 0;
  253. peer_ext_stats = 0;
  254. rx_pdev = NULL;
  255. tid_stats = NULL;
  256. dp_pkt_get_timestamp(&current_time);
  257. ring_near_full = _dp_srng_test_and_update_nf_params(soc, rx_ring,
  258. &max_reap_limit);
  259. peer_ext_stats = wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
  260. if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
  261. /*
  262. * Need API to convert from hal_ring pointer to
  263. * Ring Type / Ring Id combo
  264. */
  265. DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
  266. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  267. FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
  268. goto done;
  269. }
  270. hal_srng_update_ring_usage_wm_no_lock(soc->hal_soc, hal_ring_hdl);
  271. if (!num_pending)
  272. num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
  273. if (num_pending > quota)
  274. num_pending = quota;
  275. dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_pending);
  276. last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
  277. hal_ring_hdl,
  278. num_pending);
  279. /*
  280. * start reaping the buffers from reo ring and queue
  281. * them in per vdev queue.
  282. * Process the received pkts in a different per vdev loop.
  283. */
  284. while (qdf_likely(num_pending)) {
  285. ring_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
  286. if (qdf_unlikely(!ring_desc))
  287. break;
  288. error = HAL_RX_ERROR_STATUS_GET(ring_desc);
  289. if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
  290. dp_rx_err("%pK: HAL RING 0x%pK:error %d",
  291. soc, hal_ring_hdl, error);
  292. DP_STATS_INC(soc, rx.err.hal_reo_error[reo_ring_num],
  293. 1);
  294. /* Don't know how to deal with this -- assert */
  295. qdf_assert(0);
  296. }
  297. dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
  298. rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
  299. status = dp_rx_cookie_check_and_invalidate(ring_desc);
  300. if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
  301. DP_STATS_INC(soc, rx.err.stale_cookie, 1);
  302. break;
  303. }
  304. rx_desc = (struct dp_rx_desc *)
  305. hal_rx_get_reo_desc_va(ring_desc);
  306. dp_rx_desc_sw_cc_check(soc, rx_buf_cookie, &rx_desc);
  307. status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
  308. ring_desc, rx_desc);
  309. if (QDF_IS_STATUS_ERROR(status)) {
  310. if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
  311. qdf_assert_always(!rx_desc->unmapped);
  312. dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
  313. rx_desc->unmapped = 1;
  314. dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
  315. rx_desc->pool_id);
  316. dp_rx_add_to_free_desc_list(
  317. &head[rx_desc->chip_id][rx_desc->pool_id],
  318. &tail[rx_desc->chip_id][rx_desc->pool_id],
  319. rx_desc);
  320. }
  321. hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
  322. continue;
  323. }
  324. /*
  325. * this is an unlikely scenario where the host is reaping
  326. * a descriptor which it already reaped just a while ago
  327. * but is yet to replenish it back to HW.
  328. * In this case host will dump the last 128 descriptors
  329. * including the software descriptor rx_desc and assert.
  330. */
  331. if (qdf_unlikely(!rx_desc->in_use)) {
  332. DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
  333. dp_info_rl("Reaping rx_desc not in use!");
  334. dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
  335. ring_desc, rx_desc);
  336. /* ignore duplicate RX desc and continue to process */
  337. /* Pop out the descriptor */
  338. hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
  339. continue;
  340. }
  341. status = dp_rx_desc_nbuf_sanity_check(soc, ring_desc, rx_desc);
  342. if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
  343. DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
  344. dp_info_rl("Nbuf sanity check failure!");
  345. dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
  346. ring_desc, rx_desc);
  347. rx_desc->in_err_state = 1;
  348. hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
  349. continue;
  350. }
  351. if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
  352. dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
  353. DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
  354. dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
  355. ring_desc, rx_desc);
  356. }
  357. /* Get MPDU DESC info */
  358. hal_rx_mpdu_desc_info_get_be(ring_desc, &mpdu_desc_info);
  359. /* Get MSDU DESC info */
  360. hal_rx_msdu_desc_info_get_be(ring_desc, &msdu_desc_info);
  361. /* Set the end bit to identify the last buffer in MPDU */
  362. if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
  363. qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
  364. if (qdf_unlikely(msdu_desc_info.msdu_flags &
  365. HAL_MSDU_F_MSDU_CONTINUATION)) {
  366. /* In dp_rx_sg_create() until the last buffer,
  367. * end bit should not be set. As the continuation bit is set,
  368. * this is not the last buffer.
  369. */
  370. qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 0);
  371. /* previous msdu has end bit set, so current one is
  372. * the new MPDU
  373. */
  374. if (is_prev_msdu_last) {
  375. /* Get number of entries available in HW ring */
  376. num_entries_avail =
  377. hal_srng_dst_num_valid(hal_soc,
  378. hal_ring_hdl, 1);
  379. /* For new MPDU check if we can read complete
  380. * MPDU by comparing the number of buffers
  381. * available and number of buffers needed to
  382. * reap this MPDU
  383. */
  384. if ((msdu_desc_info.msdu_len /
  385. (RX_DATA_BUFFER_SIZE -
  386. soc->rx_pkt_tlv_size) + 1) >
  387. num_pending) {
  388. DP_STATS_INC(soc,
  389. rx.msdu_scatter_wait_break,
  390. 1);
  391. dp_rx_cookie_reset_invalid_bit(
  392. ring_desc);
  393. /* As we are going to break out of the
  394. * loop because of unavailability of
  395. * descs to form complete SG, we need to
  396. * reset the TP in the REO destination
  397. * ring.
  398. */
  399. hal_srng_dst_dec_tp(hal_soc,
  400. hal_ring_hdl);
  401. break;
  402. }
  403. is_prev_msdu_last = false;
  404. }
  405. }
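/*
 * Illustrative numbers for the check above (an assumption; actual sizes
 * depend on the build configuration): with RX_DATA_BUFFER_SIZE = 2048 and
 * soc->rx_pkt_tlv_size = 128, a 5000 byte MSDU needs
 * 5000 / (2048 - 128) + 1 = 3 buffers, so the reap loop breaks out (and
 * rewinds the REO TP) unless at least 3 entries are still pending.
 */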
  406. if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
  407. qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);
  408. if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
  409. HAL_MPDU_F_RAW_AMPDU))
  410. qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);
  411. if (!is_prev_msdu_last &&
  412. !(msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION))
  413. is_prev_msdu_last = true;
  414. rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
  415. peer_mdata = mpdu_desc_info.peer_meta_data;
  416. QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
  417. dp_rx_peer_metadata_peer_id_get_be(soc, peer_mdata);
  418. QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
  419. dp_rx_peer_metadata_vdev_id_get_be(soc, peer_mdata);
  420. dp_rx_set_msdu_lmac_id(rx_desc->nbuf, peer_mdata);
  421. /* to indicate whether this msdu is rx offload */
  422. pkt_capture_offload =
  423. DP_PEER_METADATA_OFFLOAD_GET_BE(peer_mdata);
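/*
 * Sketch of how the cached fields are consumed (added note, not from the
 * original source): the delivery loop below reads these nbuf->cb fields
 * back instead of re-parsing the RX TLVs, roughly:
 *
 *   peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);   // set from peer_mdata here
 *   vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);   // used to find txrx_peer/vdev
 */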
  424. /*
  425. * save msdu flags first, last and continuation msdu in
  426. * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
  427. * length to nbuf->cb. This ensures the info required for
  428. * per pkt processing is always in the same cache line.
  429. * This helps in improving throughput for smaller pkt
  430. * sizes.
  431. */
  432. if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
  433. qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
  434. if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
  435. qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
  436. if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
  437. qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);
  438. if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
  439. qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);
  440. if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
  441. qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);
  442. if (msdu_desc_info.msdu_flags & HAL_MSDU_F_INTRA_BSS)
  443. qdf_nbuf_set_intra_bss(rx_desc->nbuf, 1);
  444. if (qdf_likely(mpdu_desc_info.mpdu_flags &
  445. HAL_MPDU_F_QOS_CONTROL_VALID))
  446. qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);
  447. /* set sw exception */
  448. qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
  449. rx_desc->nbuf,
  450. hal_rx_sw_exception_get_be(ring_desc));
  451. QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;
  452. QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
  453. /*
  454. * move the unmap after the scattered-msdu waiting-break logic
  455. * so that a double skb unmap cannot happen.
  456. */
  457. dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
  458. rx_desc->unmapped = 1;
  459. DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
  460. ebuf_tail, rx_desc);
  461. quota -= 1;
  462. num_pending -= 1;
  463. dp_rx_add_to_free_desc_list
  464. (&head[rx_desc->chip_id][rx_desc->pool_id],
  465. &tail[rx_desc->chip_id][rx_desc->pool_id], rx_desc);
  466. num_rx_bufs_reaped++;
  467. dp_rx_prefetch_hw_sw_nbuf_32_byte_desc(soc, hal_soc,
  468. num_pending,
  469. hal_ring_hdl,
  470. &last_prefetched_hw_desc,
  471. &last_prefetched_sw_desc);
  472. /*
  473. * only if complete msdu is received for scatter case,
  474. * then allow break.
  475. */
  476. if (is_prev_msdu_last &&
  477. dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped,
  478. max_reap_limit))
  479. break;
  480. }
  481. done:
  482. dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
  483. qdf_dsb();
  484. dp_rx_per_core_stats_update(soc, reo_ring_num, num_rx_bufs_reaped);
  485. for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
  486. for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
  487. /*
  488. * continue with next mac_id if no pkts were reaped
  489. * from that pool
  490. */
  491. if (!rx_bufs_reaped[chip_id][mac_id])
  492. continue;
  493. replenish_soc = dp_rx_replensih_soc_get(soc, chip_id);
  494. dp_rxdma_srng =
  495. &replenish_soc->rx_refill_buf_ring[mac_id];
  496. rx_desc_pool = &replenish_soc->rx_desc_buf[mac_id];
  497. dp_rx_buffers_replenish_simple(replenish_soc, mac_id,
  498. dp_rxdma_srng,
  499. rx_desc_pool,
  500. rx_bufs_reaped[chip_id][mac_id],
  501. &head[chip_id][mac_id],
  502. &tail[chip_id][mac_id]);
  503. }
  504. }
  505. /* Peer can be NULL in case of LFR */
  506. if (qdf_likely(txrx_peer))
  507. vdev = NULL;
  508. /*
  509. * BIG loop where each nbuf is dequeued from global queue,
  510. * processed and queued back on a per vdev basis. These nbufs
  511. * are sent to stack as and when we run out of nbufs
  512. * or a new nbuf dequeued from global queue has a different
  513. * vdev when compared to previous nbuf.
  514. */
  515. nbuf = nbuf_head;
  516. while (nbuf) {
  517. next = nbuf->next;
  518. dp_rx_prefetch_nbuf_data_be(nbuf, next);
  519. if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
  520. nbuf = next;
  521. DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
  522. continue;
  523. }
  524. rx_tlv_hdr = qdf_nbuf_data(nbuf);
  525. vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
  526. peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
  527. if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
  528. peer_id, vdev_id)) {
  529. dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
  530. deliver_list_head,
  531. deliver_list_tail);
  532. deliver_list_head = NULL;
  533. deliver_list_tail = NULL;
  534. }
  535. /* Get TID from struct cb->tid_val, save to tid */
  536. tid = qdf_nbuf_get_tid_val(nbuf);
  537. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS)) {
  538. DP_STATS_INC(soc, rx.err.rx_invalid_tid_err, 1);
  539. dp_rx_nbuf_free(nbuf);
  540. nbuf = next;
  541. continue;
  542. }
  543. if (qdf_unlikely(!txrx_peer)) {
  544. txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
  545. peer_id,
  546. &txrx_ref_handle,
  547. pkt_capture_offload,
  548. &vdev,
  549. &rx_pdev, &dsf,
  550. &old_tid);
  551. if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
  552. nbuf = next;
  553. continue;
  554. }
  555. enh_flag = rx_pdev->enhanced_stats_en;
  556. } else if (txrx_peer && txrx_peer->peer_id != peer_id) {
  557. dp_txrx_peer_unref_delete(txrx_ref_handle,
  558. DP_MOD_ID_RX);
  559. txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf,
  560. peer_id,
  561. &txrx_ref_handle,
  562. pkt_capture_offload,
  563. &vdev,
  564. &rx_pdev, &dsf,
  565. &old_tid);
  566. if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev)) {
  567. nbuf = next;
  568. continue;
  569. }
  570. enh_flag = rx_pdev->enhanced_stats_en;
  571. }
  572. if (txrx_peer) {
  573. QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
  574. qdf_dp_trace_set_track(nbuf, QDF_RX);
  575. QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
  576. QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
  577. QDF_NBUF_RX_PKT_DATA_TRACK;
  578. }
  579. rx_bufs_used++;
  580. /* when hlos tid override is enabled, save tid in
  581. * skb->priority
  582. */
  583. if (qdf_unlikely(vdev->skip_sw_tid_classification &
  584. DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
  585. qdf_nbuf_set_priority(nbuf, tid);
  586. DP_RX_TID_SAVE(nbuf, tid);
  587. if (qdf_unlikely(dsf) || qdf_unlikely(peer_ext_stats) ||
  588. dp_rx_pkt_tracepoints_enabled())
  589. qdf_nbuf_set_timestamp(nbuf);
  590. if (qdf_likely(old_tid != tid)) {
  591. tid_stats =
  592. &rx_pdev->stats.tid_stats.tid_rx_stats[reo_ring_num][tid];
  593. old_tid = tid;
  594. }
  595. /*
  596. * Check if DMA completed -- msdu_done is the last bit
  597. * to be written
  598. */
  599. if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
  600. !hal_rx_tlv_msdu_done_get_be(rx_tlv_hdr))) {
  601. dp_err("MSDU DONE failure");
  602. DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
  603. hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
  604. QDF_TRACE_LEVEL_INFO);
  605. tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
  606. dp_rx_nbuf_free(nbuf);
  607. qdf_assert(0);
  608. nbuf = next;
  609. continue;
  610. }
  611. DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
  612. /*
  613. * First IF condition:
  614. * 802.11 Fragmented pkts are reinjected to REO
  615. * HW block as SG pkts and for these pkts we only
  616. * need to pull the RX TLVS header length.
  617. * Second IF condition:
  618. * The below condition happens when an MSDU is spread
  619. * across multiple buffers. This can happen in two cases:
  620. * 1. The nbuf size is smaller than the received msdu.
  621. * ex: we have set the nbuf size to 2048 during
  622. * nbuf_alloc. but we received an msdu which is
  623. * 2304 bytes in size then this msdu is spread
  624. * across 2 nbufs.
  625. *
  626. * 2. AMSDUs when RAW mode is enabled.
  627. * ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
  628. * across 1st nbuf and 2nd nbuf and last MSDU is
  629. * spread across 2nd nbuf and 3rd nbuf.
  630. *
  631. * for these scenarios let us create a skb frag_list and
  632. * append these buffers till the last MSDU of the AMSDU
  633. * Third condition:
  634. * This is the most likely case, we receive 802.3 pkts
  635. * decapsulated by HW, here we need to set the pkt length.
  636. */
  637. hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr,
  638. &msdu_metadata);
  639. if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
  640. bool is_mcbc, is_sa_vld, is_da_vld;
  641. is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
  642. rx_tlv_hdr);
  643. is_sa_vld =
  644. hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
  645. rx_tlv_hdr);
  646. is_da_vld =
  647. hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
  648. rx_tlv_hdr);
  649. qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
  650. qdf_nbuf_set_da_valid(nbuf, is_da_vld);
  651. qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
  652. qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
  653. } else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
  654. msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  655. nbuf = dp_rx_sg_create(soc, nbuf);
  656. next = nbuf->next;
  657. if (qdf_nbuf_is_raw_frame(nbuf)) {
  658. DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
  659. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
  660. rx.raw, 1,
  661. msdu_len);
  662. } else {
  663. DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
  664. if (!dp_rx_is_sg_supported()) {
  665. dp_rx_nbuf_free(nbuf);
  666. dp_info_rl("sg msdu len %d, dropped",
  667. msdu_len);
  668. nbuf = next;
  669. continue;
  670. }
  671. }
  672. } else {
  673. msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  674. pkt_len = msdu_len +
  675. msdu_metadata.l3_hdr_pad +
  676. soc->rx_pkt_tlv_size;
  677. qdf_nbuf_set_pktlen(nbuf, pkt_len);
  678. dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
  679. }
  680. dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);
  681. if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
  682. dp_rx_err("%pK: Policy Check Drop pkt", soc);
  683. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  684. rx.policy_check_drop, 1);
  685. tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
  686. /* Drop & free packet */
  687. dp_rx_nbuf_free(nbuf);
  688. /* Statistics */
  689. nbuf = next;
  690. continue;
  691. }
  692. /*
  693. * Drop non-EAPOL frames from unauthorized peer.
  694. */
  695. if (qdf_likely(txrx_peer) &&
  696. qdf_unlikely(!txrx_peer->authorize) &&
  697. !qdf_nbuf_is_raw_frame(nbuf)) {
  698. bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
  699. qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
  700. if (!is_eapol) {
  701. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  702. rx.peer_unauth_rx_pkt_drop,
  703. 1);
  704. dp_rx_nbuf_free(nbuf);
  705. nbuf = next;
  706. continue;
  707. }
  708. }
  709. dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
  710. dp_rx_update_flow_info(nbuf, rx_tlv_hdr);
  711. if (qdf_unlikely(!rx_pdev->rx_fast_flag)) {
  712. /*
  713. * process frame for multipass processing
  714. */
  715. if (qdf_unlikely(vdev->multipass_en)) {
  716. if (dp_rx_multipass_process(txrx_peer, nbuf,
  717. tid) == false) {
  718. DP_PEER_PER_PKT_STATS_INC
  719. (txrx_peer,
  720. rx.multipass_rx_pkt_drop, 1);
  721. dp_rx_nbuf_free(nbuf);
  722. nbuf = next;
  723. continue;
  724. }
  725. }
  726. if (qdf_unlikely(txrx_peer &&
  727. (txrx_peer->nawds_enabled) &&
  728. (qdf_nbuf_is_da_mcbc(nbuf)) &&
  729. (hal_rx_get_mpdu_mac_ad4_valid_be
  730. (rx_tlv_hdr) == false))) {
  731. tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
  732. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  733. rx.nawds_mcast_drop,
  734. 1);
  735. dp_rx_nbuf_free(nbuf);
  736. nbuf = next;
  737. continue;
  738. }
  739. /* Update the protocol tag in SKB based on CCE metadata
  740. */
  741. dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
  742. reo_ring_num, false, true);
  743. /* Update the flow tag in SKB based on FSE metadata */
  744. dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
  745. true);
  746. if (qdf_likely(vdev->rx_decap_type ==
  747. htt_cmn_pkt_type_ethernet) &&
  748. qdf_likely(!vdev->mesh_vdev)) {
  749. dp_rx_wds_learn(soc, vdev,
  750. rx_tlv_hdr,
  751. txrx_peer,
  752. nbuf,
  753. msdu_metadata);
  754. }
  755. if (qdf_unlikely(vdev->mesh_vdev)) {
  756. if (dp_rx_filter_mesh_packets(vdev, nbuf,
  757. rx_tlv_hdr)
  758. == QDF_STATUS_SUCCESS) {
  759. dp_rx_info("%pK: mesh pkt filtered",
  760. soc);
  761. tid_stats->fail_cnt[MESH_FILTER_DROP]++;
  762. DP_STATS_INC(vdev->pdev,
  763. dropped.mesh_filter, 1);
  764. dp_rx_nbuf_free(nbuf);
  765. nbuf = next;
  766. continue;
  767. }
  768. dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
  769. txrx_peer);
  770. }
  771. }
  772. dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
  773. reo_ring_num, tid_stats);
  774. if (qdf_likely(vdev->rx_decap_type ==
  775. htt_cmn_pkt_type_ethernet) &&
  776. qdf_likely(!vdev->mesh_vdev)) {
  777. /* Intrabss-fwd */
  778. if (dp_rx_check_ap_bridge(vdev))
  779. if (dp_rx_intrabss_fwd_be(soc, txrx_peer,
  780. rx_tlv_hdr,
  781. nbuf,
  782. msdu_metadata)) {
  783. nbuf = next;
  784. tid_stats->intrabss_cnt++;
  785. continue; /* Get next desc */
  786. }
  787. }
  788. dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);
  789. dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
  790. nbuf);
  791. dp_rx_update_stats(soc, nbuf);
  792. dp_pkt_add_timestamp(txrx_peer->vdev, QDF_PKT_RX_DRIVER_ENTRY,
  793. current_time, nbuf);
  794. DP_RX_LIST_APPEND(deliver_list_head,
  795. deliver_list_tail,
  796. nbuf);
  797. DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
  798. QDF_NBUF_CB_RX_PKT_LEN(nbuf),
  799. enh_flag);
  800. if (qdf_unlikely(txrx_peer->in_twt))
  801. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
  802. rx.to_stack_twt, 1,
  803. QDF_NBUF_CB_RX_PKT_LEN(nbuf));
  804. tid_stats->delivered_to_stack++;
  805. nbuf = next;
  806. }
  807. DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
  808. pkt_capture_offload,
  809. deliver_list_head,
  810. deliver_list_tail);
  811. if (qdf_likely(txrx_peer))
  812. dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
  813. /*
  814. * If we are processing in near-full condition, there are 3 scenarios:
  815. * 1) Ring entries have reached critical state
  816. * 2) Ring entries are still near high threshold
  817. * 3) Ring entries are below the safe level
  818. *
  819. * One more loop will move the state to normal processing and yield
  820. */
  821. if (ring_near_full && quota)
  822. goto more_data;
  823. if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
  824. if (quota) {
  825. num_pending =
  826. dp_rx_srng_get_num_pending(hal_soc,
  827. hal_ring_hdl,
  828. num_entries,
  829. &near_full);
  830. if (num_pending) {
  831. DP_STATS_INC(soc, rx.hp_oos2, 1);
  832. if (!hif_exec_should_yield(scn, intr_id))
  833. goto more_data;
  834. if (qdf_unlikely(near_full)) {
  835. DP_STATS_INC(soc, rx.near_full, 1);
  836. goto more_data;
  837. }
  838. }
  839. }
  840. if (vdev && vdev->osif_fisa_flush)
  841. vdev->osif_fisa_flush(soc, reo_ring_num);
  842. if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
  843. vdev->osif_gro_flush(vdev->osif_vdev,
  844. reo_ring_num);
  845. }
  846. }
  847. /* Update histogram statistics by looping through pdevs */
  848. DP_RX_HIST_STATS_PER_PDEV();
  849. return rx_bufs_used; /* Assume no scale factor for now */
  850. }
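/*
 * Illustrative call pattern (a sketch only; the real dispatch lives in the
 * common rx path, e.g. dp_service_srngs()/dp_rx_process(), outside this
 * file): each REO destination ring that raised an interrupt is drained
 * with the remaining budget as quota, roughly:
 *
 *   work_done = dp_rx_process_be(int_ctx,
 *                                soc->reo_dest_ring[ring].hal_srng,
 *                                ring, remaining_quota);
 *   remaining_quota -= work_done;
 */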
  851. #ifdef RX_DESC_MULTI_PAGE_ALLOC
  852. /**
  853. * dp_rx_desc_pool_init_be_cc() - initial RX desc pool for cookie conversion
  854. * @soc: Handle to DP Soc structure
  855. * @rx_desc_pool: Rx descriptor pool handler
  856. * @pool_id: Rx descriptor pool ID
  857. *
  858. * Return: QDF_STATUS_SUCCESS - succeeded, others - failed
  859. */
  860. static QDF_STATUS
  861. dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
  862. struct rx_desc_pool *rx_desc_pool,
  863. uint32_t pool_id)
  864. {
  865. struct dp_hw_cookie_conversion_t *cc_ctx;
  866. struct dp_soc_be *be_soc;
  867. union dp_rx_desc_list_elem_t *rx_desc_elem;
  868. struct dp_spt_page_desc *page_desc;
  869. uint32_t ppt_idx = 0;
  870. uint32_t avail_entry_index = 0;
  871. if (!rx_desc_pool->pool_size) {
  872. dp_err("desc_num 0 !!");
  873. return QDF_STATUS_E_FAILURE;
  874. }
  875. be_soc = dp_get_be_soc_from_dp_soc(soc);
  876. cc_ctx = &be_soc->rx_cc_ctx[pool_id];
  877. page_desc = &cc_ctx->page_desc_base[0];
  878. rx_desc_elem = rx_desc_pool->freelist;
  879. while (rx_desc_elem) {
  880. if (avail_entry_index == 0) {
  881. if (ppt_idx >= cc_ctx->total_page_num) {
  882. dp_alert("insufficient secondary page tables");
  883. qdf_assert_always(0);
  884. }
  885. page_desc = &cc_ctx->page_desc_base[ppt_idx++];
  886. }
  887. /* put each RX Desc VA to SPT pages and
  888. * get corresponding ID
  889. */
  890. DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
  891. avail_entry_index,
  892. &rx_desc_elem->rx_desc);
  893. rx_desc_elem->rx_desc.cookie =
  894. dp_cc_desc_id_generate(page_desc->ppt_index,
  895. avail_entry_index);
  896. rx_desc_elem->rx_desc.chip_id = dp_mlo_get_chip_id(soc);
  897. rx_desc_elem->rx_desc.pool_id = pool_id;
  898. rx_desc_elem->rx_desc.in_use = 0;
  899. rx_desc_elem = rx_desc_elem->next;
  900. avail_entry_index = (avail_entry_index + 1) &
  901. DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
  902. }
  903. return QDF_STATUS_SUCCESS;
  904. }
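/*
 * Illustrative note on the cookie layout (a sketch; the exact bit packing
 * is defined by dp_cc_desc_id_generate()): each cookie combines the
 * secondary page table (SPT) page index with the entry offset inside that
 * page, so the reverse lookup in dp_rx_desc_cookie_2_va_be() via
 * dp_cc_desc_find() is a constant-time page + offset dereference:
 *
 *   cookie  = dp_cc_desc_id_generate(page_desc->ppt_index, entry_index);
 *   rx_desc = dp_cc_desc_find(soc, cookie);   // cookie -> VA
 */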
  905. #else
  906. static QDF_STATUS
  907. dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
  908. struct rx_desc_pool *rx_desc_pool,
  909. uint32_t pool_id)
  910. {
  911. struct dp_hw_cookie_conversion_t *cc_ctx;
  912. struct dp_soc_be *be_soc;
  913. struct dp_spt_page_desc *page_desc;
  914. uint32_t ppt_idx = 0;
  915. uint32_t avail_entry_index = 0;
  916. int i = 0;
  917. if (!rx_desc_pool->pool_size) {
  918. dp_err("desc_num 0 !!");
  919. return QDF_STATUS_E_FAILURE;
  920. }
  921. be_soc = dp_get_be_soc_from_dp_soc(soc);
  922. cc_ctx = &be_soc->rx_cc_ctx[pool_id];
  923. page_desc = &cc_ctx->page_desc_base[0];
  924. for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
  925. if (i == rx_desc_pool->pool_size - 1)
  926. rx_desc_pool->array[i].next = NULL;
  927. else
  928. rx_desc_pool->array[i].next =
  929. &rx_desc_pool->array[i + 1];
  930. if (avail_entry_index == 0) {
  931. if (ppt_idx >= cc_ctx->total_page_num) {
  932. dp_alert("insufficient secondary page tables");
  933. qdf_assert_always(0);
  934. }
  935. page_desc = &cc_ctx->page_desc_base[ppt_idx++];
  936. }
  937. /* put each RX Desc VA to SPT pages and
  938. * get corresponding ID
  939. */
  940. DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
  941. avail_entry_index,
  942. &rx_desc_pool->array[i].rx_desc);
  943. rx_desc_pool->array[i].rx_desc.cookie =
  944. dp_cc_desc_id_generate(page_desc->ppt_index,
  945. avail_entry_index);
  946. rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
  947. rx_desc_pool->array[i].rx_desc.in_use = 0;
  948. rx_desc_pool->array[i].rx_desc.chip_id =
  949. dp_mlo_get_chip_id(soc);
  950. avail_entry_index = (avail_entry_index + 1) &
  951. DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
  952. }
  953. return QDF_STATUS_SUCCESS;
  954. }
  955. #endif
  956. static void
  957. dp_rx_desc_pool_deinit_be_cc(struct dp_soc *soc,
  958. struct rx_desc_pool *rx_desc_pool,
  959. uint32_t pool_id)
  960. {
  961. struct dp_spt_page_desc *page_desc;
  962. struct dp_soc_be *be_soc;
  963. int i = 0;
  964. struct dp_hw_cookie_conversion_t *cc_ctx;
  965. be_soc = dp_get_be_soc_from_dp_soc(soc);
  966. cc_ctx = &be_soc->rx_cc_ctx[pool_id];
  967. for (i = 0; i < cc_ctx->total_page_num; i++) {
  968. page_desc = &cc_ctx->page_desc_base[i];
  969. qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
  970. }
  971. }
  972. QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
  973. struct rx_desc_pool *rx_desc_pool,
  974. uint32_t pool_id)
  975. {
  976. QDF_STATUS status = QDF_STATUS_SUCCESS;
  977. /* Only the regular RX buffer desc pool uses HW cookie conversion */
  978. if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE) {
  979. dp_info("rx_desc_buf pool init");
  980. status = dp_rx_desc_pool_init_be_cc(soc,
  981. rx_desc_pool,
  982. pool_id);
  983. } else {
  984. dp_info("non_rx_desc_buf_pool init");
  985. status = dp_rx_desc_pool_init_generic(soc, rx_desc_pool,
  986. pool_id);
  987. }
  988. return status;
  989. }
  990. void dp_rx_desc_pool_deinit_be(struct dp_soc *soc,
  991. struct rx_desc_pool *rx_desc_pool,
  992. uint32_t pool_id)
  993. {
  994. if (rx_desc_pool->desc_type == DP_RX_DESC_BUF_TYPE)
  995. dp_rx_desc_pool_deinit_be_cc(soc, rx_desc_pool, pool_id);
  996. }
  997. #ifdef DP_FEATURE_HW_COOKIE_CONVERSION
  998. #ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
  999. QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
  1000. void *ring_desc,
  1001. struct dp_rx_desc **r_rx_desc)
  1002. {
  1003. if (hal_rx_wbm_get_cookie_convert_done(ring_desc)) {
  1004. /* HW cookie conversion done */
  1005. *r_rx_desc = (struct dp_rx_desc *)
  1006. hal_rx_wbm_get_desc_va(ring_desc);
  1007. } else {
  1008. /* SW does the cookie conversion */
  1009. uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);
  1010. *r_rx_desc = (struct dp_rx_desc *)
  1011. dp_cc_desc_find(soc, cookie);
  1012. }
  1013. return QDF_STATUS_SUCCESS;
  1014. }
  1015. #else
  1016. QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
  1017. void *ring_desc,
  1018. struct dp_rx_desc **r_rx_desc)
  1019. {
  1020. *r_rx_desc = (struct dp_rx_desc *)
  1021. hal_rx_wbm_get_desc_va(ring_desc);
  1022. return QDF_STATUS_SUCCESS;
  1023. }
  1024. #endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
  1025. #else
  1026. QDF_STATUS dp_wbm_get_rx_desc_from_hal_desc_be(struct dp_soc *soc,
  1027. void *ring_desc,
  1028. struct dp_rx_desc **r_rx_desc)
  1029. {
  1030. /* SW does the cookie conversion */
  1031. uint32_t cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);
  1032. *r_rx_desc = (struct dp_rx_desc *)
  1033. dp_cc_desc_find(soc, cookie);
  1034. return QDF_STATUS_SUCCESS;
  1035. }
  1036. #endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
  1037. struct dp_rx_desc *dp_rx_desc_cookie_2_va_be(struct dp_soc *soc,
  1038. uint32_t cookie)
  1039. {
  1040. return (struct dp_rx_desc *)dp_cc_desc_find(soc, cookie);
  1041. }
  1042. #if defined(WLAN_FEATURE_11BE_MLO)
  1043. #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
  1044. #define DP_RANDOM_MAC_ID_BIT_MASK 0xC0
  1045. #define DP_RANDOM_MAC_OFFSET 1
  1046. #define DP_MAC_LOCAL_ADMBIT_MASK 0x2
  1047. #define DP_MAC_LOCAL_ADMBIT_OFFSET 0
  1048. static inline void dp_rx_dummy_src_mac(struct dp_vdev *vdev,
  1049. qdf_nbuf_t nbuf)
  1050. {
  1051. qdf_ether_header_t *eh =
  1052. (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  1053. eh->ether_shost[DP_MAC_LOCAL_ADMBIT_OFFSET] =
  1054. eh->ether_shost[DP_MAC_LOCAL_ADMBIT_OFFSET] |
  1055. DP_MAC_LOCAL_ADMBIT_MASK;
  1056. }
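/*
 * Added note (illustration only): dp_rx_dummy_src_mac() above sets the
 * locally administered bit (0x2) in byte 0 of the source MAC, so the copy
 * delivered on the mcast primary vdev carries a dummy address that cannot
 * clash with a real station address, e.g.
 *
 *   00:03:7f:12:34:56  ->  02:03:7f:12:34:56
 */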
  1057. #ifdef QCA_SUPPORT_WDS_EXTENDED
  1058. static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
  1059. {
  1060. return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
  1061. }
  1062. #else
  1063. static inline bool dp_rx_mlo_igmp_wds_ext_handler(struct dp_txrx_peer *peer)
  1064. {
  1065. return false;
  1066. }
  1067. #endif
  1068. bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
  1069. struct dp_vdev *vdev,
  1070. struct dp_txrx_peer *peer,
  1071. qdf_nbuf_t nbuf)
  1072. {
  1073. struct dp_vdev *mcast_primary_vdev = NULL;
  1074. struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
  1075. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  1076. uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
  1077. struct cdp_tid_rx_stats *tid_stats = &peer->vdev->pdev->stats.
  1078. tid_stats.tid_rx_wbm_stats[0][tid];
  1079. if (!(qdf_nbuf_is_ipv4_igmp_pkt(nbuf) ||
  1080. qdf_nbuf_is_ipv6_igmp_pkt(nbuf)))
  1081. return false;
  1082. if (qdf_unlikely(vdev->multipass_en)) {
  1083. if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
  1084. DP_PEER_PER_PKT_STATS_INC(peer,
  1085. rx.multipass_rx_pkt_drop, 1);
  1086. return false;
  1087. }
  1088. }
  1089. if (!peer->bss_peer) {
  1090. if (dp_rx_intrabss_mcbc_fwd(soc, peer, NULL, nbuf, tid_stats))
  1091. dp_rx_err("forwarding failed");
  1092. }
  1093. /*
  1094. * In the case of ME6, Backhaul WDS, NAWDS
  1095. * send the igmp pkt on the same link where it was received,
  1096. * as these features will use peer based tcl metadata
  1097. */
  1098. qdf_nbuf_set_next(nbuf, NULL);
  1099. if (vdev->mcast_enhancement_en || be_vdev->mcast_primary ||
  1100. peer->nawds_enabled)
  1101. goto send_pkt;
  1102. if (qdf_unlikely(dp_rx_mlo_igmp_wds_ext_handler(peer)))
  1103. goto send_pkt;
  1104. mcast_primary_vdev = dp_mlo_get_mcast_primary_vdev(be_soc, be_vdev,
  1105. DP_MOD_ID_RX);
  1106. if (!mcast_primary_vdev) {
  1107. dp_rx_debug("Non mlo vdev");
  1108. goto send_pkt;
  1109. }
  1110. if (qdf_unlikely(vdev->wrap_vdev)) {
  1111. /* In the case of a qwrap repeater, send the original
  1112. * packet on the interface where it was received, and a
  1113. * packet with a dummy src MAC on the mcast primary interface.
  1114. */
  1115. qdf_nbuf_t nbuf_copy;
  1116. nbuf_copy = qdf_nbuf_copy(nbuf);
  1117. if (qdf_likely(nbuf_copy))
  1118. dp_rx_deliver_to_stack(soc, vdev, peer, nbuf_copy,
  1119. NULL);
  1120. }
  1121. dp_rx_dummy_src_mac(vdev, nbuf);
  1122. dp_rx_deliver_to_stack(mcast_primary_vdev->pdev->soc,
  1123. mcast_primary_vdev,
  1124. peer,
  1125. nbuf,
  1126. NULL);
  1127. dp_vdev_unref_delete(mcast_primary_vdev->pdev->soc,
  1128. mcast_primary_vdev,
  1129. DP_MOD_ID_RX);
  1130. return true;
  1131. send_pkt:
  1132. dp_rx_deliver_to_stack(be_vdev->vdev.pdev->soc,
  1133. &be_vdev->vdev,
  1134. peer,
  1135. nbuf,
  1136. NULL);
  1137. return true;
  1138. }
  1139. #else
  1140. bool dp_rx_mlo_igmp_handler(struct dp_soc *soc,
  1141. struct dp_vdev *vdev,
  1142. struct dp_txrx_peer *peer,
  1143. qdf_nbuf_t nbuf)
  1144. {
  1145. return false;
  1146. }
  1147. #endif
  1148. #endif
  1149. #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
  1150. uint32_t dp_rx_nf_process(struct dp_intr *int_ctx,
  1151. hal_ring_handle_t hal_ring_hdl,
  1152. uint8_t reo_ring_num,
  1153. uint32_t quota)
  1154. {
  1155. struct dp_soc *soc = int_ctx->soc;
  1156. struct dp_srng *rx_ring = &soc->reo_dest_ring[reo_ring_num];
  1157. uint32_t work_done = 0;
  1158. if (dp_srng_get_near_full_level(soc, rx_ring) <
  1159. DP_SRNG_THRESH_NEAR_FULL)
  1160. return 0;
  1161. qdf_atomic_set(&rx_ring->near_full, 1);
  1162. work_done++;
  1163. return work_done;
  1164. }
  1165. #endif
  1166. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  1167. #ifdef WLAN_FEATURE_11BE_MLO
  1168. /**
  1169. * dp_rx_intrabss_fwd_mlo_allow() - check if MLO forwarding is allowed
  1170. * @ta_peer: transmitter peer handle
  1171. * @da_peer: destination peer handle
  1172. *
  1173. * Return: true - MLO forwarding case, false: not
  1174. */
  1175. static inline bool
  1176. dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
  1177. struct dp_txrx_peer *da_peer)
  1178. {
  1179. /* TA peer and DA peer's vdev should be partner MLO vdevs */
  1180. if (dp_peer_find_mac_addr_cmp(&ta_peer->vdev->mld_mac_addr,
  1181. &da_peer->vdev->mld_mac_addr))
  1182. return false;
  1183. return true;
  1184. }
  1185. #else
  1186. static inline bool
  1187. dp_rx_intrabss_fwd_mlo_allow(struct dp_txrx_peer *ta_peer,
  1188. struct dp_txrx_peer *da_peer)
  1189. {
  1190. return false;
  1191. }
  1192. #endif
  1193. #ifdef INTRA_BSS_FWD_OFFLOAD
  1194. /**
  1195. * dp_rx_intrabss_ucast_check_be() - Check if intrabss is allowed
  1196. * for unicast frame
  1197. * @nbuf: RX packet buffer
  1198. * @ta_peer: transmitter DP peer handle
  1199. * @rx_tlv_hdr: Rx TLV header
  1200. * @msdu_metadata: MSDU meta data info
  1201. * @params: params to be filled in
  1202. *
  1203. * Return: true - intrabss allowed
  1204. * false - not allow
  1205. */
  1206. static bool
  1207. dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
  1208. struct dp_txrx_peer *ta_peer,
  1209. uint8_t *rx_tlv_hdr,
  1210. struct hal_rx_msdu_metadata *msdu_metadata,
  1211. struct dp_be_intrabss_params *params)
  1212. {
  1213. uint8_t dest_chip_id, dest_chip_pmac_id;
  1214. struct dp_vdev_be *be_vdev =
  1215. dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
  1216. struct dp_soc_be *be_soc =
  1217. dp_get_be_soc_from_dp_soc(params->dest_soc);
  1218. if (!qdf_nbuf_is_intra_bss(nbuf))
  1219. return false;
  1220. hal_rx_tlv_get_dest_chip_pmac_id(rx_tlv_hdr,
  1221. &dest_chip_id,
  1222. &dest_chip_pmac_id);
  1223. qdf_assert_always(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1));
  1224. if (dest_chip_id == be_soc->mlo_chip_id) {
  1225. /* TODO: adding to self list is better */
  1226. params->tx_vdev_id = ta_peer->vdev->vdev_id;
  1227. return true;
  1228. }
  1229. params->dest_soc =
  1230. dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
  1231. dest_chip_id);
  1232. if (!params->dest_soc)
  1233. return false;
  1234. params->tx_vdev_id =
  1235. be_vdev->partner_vdev_list[dest_chip_id][dest_chip_pmac_id];
  1236. return true;
  1237. }
  1238. #else
  1239. #ifdef WLAN_MLO_MULTI_CHIP
  1240. static bool
  1241. dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
  1242. struct dp_txrx_peer *ta_peer,
  1243. uint8_t *rx_tlv_hdr,
  1244. struct hal_rx_msdu_metadata *msdu_metadata,
  1245. struct dp_be_intrabss_params *params)
  1246. {
  1247. uint16_t da_peer_id;
  1248. struct dp_txrx_peer *da_peer;
  1249. bool ret = false;
  1250. uint8_t dest_chip_id;
  1251. dp_txrx_ref_handle txrx_ref_handle = NULL;
  1252. struct dp_vdev_be *be_vdev =
  1253. dp_get_be_vdev_from_dp_vdev(ta_peer->vdev);
  1254. struct dp_soc_be *be_soc =
  1255. dp_get_be_soc_from_dp_soc(params->dest_soc);
  1256. if (!(qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf)))
  1257. return false;
  1258. dest_chip_id = HAL_RX_DEST_CHIP_ID_GET(msdu_metadata);
  1259. qdf_assert_always(dest_chip_id <= (DP_MLO_MAX_DEST_CHIP_ID - 1));
  1260. da_peer_id = HAL_RX_PEER_ID_GET(msdu_metadata);
  1261. /* use dest chip id when TA is MLD peer and DA is legacy */
  1262. if (be_soc->mlo_enabled &&
  1263. ta_peer->mld_peer &&
  1264. !(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
  1265. /* validate chip_id, get a ref, and re-assign soc */
  1266. params->dest_soc =
  1267. dp_mlo_get_soc_ref_by_chip_id(be_soc->ml_ctxt,
  1268. dest_chip_id);
  1269. if (!params->dest_soc)
  1270. return false;
  1271. da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
  1272. da_peer_id,
  1273. &txrx_ref_handle,
  1274. DP_MOD_ID_RX);
  1275. if (!da_peer)
  1276. return false;
  1277. } else {
  1278. da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc,
  1279. da_peer_id,
  1280. &txrx_ref_handle,
  1281. DP_MOD_ID_RX);
  1282. if (!da_peer)
  1283. return false;
  1284. params->dest_soc = da_peer->vdev->pdev->soc;
  1285. if (!params->dest_soc)
  1286. goto rel_da_peer;
  1287. }
  1288. params->tx_vdev_id = da_peer->vdev->vdev_id;
  1289. /* If the source or destination peer is in the isolation
  1290. * list then don't forward; instead push to the bridge stack.
  1291. */
  1292. if (dp_get_peer_isolation(ta_peer) ||
  1293. dp_get_peer_isolation(da_peer)) {
  1294. ret = false;
  1295. goto rel_da_peer;
  1296. }
  1297. if (da_peer->bss_peer || (da_peer == ta_peer)) {
  1298. ret = false;
  1299. goto rel_da_peer;
  1300. }
  1301. /* Same vdev, support Intra-BSS */
  1302. if (da_peer->vdev == ta_peer->vdev) {
  1303. ret = true;
  1304. goto rel_da_peer;
  1305. }
  1306. /* MLO specific Intra-BSS check */
  1307. if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
  1308. /* use dest chip id for legacy dest peer */
  1309. if (!(da_peer_id & HAL_RX_DA_IDX_ML_PEER_MASK)) {
  1310. if (!(be_vdev->partner_vdev_list[dest_chip_id][0] ==
  1311. params->tx_vdev_id) &&
  1312. !(be_vdev->partner_vdev_list[dest_chip_id][1] ==
  1313. params->tx_vdev_id)) {
  1314. /*dp_soc_unref_delete(soc);*/
  1315. goto rel_da_peer;
  1316. }
  1317. }
  1318. ret = true;
  1319. }
  1320. rel_da_peer:
  1321. dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
  1322. return ret;
  1323. }
  1324. #else
  1325. static bool
  1326. dp_rx_intrabss_ucast_check_be(qdf_nbuf_t nbuf,
  1327. struct dp_txrx_peer *ta_peer,
  1328. uint8_t *rx_tlv_hdr,
  1329. struct hal_rx_msdu_metadata *msdu_metadata,
  1330. struct dp_be_intrabss_params *params)
  1331. {
  1332. uint16_t da_peer_id;
  1333. struct dp_txrx_peer *da_peer;
  1334. bool ret = false;
  1335. dp_txrx_ref_handle txrx_ref_handle = NULL;
  1336. if (!qdf_nbuf_is_da_valid(nbuf) || qdf_nbuf_is_da_mcbc(nbuf))
  1337. return false;
  1338. da_peer_id = dp_rx_peer_metadata_peer_id_get_be(
  1339. params->dest_soc,
  1340. msdu_metadata->da_idx);
  1341. da_peer = dp_txrx_peer_get_ref_by_id(params->dest_soc, da_peer_id,
  1342. &txrx_ref_handle, DP_MOD_ID_RX);
  1343. if (!da_peer)
  1344. return false;
  1345. params->tx_vdev_id = da_peer->vdev->vdev_id;
  1346. /* If the source or destination peer is in the isolation
  1347. * list then don't forward; instead push to the bridge stack.
  1348. */
  1349. if (dp_get_peer_isolation(ta_peer) ||
  1350. dp_get_peer_isolation(da_peer))
  1351. goto rel_da_peer;
  1352. if (da_peer->bss_peer || da_peer == ta_peer)
  1353. goto rel_da_peer;
  1354. /* Same vdev, support Intra-BSS */
  1355. if (da_peer->vdev == ta_peer->vdev) {
  1356. ret = true;
  1357. goto rel_da_peer;
  1358. }
  1359. /* MLO specific Intra-BSS check */
  1360. if (dp_rx_intrabss_fwd_mlo_allow(ta_peer, da_peer)) {
  1361. ret = true;
  1362. goto rel_da_peer;
  1363. }
  1364. rel_da_peer:
  1365. dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX);
  1366. return ret;
  1367. }
  1368. #endif /* WLAN_MLO_MULTI_CHIP */
  1369. #endif /* INTRA_BSS_FWD_OFFLOAD */
#if defined(QCA_MONITOR_2_0_SUPPORT) || defined(CONFIG_WORD_BASED_TLV)
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
				  uint32_t *msg_word,
				  void *rx_filter)
{
	struct htt_rx_ring_tlv_filter *tlv_filter =
				(struct htt_rx_ring_tlv_filter *)rx_filter;

	if (!msg_word || !tlv_filter)
		return;

	/* if word mask is zero, FW will set the default values */
	if (!(tlv_filter->rx_mpdu_start_wmask > 0 &&
	      tlv_filter->rx_msdu_end_wmask > 0)) {
		msg_word += 4;
		*msg_word = 0;
		goto config_mon;
	}

	HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACTION_ENABLE_SET(*msg_word, 1);

	/* word 14 */
	msg_word += 3;
	*msg_word = 0;
	HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_WORD_MASK_SET(
				*msg_word,
				tlv_filter->rx_mpdu_start_wmask);

	/* word 15 */
	msg_word++;
	*msg_word = 0;
	HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_WORD_MASK_SET(
				*msg_word,
				tlv_filter->rx_msdu_end_wmask);

config_mon:
	msg_word--;
	dp_mon_rx_wmask_subscribe(soc, msg_word, tlv_filter);
}
#else
void dp_rx_word_mask_subscribe_be(struct dp_soc *soc,
				  uint32_t *msg_word,
				  void *rx_filter)
{
}
#endif
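/*
 * Usage sketch for dp_rx_word_mask_subscribe_be() above (illustrative
 * only; the masks 0x3/0x7 are placeholders for word 14 / word 15, not
 * recommended settings, and msg_word is assumed to point into the HTT RX
 * ring selection cfg message being built by the caller):
 *
 *	struct htt_rx_ring_tlv_filter tlv_filter = {0};
 *
 *	tlv_filter.rx_mpdu_start_wmask = 0x3;
 *	tlv_filter.rx_msdu_end_wmask = 0x7;
 *	dp_rx_word_mask_subscribe_be(soc, msg_word, &tlv_filter);
 *
 * If either mask is left at zero, word-mask compaction stays disabled,
 * the firmware default values apply (per the comment in the function),
 * and only the monitor word-mask subscription is performed.
 */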
#if defined(WLAN_MCAST_MLO) && defined(CONFIG_MLO_SINGLE_DEV)
static inline
bool dp_rx_intrabss_mlo_mcbc_fwd(struct dp_soc *soc, struct dp_vdev *vdev,
				 qdf_nbuf_t nbuf_copy)
{
	struct dp_vdev *mcast_primary_vdev = NULL;
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct cdp_tx_exception_metadata tx_exc_metadata = {0};

	if (!vdev->mlo_vdev)
		return false;

	tx_exc_metadata.is_mlo_mcast = 1;
	mcast_primary_vdev = dp_mlo_get_mcast_primary_vdev(be_soc,
							   be_vdev,
							   DP_MOD_ID_RX);
	if (!mcast_primary_vdev)
		return false;

	nbuf_copy = dp_tx_send_exception((struct cdp_soc_t *)
					 mcast_primary_vdev->pdev->soc,
					 mcast_primary_vdev->vdev_id,
					 nbuf_copy, &tx_exc_metadata);
	if (nbuf_copy)
		qdf_nbuf_free(nbuf_copy);

	dp_vdev_unref_delete(mcast_primary_vdev->pdev->soc,
			     mcast_primary_vdev, DP_MOD_ID_RX);
	return true;
}
#else
static inline
bool dp_rx_intrabss_mlo_mcbc_fwd(struct dp_soc *soc, struct dp_vdev *vdev,
				 qdf_nbuf_t nbuf_copy)
{
	return false;
}
#endif
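/*
 * Note on dp_rx_intrabss_mlo_mcbc_fwd() above (descriptive of the code,
 * added for clarity): when MLO multicast handling applies, the copy is
 * re-injected through dp_tx_send_exception() on the MLO mcast primary
 * vdev with is_mlo_mcast set, and is freed here if TX hands the buffer
 * back. A true return tells the caller the copy has been consumed; false
 * (non-MLO vdev, no primary vdev, or the stub build) leaves ownership of
 * nbuf_copy with the caller.
 */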
/**
 * dp_rx_intrabss_mcast_handler_be() - handler for mcast packets
 * @soc: core txrx main context
 * @ta_txrx_peer: source txrx_peer entry
 * @nbuf_copy: nbuf that has to be intrabss forwarded
 * @tid_stats: tid_stats structure
 *
 * Return: true if it is forwarded else false
 */
bool
dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
				struct dp_txrx_peer *ta_txrx_peer,
				qdf_nbuf_t nbuf_copy,
				struct cdp_tid_rx_stats *tid_stats)
{
	if (qdf_unlikely(ta_txrx_peer->vdev->nawds_enabled)) {
		struct cdp_tx_exception_metadata tx_exc_metadata = {0};
		uint16_t len = QDF_NBUF_CB_RX_PKT_LEN(nbuf_copy);

		tx_exc_metadata.peer_id = ta_txrx_peer->peer_id;
		tx_exc_metadata.is_intrabss_fwd = 1;
		tx_exc_metadata.tid = HTT_TX_EXT_TID_INVALID;

		if (dp_tx_send_exception((struct cdp_soc_t *)soc,
					 ta_txrx_peer->vdev->vdev_id,
					 nbuf_copy,
					 &tx_exc_metadata)) {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
						      rx.intra_bss.fail, 1,
						      len);
			tid_stats->fail_cnt[INTRABSS_DROP]++;
			qdf_nbuf_free(nbuf_copy);
		} else {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_txrx_peer,
						      rx.intra_bss.pkts, 1,
						      len);
			tid_stats->intrabss_cnt++;
		}
		return true;
	}

	if (dp_rx_intrabss_mlo_mcbc_fwd(soc, ta_txrx_peer->vdev,
					nbuf_copy))
		return true;

	return false;
}
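/*
 * Return-value convention for dp_rx_intrabss_mcast_handler_be() (added
 * note, describing the code above): true means nbuf_copy has been
 * consumed on this path, either queued to TX via the exception API or
 * freed on failure with the intra-BSS fail stats updated; false means no
 * NAWDS/MLO handling applied and the caller still owns the copy.
 */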
/**
 * dp_rx_intrabss_fwd_be() - API for intrabss fwd. For an EAPOL
 *  pkt with DA not equal to the vdev mac addr, fwd is not allowed.
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @msdu_metadata: msdu metadata
 *
 * Return: true if it is forwarded else false
 */
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			   uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			   struct hal_rx_msdu_metadata msdu_metadata)
{
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
					tid_stats.tid_rx_stats[ring_id][tid];
	bool ret = false;
	struct dp_be_intrabss_params params;

	/* If it is a broadcast pkt (e.g. ARP) and it is not its own
	 * source, then clone the pkt and send the cloned pkt for
	 * intra BSS forwarding and the original pkt up the network stack.
	 * Note: how do we handle multicast pkts? Do we forward
	 * all multicast pkts as is, or let a higher layer module
	 * like igmpsnoop decide whether to forward or not with
	 * Mcast enhancement?
	 */
	if (qdf_nbuf_is_da_mcbc(nbuf) && !ta_peer->bss_peer) {
		return dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
					       nbuf, tid_stats);
	}

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	params.dest_soc = soc;
	if (dp_rx_intrabss_ucast_check_be(nbuf, ta_peer, rx_tlv_hdr,
					  &msdu_metadata, &params)) {
		ret = dp_rx_intrabss_ucast_fwd(params.dest_soc, ta_peer,
					       params.tx_vdev_id,
					       rx_tlv_hdr, nbuf, tid_stats);
	}

	return ret;
}
#endif
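/*
 * Order of checks in dp_rx_intrabss_fwd_be() above (added summary): the
 * mcast/bcast path is handled first via dp_rx_intrabss_mcbc_fwd(), then
 * the EAPOL drop check short-circuits with true, and finally the unicast
 * eligibility check gates the actual forward through
 * dp_rx_intrabss_ucast_fwd().
 */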
bool dp_rx_chain_msdus_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
			  uint8_t *rx_tlv_hdr, uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

	if (!dp_pdev) {
		dp_rx_debug("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return mpdu_done;
	}

	/* If the invalid peer SG list has reached the max number of
	 * entries, free the buffers in the list and treat the current
	 * buffer as the start of the list.
	 *
	 * The current logic to detect the last buffer from attn_tlv is
	 * not reliable in the OFDMA UL scenario, hence add a max buffers
	 * check to avoid list pile-up.
	 */
	if (!dp_pdev->first_nbuf ||
	    (dp_pdev->invalid_peer_head_msdu &&
	     QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
	     (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then free all the msdus of the invalid peer
		 * msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is
		 * completely received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			dp_rx_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;

		dp_monitor_get_mpdu_status(dp_pdev, soc, rx_tlv_hdr);
	}

	if (qdf_nbuf_is_rx_chfrag_end(nbuf) &&
	    hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		qdf_assert_always(dp_pdev->first_nbuf);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; add the check for debugging purposes,
	 * in case of some corner case.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);

	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}
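/*
 * Summary note for dp_rx_chain_msdus_be() above (descriptive, added for
 * clarity): MSDUs that arrive for an invalid peer are accumulated on the
 * pdev's invalid_peer list until the chfrag-end buffer with the
 * msdu_done attention bit is seen, at which point the MPDU is reported
 * complete (mpdu_done = true) so the caller can act on the whole chain.
 * DP_MAX_INVALID_BUFFERS bounds the list because end-of-MPDU detection
 * from the attention TLV is not reliable in the OFDMA UL case.
 */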