dp_rx_err.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_htt.h"
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"
#include "dp_rx_buffer_pool.h"

#define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
#define dp_rx_err_info_rl(params...) \
	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
#define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params)
#ifndef QCA_HOST_MODE_WIFI_DISABLED

/* Max regular Rx packet routing error */
#define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD	20
#define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT	10
#define DP_RX_ERR_ROUTE_TIMEOUT_US	(5 * 1000 * 1000) /* micro seconds */

#ifdef FEATURE_MEC
bool dp_rx_mcast_echo_check(struct dp_soc *soc,
			    struct dp_txrx_peer *txrx_peer,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = txrx_peer->vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_mec_entry *mecentry = NULL;
	struct dp_ast_entry *ase = NULL;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);

	/*
	 * If the received pkt's src mac addr matches the vdev
	 * mac address, then drop the pkt as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
			  vdev->mac_addr.raw,
			  QDF_MAC_ADDR_SIZE)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * If the received pkt's src mac addr matches the MAC addr of a
	 * wired PC behind the STA, or of a wireless STA behind the
	 * repeater, then drop the pkt as it is looped back.
	 */
	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		qdf_spin_lock_bh(&soc->ast_lock);
		ase = soc->ast_table[sa_idx];

		/*
		 * This check is not strictly needed since MEC is not
		 * dependent on AST, but without it SON has issues in a
		 * dual-backhaul scenario. In APS SON mode, a client
		 * connected to the RE on 2G sends multicast packets; the
		 * RE sends them to the CAP over the 5G backhaul, and the
		 * CAP loops them back on 2G to the RE. On receiving them
		 * on the 2G STA vap, we would assume the client has roamed
		 * and kick out the client.
		 */
		if (ase && (ase->peer_id != txrx_peer->peer_id)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			goto drop;
		}

		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	qdf_spin_lock_bh(&soc->mec_lock);

	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
						   &data[QDF_MAC_ADDR_SIZE]);
	if (!mecentry) {
		qdf_spin_unlock_bh(&soc->mec_lock);
		return false;
	}

	qdf_spin_unlock_bh(&soc->mec_lock);

drop:
	dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT,
		       soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));

	return true;
}
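
/*
 * Illustrative sketch (not part of the driver): the MEC comparison above
 * assumes an ethernet-decapped frame, where the 6-byte source address
 * starts right after the 6-byte destination address, i.e. at
 * data[QDF_MAC_ADDR_SIZE]:
 *
 *	uint8_t *data = qdf_nbuf_data(nbuf);
 *	uint8_t *da = &data[0];                  // bytes 0..5
 *	uint8_t *sa = &data[QDF_MAC_ADDR_SIZE];  // bytes 6..11, MEC input
 *
 *	// A self-echoed multicast frame carries our own vdev MAC as SA:
 *	bool looped_back = !qdf_mem_cmp(sa, vdev->mac_addr.raw,
 *					QDF_MAC_ADDR_SIZE);
 */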
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

void dp_rx_link_desc_refill_duplicate_check(
				struct dp_soc *soc,
				struct hal_buf_info *buf_info,
				hal_buff_addrinfo_t ring_buf_info)
{
	struct hal_buf_info current_link_desc_buf_info = { 0 };

	/* do duplicate link desc address check */
	hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
					  &current_link_desc_buf_info);

	/*
	 * TODO - Check if the hal soc api call can be removed
	 * since the cookie is just used for print.
	 * buffer_addr_info is the first element of ring_desc
	 */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
				  (uint32_t *)ring_buf_info,
				  &current_link_desc_buf_info);

	if (qdf_unlikely(current_link_desc_buf_info.paddr ==
			 buf_info->paddr)) {
		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
			   current_link_desc_buf_info.paddr,
			   current_link_desc_buf_info.sw_cookie);
		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
	}
	*buf_info = current_link_desc_buf_info;
}
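
/*
 * Usage sketch (illustrative): the caller keeps the hal_buf_info of the
 * link descriptor it released last and passes it back on the next call,
 * so a back-to-back release of the same physical address is flagged and
 * counted as a duplicate refill:
 *
 *	// soc->last_op_info.wbm_rel_link_desc persists across calls
 *	dp_rx_link_desc_refill_duplicate_check(
 *			soc,
 *			&soc->last_op_info.wbm_rel_link_desc,
 *			link_desc_addr);
 *	// ...the stored info now describes the current descriptor and
 *	// becomes the comparison baseline for the next release.
 */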
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc);
		return status;
	}

	/* do duplicate link desc address check */
	dp_rx_link_desc_refill_duplicate_check(
				soc,
				&soc->last_op_info.wbm_rel_link_desc,
				link_desc_addr);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK",
			      soc, wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);

		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
			   srng->ring_id,
			   soc->stats.rx.err.hal_ring_access_full_fail);

		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			   *srng->u.src_ring.hp_addr,
			   srng->u.src_ring.reap_hp,
			   *srng->u.src_ring.tp_addr,
			   srng->u.src_ring.cached_tp);

		QDF_BUG(0);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

qdf_export_symbol(dp_rx_link_desc_return_by_addr);

QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}
#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac ID
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		 uint8_t *mac_id,
		 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_rx_desc *rx_desc;
	/* First field in REO Dst ring Desc is buffer_addr_info */
	void *buf_addr_info = ring_desc;
	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
	struct buffer_addr_info next_link_desc_addr_info = { 0 };

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info);

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
				  (uint32_t *)ring_desc,
				  &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
	if (!link_desc_va) {
		dp_rx_err_debug("link desc va is null, soc %pK", soc);
		return rx_bufs_used;
	}

more_msdu_link_desc:
	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc, msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from an MSDU link desc belong to same pdev */
		*mac_id = rx_desc->pool_id;
		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
		if (!pdev) {
			dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
					soc, rx_desc->pool_id);
			return rx_bufs_used;
		}

		if (!dp_rx_desc_check_magic(rx_desc)) {
			dp_rx_err_err("%pK: Invalid rx_desc cookie=%d",
				      soc, msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		dp_ipa_rx_buf_smmu_mapping_lock(soc);
		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
		rx_desc->unmapped = 1;
		dp_ipa_rx_buf_smmu_mapping_unlock(soc);

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		dp_rx_err_err("%pK: Packet received with PN error for tid :%d",
			      soc, tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);

		dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info,
				      rx_desc->nbuf,
				      QDF_TX_RX_STATUS_DROP, true);
		/* Just free the buffers */
		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/*
	 * If the MSDUs are spread across multiple link descriptors,
	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
	 * spread across multiple buffers). Hence, it is
	 * necessary to check the next link descriptor and release
	 * all the MSDUs that are part of it.
	 */
	hal_rx_get_next_msdu_link_desc_buf_addr_info(
			link_desc_va,
			&next_link_desc_addr_info);

	if (hal_rx_is_buf_addr_info_valid(
				&next_link_desc_addr_info)) {
		/* Clear the next link desc info for the current link_desc */
		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);

		dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
		hal_rx_buffer_addr_info_get_paddr(
				&next_link_desc_addr_info,
				&buf_info);
		/* buffer_addr_info is the first element of ring_desc */
		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
					  (uint32_t *)&next_link_desc_addr_info,
					  &buf_info);
		cur_link_desc_addr_info = next_link_desc_addr_info;
		buf_addr_info = &cur_link_desc_addr_info;

		link_desc_va =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		goto more_msdu_link_desc;
	}
	quota--;
	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	return rx_bufs_used;
}
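
/*
 * Shape of the walk above (illustrative pseudo-C, not driver code): link
 * descriptors chain through a "next msdu link desc" buffer_addr_info, so
 * draining an MPDU is effectively
 *
 *	do {
 *		drop_all_msdus_in(link_desc_va);   // the for-loop above
 *		next = next_link_desc_of(link_desc_va);
 *		release(current);                  // back to WBM idle list
 *		current = next;
 *	} while (is_valid(next));
 *
 * expressed with a goto so that the final link descriptor is released
 * after the loop falls through.
 */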
/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac ID
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_txrx_peer *txrx_peer;
	bool peer_pn_policy = false;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
					mpdu_desc_info->peer_meta_data);

	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
						   &txrx_ref_handle,
						   DP_MOD_ID_RX_ERR);

	if (qdf_likely(txrx_peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		dp_err_rl("discard rx due to PN error for peer %pK",
			  txrx_peer);

		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
	}
	dp_rx_err_err("%pK: Packet received with PN error", soc);

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}
#ifdef DP_RX_DELIVER_ALL_OOR_FRAMES
/**
 * dp_rx_deliver_oor_frame() - deliver OOR frames to stack
 * @soc: Datapath soc handler
 * @txrx_peer: pointer to DP peer
 * @nbuf: pointer to the skb of RX frame
 * @frame_mask: the mask for special frame needed
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Note: msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
 * a single nbuf is expected.
 *
 * Return: true - nbuf has been delivered to stack, false - not.
 */
static bool
dp_rx_deliver_oor_frame(struct dp_soc *soc,
			struct dp_txrx_peer *txrx_peer,
			qdf_nbuf_t nbuf, uint32_t frame_mask,
			uint8_t *rx_tlv_hdr)
{
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint32_t skip_len;

	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
		skip_len = l2_hdr_offset;
	} else {
		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
	}

	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
	qdf_nbuf_pull_head(nbuf, skip_len);
	qdf_nbuf_set_exc_frame(nbuf, 1);

	dp_info_rl("OOR frame, mpdu sn 0x%x",
		   hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
	dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, nbuf, NULL);
	return true;
}
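
/*
 * Worked example (illustrative; the tlv size is target-dependent and the
 * value 128 here is only an assumption): for a non-fragmented msdu with
 * msdu_len = 1400, l2_hdr_offset = 2 and soc->rx_pkt_tlv_size = 128,
 *
 *	skip_len = 2 + 128 = 130;
 *	qdf_nbuf_set_pktlen(nbuf, 1400 + 130);  // 1530 bytes total
 *	qdf_nbuf_pull_head(nbuf, 130);          // data now points at the
 *	                                        // 1400-byte MSDU
 *
 * i.e. the pre-header TLVs and L3 padding are trimmed so the stack sees
 * only the MSDU.
 */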
#else
static bool
dp_rx_deliver_oor_frame(struct dp_soc *soc,
			struct dp_txrx_peer *txrx_peer,
			qdf_nbuf_t nbuf, uint32_t frame_mask,
			uint8_t *rx_tlv_hdr)
{
	return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
					   rx_tlv_hdr);
}
#endif

/**
 * dp_rx_oor_handle() - Handles the msdu which is OOR error
 *
 * @soc: core txrx main context
 * @nbuf: pointer to msdu skb
 * @peer_id: dp peer ID
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This function processes the msdu delivered from REO2TCL
 * ring with error type OOR
 *
 * Return: None
 */
static void
dp_rx_oor_handle(struct dp_soc *soc,
		 qdf_nbuf_t nbuf,
		 uint16_t peer_id,
		 uint8_t *rx_tlv_hdr)
{
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
	struct dp_txrx_peer *txrx_peer = NULL;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
						   &txrx_ref_handle,
						   DP_MOD_ID_RX_ERR);
	if (!txrx_peer) {
		dp_info_rl("peer not found");
		goto free_nbuf;
	}

	if (dp_rx_deliver_oor_frame(soc, txrx_peer, nbuf, frame_mask,
				    rx_tlv_hdr)) {
		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
		return;
	}

free_nbuf:
	if (txrx_peer)
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);

	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
	dp_rx_nbuf_free(nbuf);
}
/**
 * dp_rx_err_nbuf_pn_check() - Check if the PN number of this current packet
 *			       is a monotonic increment of the packet number
 *			       from the previous successfully re-ordered
 *			       frame.
 * @soc: Datapath SOC handle
 * @ring_desc: REO ring descriptor
 * @nbuf: Current packet
 *
 * Return: QDF_STATUS_SUCCESS if the pn check passes, else QDF_STATUS_E_FAILURE
 */
static inline QDF_STATUS
dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc,
			qdf_nbuf_t nbuf)
{
	uint64_t prev_pn, curr_pn[2];

	if (!hal_rx_encryption_info_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
		return QDF_STATUS_SUCCESS;

	hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn);
	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), curr_pn);

	if (curr_pn[0] > prev_pn)
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}
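
/*
 * Worked example (illustrative): with prev_pn = 0x1000 recorded by REO
 * for the last in-order frame,
 *
 *	curr_pn[0] == 0x1001  ->  passes (strictly greater, fresh frame)
 *	curr_pn[0] == 0x1000  ->  fails  (same PN, likely a replay)
 *	curr_pn[0] == 0x0fff  ->  fails  (older PN, out of order/replayed)
 *
 * Only the low 64 bits (curr_pn[0]) take part in the comparison here;
 * unencrypted frames skip the check entirely.
 */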
#ifdef WLAN_SKIP_BAR_UPDATE
static
void dp_rx_err_handle_bar(struct dp_soc *soc,
			  struct dp_peer *peer,
			  qdf_nbuf_t nbuf)
{
	dp_info_rl("BAR update to H.W is skipped");
	DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
}
#else
static
void dp_rx_err_handle_bar(struct dp_soc *soc,
			  struct dp_peer *peer,
			  qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr;
	unsigned char type, subtype;
	uint16_t start_seq_num;
	uint32_t tid;
	QDF_STATUS status;
	struct ieee80211_frame_bar *bar;

	/*
	 * 1. Is this a BAR frame? If not, discard it.
	 * 2. If it is, get the peer id, tid, ssn.
	 * 2a. Do a tid update.
	 */
	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size);

	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (!(type == IEEE80211_FC0_TYPE_CTL &&
	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
		dp_err_rl("Not a BAR frame!");
		return;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	qdf_assert_always(tid < DP_MAX_TIDS);

	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;

	dp_info_rl("tid %u window_size %u start_seq_num %u",
		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);

	status = dp_rx_tid_update_wifi3(peer, tid,
					peer->rx_tid[tid].ba_win_size,
					start_seq_num,
					true);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err_rl("failed to handle bar frame update rx tid");
		DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
	} else {
		DP_STATS_INC(soc, rx.err.ssn_update_count, 1);
	}
}
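
/*
 * Worked example (illustrative): the BAR control field i_seq packs the
 * fragment number in its low 4 bits and the 12-bit starting sequence
 * number above them (IEEE80211_SEQ_SEQ_SHIFT == 4), so
 *
 *	le16toh(bar->i_seq) == 0x01B0
 *	start_seq_num = 0x01B0 >> 4 = 0x1B = 27
 *
 * i.e. the transmitter asks the reorder window for this tid to start at
 * sequence number 27.
 */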
#endif

/**
 * _dp_rx_bar_frame_handle(): Core of the BAR frame handling
 * @soc: Datapath SoC handle
 * @nbuf: packet being processed
 * @mpdu_desc_info: mpdu desc info for the current packet
 * @tid: tid on which the packet arrived
 * @err_status: Flag to indicate if REO encountered an error while routing
 *		this frame
 * @error_code: REO error code
 *
 * Return: None
 */
static void
_dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			uint32_t tid, uint8_t err_status, uint32_t error_code)
{
	uint16_t peer_id;
	struct dp_peer *peer;

	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
					mpdu_desc_info->peer_meta_data);
	peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
	if (!peer)
		return;

	dp_info_rl("BAR frame: "
		   " peer_id = %d"
		   " tid = %u"
		   " SSN = %d"
		   " error status = %d",
		   peer->peer_id,
		   tid,
		   mpdu_desc_info->mpdu_seq,
		   err_status);

	if (err_status == HAL_REO_ERROR_DETECTED) {
		switch (error_code) {
		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_BAR_FRAME_OOR:
			dp_rx_err_handle_bar(soc, peer, nbuf);
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			break;
		default:
			DP_STATS_INC(soc, rx.bar_frame, 1);
		}
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
}
/**
 * dp_rx_bar_frame_handle() - Function to handle err BAR frames
 * @soc: core DP main context
 * @ring_desc: Hal ring desc
 * @rx_desc: dp rx desc
 * @mpdu_desc_info: mpdu desc info
 * @err_status: error status
 * @err_code: error code
 *
 * Handle the error BAR frames received. Ensure the SOC level
 * stats are updated based on the REO error code. The BAR frames
 * are further processed by updating the Rx tids with the start
 * sequence number (SSN) and BA window size. Desc is returned
 * to the free desc list
 *
 * Return: none
 */
static void
dp_rx_bar_frame_handle(struct dp_soc *soc,
		       hal_ring_desc_t ring_desc,
		       struct dp_rx_desc *rx_desc,
		       struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		       uint8_t err_status,
		       uint32_t err_code)
{
	qdf_nbuf_t nbuf;
	struct dp_pdev *pdev;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	nbuf = rx_desc->nbuf;
	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
	rx_desc->unmapped = 1;
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
					rx_tlv_hdr);
	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);

	if (!pdev) {
		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
				soc, rx_desc->pool_id);
		return;
	}

	_dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status,
				err_code);
	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, nbuf,
			      QDF_TX_RX_STATUS_DROP, true);
	dp_rx_link_desc_return(soc, ring_desc,
			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
				    rx_desc->pool_id);
	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
				    &pdev->free_list_tail,
				    rx_desc);
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid)
{
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
	if (!peer) {
		dp_rx_err_info_rl("%pK: peer not found", soc);
		goto free_nbuf;
	}

	if (tid >= DP_MAX_TIDS) {
		dp_info_rl("invalid tid");
		goto nbuf_deliver;
	}

	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);

	/* Send DELBA only if a BA session is active */
	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto nbuf_deliver;
	}

	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		if (soc->cdp_soc.ol_ops->send_delba) {
			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
				     1);
			soc->cdp_soc.ol_ops->send_delba(
					peer->vdev->pdev->soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					tid,
					rx_tid->delba_rcode,
					CDP_DELBA_2K_JUMP);
		}
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

nbuf_deliver:
	if (dp_rx_deliver_special_frame(soc, peer->txrx_peer, nbuf, frame_mask,
					rx_tlv_hdr)) {
		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		return;
	}

free_nbuf:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);

	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
	dp_rx_nbuf_free(nbuf);
}
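
/*
 * Control flow of the handler above, in brief (illustrative):
 *
 *	peer lookup fails          -> drop nbuf, count rx_2k_jump_drop
 *	BA session not active      -> try special-frame delivery (ARP only)
 *	BA active, no DELBA queued -> mark delba_tx_status, send DELBA with
 *	                              reason QOS_SETUP_REQUIRED, then try
 *	                              special-frame delivery
 *	delivery succeeds          -> count rx_2k_jump_to_stack
 *
 * so at most one DELBA is in flight per tid while delba_tx_status is set.
 */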
#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!pdev) {
		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
				soc, pool_id);
		return false;
	}
	/*
	 * WAR: In certain types of packets, if peer_id is not correct then
	 * the driver may not be able to find the peer. Try finding the peer
	 * by addr_2 of the received MPDU.
	 */
	if (wh)
		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
	if (peer) {
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		dp_rx_nbuf_free(nbuf);
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);

		return true;
	}
	return false;
}
#else
bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}
#endif

bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}
#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
			    qdf_nbuf_t nbuf,
			    qdf_nbuf_t tail,
			    bool is_eapol)
{
	if (is_eapol && soc->eapol_over_control_port)
		dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
	else
		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#else
void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
			    qdf_nbuf_t nbuf,
			    qdf_nbuf_t tail,
			    bool is_eapol)
{
	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#endif

#ifdef WLAN_FEATURE_11BE_MLO
int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
{
	return ((qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE) == 0) ||
		(qdf_mem_cmp(eh->ether_dhost, &vdev->mld_mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE) == 0));
}
#else
int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
{
	return (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
			    QDF_MAC_ADDR_SIZE) == 0);
}
#endif
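
/*
 * Illustrative note on the MLO variant above: an 11be MLD station owns
 * both a per-link MAC and an MLD MAC, and a frame addressed to either
 * must be accepted. For example (addresses are made up):
 *
 *	vdev->mac_addr     = 00:11:22:33:44:55   (link address)
 *	vdev->mld_mac_addr = 00:11:22:33:44:66   (MLD address)
 *
 *	eh->ether_dhost == 00:11:22:33:44:66  ->  match (MLD)
 *	eh->ether_dhost == 00:11:22:33:44:55  ->  match (link)
 *	anything else                         ->  no match
 */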
#ifndef QCA_HOST_MODE_WIFI_DISABLED
bool
dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
{
	struct dp_soc *soc = vdev->pdev->soc;

	if (!vdev->drop_3addr_mcast)
		return false;

	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return true;

	return false;
}

/**
 * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed
 *				    for this frame received in REO error ring.
 * @soc: Datapath SOC handle
 * @error: REO error detected or not
 * @error_code: Error code in case of REO error
 *
 * Return: true if the pn check is needed in software,
 *	   false if the pn check is not needed.
 */
static inline bool
dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
			     uint32_t error_code)
{
	return (soc->features.pn_in_reo_dest &&
		(error == HAL_REO_ERROR_DETECTED &&
		 (hal_rx_reo_is_2k_jump(error_code) ||
		  hal_rx_reo_is_oor_error(error_code) ||
		  hal_rx_reo_is_bar_oor_2k_jump(error_code))));
}
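
/*
 * In short (illustrative): software PN checking is needed only when the
 * target reports PN in the REO destination descriptor AND the frame took
 * one of the reorder-error paths, i.e.
 *
 *	pn_in_reo_dest	error type		sw pn check
 *	false		any			no
 *	true		2k jump / OOR / BAR	yes
 *	true		other REO errors	no
 */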
#ifdef DP_WAR_INVALID_FIRST_MSDU_FLAG
static inline void
dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
				  bool first_msdu_in_mpdu_processed)
{
	if (first_msdu_in_mpdu_processed) {
		/*
		 * This is the 2nd indication of first_msdu in the same mpdu.
		 * Skip re-parsing the mpdu_desc_info and use the cached one,
		 * since this msdu is most probably from the current mpdu
		 * which is being processed
		 */
	} else {
		hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc,
						   qdf_nbuf_data(nbuf),
						   mpdu_desc_info);
	}
}
#else
static inline void
dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
				  bool first_msdu_in_mpdu_processed)
{
	hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc, qdf_nbuf_data(nbuf),
					   mpdu_desc_info);
}
#endif
/**
 * dp_rx_reo_err_entry_process() - Handler for REO error entry processing
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: pointer to mpdu level description info
 * @link_desc_va: pointer to msdu_link_desc virtual address
 * @err_code: reo error code fetched from ring entry
 *
 * Function to handle msdus fetched from msdu link desc, currently
 * support REO error NULL queue, 2K jump, OOR.
 *
 * Return: msdu count processed
 */
static uint32_t
dp_rx_reo_err_entry_process(struct dp_soc *soc,
			    void *ring_desc,
			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			    void *link_desc_va,
			    enum hal_reo_error_code err_code)
{
	uint32_t rx_bufs_used = 0;
	struct dp_pdev *pdev;
	int i;
	uint8_t *rx_tlv_hdr_first;
	uint8_t *rx_tlv_hdr_last;
	uint32_t tid = DP_MAX_TIDS;
	uint16_t peer_id;
	struct dp_rx_desc *rx_desc;
	struct rx_desc_pool *rx_desc_pool;
	qdf_nbuf_t nbuf;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
	struct buffer_addr_info next_link_desc_addr_info = { 0 };
	/* First field in REO Dst ring Desc is buffer_addr_info */
	void *buf_addr_info = ring_desc;
	qdf_nbuf_t head_nbuf = NULL;
	qdf_nbuf_t tail_nbuf = NULL;
	uint16_t msdu_processed = 0;
	QDF_STATUS status;
	bool ret, is_pn_check_needed;
	uint8_t rx_desc_pool_id;
	struct dp_txrx_peer *txrx_peer = NULL;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	hal_ring_handle_t hal_ring_hdl = soc->reo_exception_ring.hal_srng;
	bool first_msdu_in_mpdu_processed = false;
	bool msdu_dropped = false;
	uint8_t link_id = 0;

	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
					mpdu_desc_info->peer_meta_data);
	is_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
							  HAL_REO_ERROR_DETECTED,
							  err_code);
more_msdu_link_desc:
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &num_msdus);
	for (i = 0; i < num_msdus; i++) {
		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc,
						msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);
		nbuf = rx_desc->nbuf;

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use) ||
		    qdf_unlikely(!nbuf)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			/* ignore duplicate RX desc and continue to process */
			/* Pop out the descriptor */
			msdu_dropped = true;
			continue;
		}

		ret = dp_rx_desc_paddr_sanity_check(rx_desc,
						    msdu_list.paddr[i]);
		if (!ret) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			rx_desc->in_err_state = 1;
			msdu_dropped = true;
			continue;
		}

		rx_desc_pool_id = rx_desc->pool_id;
		/* all buffers from an MSDU link belong to same pdev */
		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc_pool_id);

		rx_desc_pool = &soc->rx_desc_buf[rx_desc_pool_id];
		dp_ipa_rx_buf_smmu_mapping_lock(soc);
		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
		rx_desc->unmapped = 1;
		dp_ipa_rx_buf_smmu_mapping_unlock(soc);

		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
		rx_bufs_used++;
		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);

		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);

		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION))
			continue;

		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
					     rx_desc_pool_id)) {
			/* MSDU queued back to the pool */
			msdu_dropped = true;
			goto process_next_msdu;
		}

		if (is_pn_check_needed) {
			if (msdu_list.msdu_info[i].msdu_flags &
			    HAL_MSDU_F_FIRST_MSDU_IN_MPDU) {
				dp_rx_err_populate_mpdu_desc_info(soc, nbuf,
						mpdu_desc_info,
						first_msdu_in_mpdu_processed);
				first_msdu_in_mpdu_processed = true;
			} else {
				if (!first_msdu_in_mpdu_processed) {
					/*
					 * If no msdu in this mpdu was dropped
					 * due to failed sanity checks, then
					 * it's not expected to hit this
					 * condition. Hence we assert here.
					 */
					if (!msdu_dropped)
						qdf_assert_always(0);

					/*
					 * We do not have valid mpdu_desc_info
					 * to process this nbuf, hence drop it.
					 */
					dp_rx_nbuf_free(nbuf);
					/* TODO - Increment stats */
					goto process_next_msdu;
				}
				/*
				 * DO NOTHING -
				 * Continue using the same mpdu_desc_info
				 * details populated from the first msdu in
				 * the mpdu.
				 */
			}

			status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf);
			if (QDF_IS_STATUS_ERROR(status)) {
				DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail,
					     1);
				dp_rx_nbuf_free(nbuf);
				goto process_next_msdu;
			}

			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
					mpdu_desc_info->peer_meta_data);

			if (mpdu_desc_info->bar_frame)
				_dp_rx_bar_frame_handle(soc, nbuf,
							mpdu_desc_info, tid,
							HAL_REO_ERROR_DETECTED,
							err_code);
		}

		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);

		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
			nbuf = dp_rx_sg_create(soc, head_nbuf);
			qdf_nbuf_set_is_frag(nbuf, 1);
			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
		}

		switch (err_code) {
		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
			/*
			 * Only the first msdu carries a valid mpdu start
			 * description tlv; reuse its tid for the
			 * following msdus.
			 */
			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							   rx_tlv_hdr_last))
				tid = hal_rx_mpdu_start_tid_get(
						soc->hal_soc,
						rx_tlv_hdr_first);

			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
					  peer_id, tid);
			break;
		case HAL_REO_ERR_REGULAR_FRAME_OOR:
		case HAL_REO_ERR_BAR_FRAME_OOR:
			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
			break;
		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
			txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(
						soc, peer_id,
						&txrx_ref_handle,
						DP_MOD_ID_RX_ERR);
			if (!txrx_peer)
				dp_info_rl("txrx_peer is null peer_id %u",
					   peer_id);
			soc->arch_ops.dp_rx_null_q_desc_handle(soc, nbuf,
							       rx_tlv_hdr_last,
							       rx_desc_pool_id,
							       txrx_peer,
							       TRUE,
							       link_id);
			if (txrx_peer)
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_RX_ERR);
			break;
		default:
			dp_err_rl("Unsupported error code %d", err_code);
			dp_rx_nbuf_free(nbuf);
		}

process_next_msdu:
		msdu_processed++;
		head_nbuf = NULL;
		tail_nbuf = NULL;
	}

	/*
	 * If the MSDUs are spread across multiple link descriptors,
	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
	 * spread across multiple buffers). Hence, it is
	 * necessary to check the next link descriptor and release
	 * all the MSDUs that are part of it.
	 */
	hal_rx_get_next_msdu_link_desc_buf_addr_info(
			link_desc_va,
			&next_link_desc_addr_info);

	if (hal_rx_is_buf_addr_info_valid(
				&next_link_desc_addr_info)) {
		/* Clear the next link desc info for the current link_desc */
		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
		dp_rx_link_desc_return_by_addr(
				soc,
				buf_addr_info,
				HAL_BM_ACTION_PUT_IN_IDLE_LIST);

		hal_rx_buffer_addr_info_get_paddr(
				&next_link_desc_addr_info,
				&buf_info);
		/* buffer_addr_info is the first element of ring_desc */
		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
					  (uint32_t *)&next_link_desc_addr_info,
					  &buf_info);
		link_desc_va =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
		cur_link_desc_addr_info = next_link_desc_addr_info;
		buf_addr_info = &cur_link_desc_addr_info;

		goto more_msdu_link_desc;
	}

	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);

	return rx_bufs_used;
}
  1132. #endif /* QCA_HOST_MODE_WIFI_DISABLED */

void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
			uint8_t err_code, uint8_t mac_id, uint8_t link_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;

	if (dp_rx_check_pkt_len(soc, pkt_len)) {
		/* Drop & free packet */
		dp_rx_nbuf_free(nbuf);
		return;
	}

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!txrx_peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
		return;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc,
				  vdev);
		/* Drop & free packet */
		dp_rx_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset);

	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
			    htons(QDF_LLC_STP)) {
				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
				goto process_mesh;
			} else {
				goto process_rx;
			}
		}
	}
	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report unencrypted frame error.
	 * To pass WAPI cert case, SW needs to pass unencrypted
	 * rekey frame to stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		goto process_rx;
	}
	/*
	 * In the dynamic WEP case rekey frames are not encrypted, similar
	 * to WAPI. Allow EAPOL when 802.1x + WEP is enabled and key
	 * install is already done.
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		dp_rx_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				== QDF_STATUS_SUCCESS) {
			dp_rx_err_info("%pK: mesh pkt filtered", soc);
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			dp_rx_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer);
	}
process_rx:
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
					      qdf_nbuf_len(nbuf), link_id);
		if (is_broadcast) {
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1,
						      qdf_nbuf_len(nbuf),
						      link_id);
		}
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.unicast, 1,
					      qdf_nbuf_len(nbuf),
					      link_id);
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, txrx_peer, link_id);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
		DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
					    qdf_nbuf_is_ipv4_eapol_pkt(nbuf));
	}
	return;
}
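
/*
 * Usage sketch (illustrative only; not a call made from this function):
 * the WBM error path below feeds this handler with values pulled out of
 * the WBM release descriptor, roughly
 *
 *	dp_rx_process_rxdma_err(soc, nbuf, rx_tlv_hdr, txrx_peer,
 *				wbm_err.info_bit.rxdma_err_code,
 *				wbm_err.info_bit.pool_id, link_id);
 *
 * Note that the pool id doubles as the mac_id handed to the invalid-peer
 * wrapper when no txrx_peer is found.
 */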

void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr,
			     struct dp_txrx_peer *txrx_peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	uint16_t rx_seq, fragno;
	uint8_t is_raw;
	unsigned int tid;
	QDF_STATUS status;
	struct cdp_rx_mic_err_info mic_failure_info;

	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
					    rx_tlv_hdr))
		return;

	if (!txrx_peer) {
		dp_info_rl("txrx_peer not found");
		goto fail;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_info_rl("VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		dp_info_rl("PDEV not found");
		goto fail;
	}

	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
	if (is_raw) {
		fragno = dp_rx_frag_get_mpdu_frag_number(soc,
							 qdf_nbuf_data(nbuf));
		/* Can get only last fragment */
		if (fragno) {
			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
							qdf_nbuf_data(nbuf));
			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
							qdf_nbuf_data(nbuf));

			status = dp_rx_defrag_add_last_frag(soc, txrx_peer,
							    tid, rx_seq, nbuf);
			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
				   "status %d !", rx_seq, fragno, status);
			return;
		}
	}

	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.da_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get da_mac_addr");
		goto fail;
	}

	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.ta_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get ta_mac_addr");
		goto fail;
	}

	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = NULL;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);

fail:
	dp_rx_nbuf_free(nbuf);
	return;
}
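
/*
 * Note: the MIC failure is indicated to the control plane through the
 * ol_ops->rx_mic_error callback above; the nbuf itself is always freed
 * here, whether or not the indication was delivered. The only exception
 * is the raw last-fragment case, which hands the nbuf to the defrag
 * waitlist instead.
 */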

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_MCAST_MLO) && !defined(CONFIG_MLO_SINGLE_DEV)
static bool dp_rx_igmp_handler(struct dp_soc *soc,
			       struct dp_vdev *vdev,
			       struct dp_txrx_peer *peer,
			       qdf_nbuf_t nbuf,
			       uint8_t link_id)
{
	if (soc->arch_ops.dp_rx_mcast_handler) {
		if (soc->arch_ops.dp_rx_mcast_handler(soc, vdev, peer,
						      nbuf, link_id))
			return true;
	}
	return false;
}
#else
static bool dp_rx_igmp_handler(struct dp_soc *soc,
			       struct dp_vdev *vdev,
			       struct dp_txrx_peer *peer,
			       qdf_nbuf_t nbuf,
			       uint8_t link_id)
{
	return false;
}
#endif

/**
 * dp_rx_err_route_hdl() - Function to send EAPOL frames to stack
 *                         and free any other packet which comes in
 *                         this path.
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @txrx_peer: txrx peer handle
 * @rx_tlv_hdr: start of rx tlv header
 * @err_src: rxdma/reo
 * @link_id: link id on which the packet is received
 *
 * This function indicates EAPOL frame received in wbm error ring to stack.
 * Any other frame should be dropped.
 *
 * Return: None
 */
static void
dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
		    struct dp_txrx_peer *txrx_peer, uint8_t *rx_tlv_hdr,
		    enum hal_rx_wbm_error_source err_src,
		    uint8_t link_id)
{
	uint32_t pkt_len;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	struct hal_rx_msdu_metadata msdu_metadata;
	bool is_eapol;

	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_check_pkt_len(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(
			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!txrx_peer)
		goto drop_nbuf;

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
	else
		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
					  soc->rx_pkt_tlv_size));

	if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf, link_id))
		return;

	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

	/*
	 * Indicate EAPOL frame to stack only when vap mac address
	 * matches the destination address.
	 */
	is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
	if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		qdf_ether_header_t *eh =
			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		if (dp_rx_err_match_dhost(eh, vdev)) {
			DP_STATS_INC_PKT(vdev, rx_i.routed_eapol_pkt, 1,
					 qdf_nbuf_len(nbuf));

			/*
			 * Update the protocol tag in SKB based on
			 * CCE metadata.
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID,
						  true, true);
			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
					      true);
			DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
						  qdf_nbuf_len(nbuf),
						  vdev->pdev->enhanced_stats_en);
			qdf_nbuf_set_exc_frame(nbuf, 1);
			qdf_nbuf_set_next(nbuf, NULL);

			dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf,
						    NULL, is_eapol);
			return;
		}
	}

drop_nbuf:
	DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
		      err_src == HAL_RX_WBM_ERR_SRC_REO);
	DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
		      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);
	dp_rx_nbuf_free(nbuf);
}
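
/*
 * Both the REO and RXDMA "route" push reasons funnel into
 * dp_rx_err_route_hdl() from dp_rx_wbm_err_process() below. Only
 * EAPOL/WAPI frames whose destination matches the vap mac address
 * survive this path; everything else is dropped and counted per
 * err_src (reo2rel_route_drop / rxdma2rel_route_drop).
 */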

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef DP_RX_DESC_COOKIE_INVALIDATE
/**
 * dp_rx_link_cookie_check() - Validate link desc cookie
 * @ring_desc: ring descriptor
 *
 * Return: qdf status
 */
static inline QDF_STATUS
dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
{
	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
 * @ring_desc: ring descriptor
 *
 * Return: None
 */
static inline void
dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
{
	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
}
#else
static inline QDF_STATUS
dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
{
}
#endif
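
/*
 * Usage sketch for the cookie helpers above (this mirrors the reap loop
 * in dp_rx_err_process() below):
 *
 *	status = dp_rx_link_cookie_check(ring_desc);
 *	if (qdf_unlikely(QDF_IS_STATUS_ERROR(status)))
 *		break;
 *	...
 *	dp_rx_link_cookie_invalidate(ring_desc);
 *
 * so a ring entry replayed by HW with a stale cookie is caught on the
 * next pass instead of being processed twice.
 */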

#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_err_ring_record_entry() - Record rx err ring history
 * @soc: Datapath soc structure
 * @paddr: paddr of the buffer in RX err ring
 * @sw_cookie: SW cookie of the buffer in RX err ring
 * @rbm: Return buffer manager of the buffer in RX err ring
 *
 * Return: None
 */
static inline void
dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
			    uint32_t sw_cookie, uint8_t rbm)
{
	struct dp_buf_info_record *record;
	uint32_t idx;

	if (qdf_unlikely(!soc->rx_err_ring_history))
		return;

	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
					DP_RX_ERR_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->rx_err_ring_history->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->hbi.paddr = paddr;
	record->hbi.sw_cookie = sw_cookie;
	record->hbi.rbm = rbm;
}
#else
static inline void
dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
			    uint32_t sw_cookie, uint8_t rbm)
{
}
#endif

#ifdef HANDLE_RX_REROUTE_ERR
static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc,
				     hal_ring_desc_t ring_desc)
{
	int lmac_id = DP_INVALID_LMAC_ID;
	struct dp_rx_desc *rx_desc;
	struct hal_buf_info hbi;
	struct dp_pdev *pdev;
	struct rx_desc_pool *rx_desc_pool;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);

	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie);

	/* sanity */
	if (!rx_desc) {
		DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1);
		goto assert_return;
	}

	if (!rx_desc->nbuf)
		goto assert_return;

	dp_rx_err_ring_record_entry(soc, hbi.paddr,
				    hbi.sw_cookie,
				    hal_rx_ret_buf_manager_get(soc->hal_soc,
							       ring_desc));
	if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) {
		DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
		rx_desc->in_err_state = 1;
		goto assert_return;
	}

	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	/* After this point the rx_desc and nbuf are valid */
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	qdf_assert_always(!rx_desc->unmapped);
	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
	rx_desc->unmapped = 1;
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
				    rx_desc->pool_id);
	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
	lmac_id = rx_desc->pool_id;
	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
				    &pdev->free_list_tail,
				    rx_desc);
	return lmac_id;

assert_return:
	qdf_assert(0);
	return lmac_id;
}

static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
{
	int ret;
	uint64_t cur_time_stamp;

	DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1);

	/* Recover if overall error count exceeds threshold */
	if (soc->stats.rx.err.reo_err_msdu_buf_rcved >
	    DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) {
		dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
		       soc->stats.rx.err.reo_err_msdu_buf_rcved,
		       soc->rx_route_err_start_pkt_ts);
		qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR);
	}

	cur_time_stamp = qdf_get_log_timestamp_usecs();
	if (!soc->rx_route_err_start_pkt_ts)
		soc->rx_route_err_start_pkt_ts = cur_time_stamp;

	/* Recover if threshold number of packets received in threshold time */
	if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) >
	    DP_RX_ERR_ROUTE_TIMEOUT_US) {
		soc->rx_route_err_start_pkt_ts = cur_time_stamp;

		if (soc->rx_route_err_in_window >
		    DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) {
			qdf_trigger_self_recovery(NULL,
						  QDF_RX_REG_PKT_ROUTE_ERR);
			dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
			       soc->stats.rx.err.reo_err_msdu_buf_rcved,
			       soc->rx_route_err_start_pkt_ts);
		} else {
			soc->rx_route_err_in_window = 1;
		}
	} else {
		soc->rx_route_err_in_window++;
	}

	ret = dp_rx_err_handle_msdu_buf(soc, ring_desc);

	return ret;
}
#else /* HANDLE_RX_REROUTE_ERR */

static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
{
	qdf_assert_always(0);

	return DP_INVALID_LMAC_ID;
}
#endif /* HANDLE_RX_REROUTE_ERR */
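
/*
 * The two recovery triggers in dp_rx_err_exception() are deliberately
 * different: the first fires when the lifetime count of rerouted MSDU
 * buffers crosses DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD; the second fires
 * when more than DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT of them arrive
 * within a single DP_RX_ERR_ROUTE_TIMEOUT_US window.
 */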

#ifdef WLAN_MLO_MULTI_CHIP
/**
 * dp_idle_link_bm_id_check() - WAR for HW issue
 *
 * @soc: DP SOC handle
 * @rbm: idle link RBM value
 * @ring_desc: reo error link descriptor
 *
 * This is a WAR for a HW issue where a link descriptor of the partner
 * SoC is received because packets were wrongly interpreted as fragments.
 *
 * Return: true in case link desc is consumed
 *	   false in other cases
 */
static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
				     void *ring_desc)
{
	struct dp_soc *replenish_soc = NULL;

	/* return ok in case of link desc of same soc */
	if (rbm == soc->idle_link_bm_id)
		return false;

	if (soc->arch_ops.dp_soc_get_by_idle_bm_id)
		replenish_soc =
			soc->arch_ops.dp_soc_get_by_idle_bm_id(soc, rbm);

	qdf_assert_always(replenish_soc);

	/*
	 * For the WIN usecase we should only get fragment packets in
	 * this ring; since fragmentation is not supported for MLO,
	 * we should not see links from the other soc.
	 *
	 * Drop all packets from the partner soc and replenish the
	 * descriptors.
	 */
	dp_handle_wbm_internal_error(replenish_soc, ring_desc,
				     HAL_WBM_RELEASE_RING_2_DESC_TYPE);
	return true;
}
#else
static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
				     void *ring_desc)
{
	return false;
}
#endif
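
/*
 * dp_rx_err_process() below consults dp_idle_link_bm_id_check() before
 * trusting the sw cookie, so a partner-soc link descriptor is consumed
 * (dropped and replenished on the owning soc) instead of being looked up
 * in the wrong descriptor pool.
 */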

uint32_t
dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t err_status;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	uint16_t num_msdus;
	struct dp_rx_desc *rx_desc = NULL;
	QDF_STATUS status;
	bool ret;
	uint32_t error_code = 0;
	bool sw_pn_check_needed;
	int max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
	int i, rx_bufs_reaped_total;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc,
			      hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				      hal_srng_dst_peek(hal_soc,
							hal_ring_hdl)))) {
		DP_STATS_INC(soc, rx.err_ring_pkts, 1);
		err_status = hal_rx_err_status_get(hal_soc, ring_desc);
		buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc);

		if (err_status == HAL_REO_ERROR_DETECTED)
			error_code = hal_rx_get_reo_error_code(hal_soc,
							       ring_desc);

		qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0);
		sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
								  err_status,
								  error_code);
		if (!sw_pn_check_needed) {
			/*
			 * MPDU desc info will be present in the REO desc
			 * only in the below scenarios
			 * 1) pn_in_dest_disabled: always
			 * 2) pn_in_dest enabled: all cases except 2k-jump
			 *    and OOR errors
			 */
			hal_rx_mpdu_desc_info_get(hal_soc, ring_desc,
						  &mpdu_desc_info);
		}

		if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0)
			goto next_entry;

		/*
		 * For REO error ring, only MSDU LINK DESC is expected.
		 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case.
		 */
		if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) {
			int lmac_id;

			lmac_id = dp_rx_err_exception(soc, ring_desc);
			if (lmac_id >= 0)
				rx_bufs_reaped[lmac_id] += 1;
			goto next_entry;
		}

		hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
					  &hbi);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) &
				  soc->link_desc_id_start);

		if (dp_idle_link_bm_id_check(soc, hbi.rbm, ring_desc)) {
			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
			goto next_entry;
		}

		status = dp_rx_link_cookie_check(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
			break;
		}

		hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);
		if (!num_msdus ||
		    !dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[0])) {
			dp_rx_err_info_rl("Invalid MSDU info num_msdus %u cookie: 0x%x",
					  num_msdus, msdu_list.sw_cookie[0]);
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
			goto next_entry;
		}

		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
					    msdu_list.sw_cookie[0],
					    msdu_list.rbm[0]);
		// TODO - BE- Check if the RBM is to be checked for all chips
		if (qdf_unlikely((msdu_list.rbm[0] !=
					dp_rx_get_rx_bm_id(soc)) &&
				 (msdu_list.rbm[0] !=
					soc->idle_link_bm_id) &&
				 (msdu_list.rbm[0] !=
					dp_rx_get_defrag_bm_id(soc)))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				dp_rx_err_err("%pK: Invalid RBM %d",
					      soc, msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM)*/
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			goto next_entry;
		}

		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc,
						msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		if (sw_pn_check_needed) {
			goto process_reo_error_code;
		}

		if (mpdu_desc_info.bar_frame) {
			qdf_assert_always(mpdu_desc_info.msdu_count == 1);

			dp_rx_bar_frame_handle(soc, ring_desc, rx_desc,
					       &mpdu_desc_info, err_status,
					       error_code);

			rx_bufs_reaped[mac_id] += 1;
			goto next_entry;
		}

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for fragmented
			 * case. We drop the msdus and release the link desc
			 * back if there are more than one msdu in link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				goto next_entry;
			}

			/*
			 * this is an unlikely scenario where the host is
			 * reaping a descriptor which it already reaped just
			 * a while ago but is yet to replenish it back to HW.
			 * In this case host will dump the last 128 descriptors
			 * including the software descriptor rx_desc and assert.
			 */
			if (qdf_unlikely(!rx_desc->in_use)) {
				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
				dp_info_rl("Reaping rx_desc not in use!");
				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
							   ring_desc, rx_desc);
				/* ignore duplicate RX desc and continue */
				/* Pop out the descriptor */
				goto next_entry;
			}

			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
							    msdu_list.paddr[0]);
			if (!ret) {
				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
				rx_desc->in_err_state = 1;
				goto next_entry;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);

			goto next_entry;
		}

process_reo_error_code:
		/*
		 * Expect REO errors to be handled after this point
		 */
		qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED);

		dp_info_rl("Got pkt with REO ERROR: %d", error_code);

		switch (error_code) {
		case HAL_REO_ERR_PN_CHECK_FAILED:
		case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			break;
		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_REGULAR_FRAME_OOR:
		case HAL_REO_ERR_BAR_FRAME_OOR:
		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_reo_err_entry_process(
					soc,
					ring_desc,
					&mpdu_desc_info,
					link_desc_va,
					error_code);

			rx_bufs_reaped[mac_id] += count;
			break;
		case HAL_REO_ERR_QUEUE_DESC_INVALID:
		case HAL_REO_ERR_AMPDU_IN_NON_BA:
		case HAL_REO_ERR_NON_BA_DUPLICATE:
		case HAL_REO_ERR_BA_DUPLICATE:
		case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION:
		case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN:
		case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			count = dp_rx_msdus_drop(soc, ring_desc,
						 &mpdu_desc_info,
						 &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			break;
		default:
			/* Assert if unexpected error type */
			qdf_assert_always(0);
		}

next_entry:
		dp_rx_link_cookie_invalidate(ring_desc);
		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);

		rx_bufs_reaped_total = 0;
		for (i = 0; i < MAX_PDEV_CNT; i++)
			rx_bufs_reaped_total += rx_bufs_reaped[i];

		if (dp_rx_reap_loop_pkt_limit_hit(soc, rx_bufs_reaped_total,
						  max_reap_limit))
			break;
	}

done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail,
						false);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
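
/*
 * Call-site sketch (an assumption for illustration; the service loop is
 * not part of this file): the REO exception ring is typically drained
 * from the per-context interrupt handler with the remaining budget, e.g.
 *
 *	work_done = dp_rx_err_process(int_ctx, soc,
 *				      soc->reo_exception_ring.hal_srng,
 *				      remaining_quota);
 */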

#ifdef DROP_RXDMA_DECRYPT_ERR
/**
 * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
 *
 * Return: true if rxdma decrypt err frames are handled and false otherwise
 */
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return false;
}
#else
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return true;
}
#endif

void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
{
	if (soc->wbm_sg_last_msdu_war) {
		uint32_t len;
		qdf_nbuf_t temp = soc->wbm_sg_param.wbm_sg_nbuf_tail;

		len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
						     qdf_nbuf_data(temp));
		temp = soc->wbm_sg_param.wbm_sg_nbuf_head;
		while (temp) {
			QDF_NBUF_CB_RX_PKT_LEN(temp) = len;
			temp = temp->next;
		}
	}
}

#ifdef RX_DESC_DEBUG_CHECK
QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
					    hal_ring_handle_t hal_ring_hdl,
					    hal_ring_desc_t ring_desc,
					    struct dp_rx_desc *rx_desc)
{
	struct hal_buf_info hbi;

	hal_rx_wbm_rel_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
	/* Sanity check for possible buffer paddr corruption */
	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
		return QDF_STATUS_SUCCESS;

	hal_srng_dump_ring_desc(soc->hal_soc, hal_ring_hdl, ring_desc);

	return QDF_STATUS_E_FAILURE;
}
#else
QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
					    hal_ring_handle_t hal_ring_hdl,
					    hal_ring_desc_t ring_desc,
					    struct dp_rx_desc *rx_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif

bool
dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
{
	/*
	 * Currently only the Null Queue and Unencrypted error handlers
	 * have support for SG. The other error handlers do not deal with
	 * SG buffers.
	 */
	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
		return true;

	return false;
}
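
/*
 * dp_rx_is_sg_formation_required() is consulted by the arch-specific WBM
 * reap path (an assumption here; those reap implementations live outside
 * this file), so chfrag-continued buffers are chained into an SG list
 * only for the two error codes handled above.
 */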

#ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
			      qdf_nbuf_t nbuf)
{
	/*
	 * In the fast-recycle case the TX driver can skip invalidating
	 * the buffer when forwarding via SFE, so the TLV headers must be
	 * invalidated after writing to this location.
	 */
	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
				      (void *)(nbuf->data +
					       soc->rx_pkt_tlv_size +
					       L3_HEADER_PAD));
}
#else
void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
			      qdf_nbuf_t nbuf)
{
}
#endif

#ifndef CONFIG_NBUF_AP_PLATFORM
static inline uint16_t
dp_rx_get_peer_id(struct dp_soc *soc,
		  uint8_t *rx_tlv_hdr,
		  qdf_nbuf_t nbuf)
{
	uint32_t peer_mdata = 0;

	peer_mdata = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
						   rx_tlv_hdr);
	return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
}

static inline void
dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
				 qdf_nbuf_t nbuf,
				 uint8_t *rx_tlv_hdr,
				 union hal_wbm_err_info_u *wbm_err)
{
	hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr,
				      (uint8_t *)&wbm_err->info,
				      sizeof(wbm_err->info));
}

void
dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
			       qdf_nbuf_t nbuf,
			       union hal_wbm_err_info_u wbm_err)
{
	hal_rx_priv_info_set_in_tlv(soc->hal_soc,
				    qdf_nbuf_data(nbuf),
				    (uint8_t *)&wbm_err.info,
				    sizeof(wbm_err.info));
}
#else
static inline uint16_t
dp_rx_get_peer_id(struct dp_soc *soc,
		  uint8_t *rx_tlv_hdr,
		  qdf_nbuf_t nbuf)
{
	return QDF_NBUF_CB_RX_PEER_ID(nbuf);
}

static inline void
dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
				 qdf_nbuf_t nbuf,
				 uint8_t *rx_tlv_hdr,
				 union hal_wbm_err_info_u *wbm_err)
{
	wbm_err->info = QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf);
}

void
dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
			       qdf_nbuf_t nbuf,
			       union hal_wbm_err_info_u wbm_err)
{
	QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf) = wbm_err.info;
}
#endif /* CONFIG_NBUF_AP_PLATFORM */
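
/*
 * The two halves above form a round trip: the reap path stores the raw
 * 32-bit WBM error word in the nbuf (TLV private area or nbuf CB,
 * depending on CONFIG_NBUF_AP_PLATFORM) via
 * dp_rx_set_wbm_err_info_in_nbuf(), and dp_rx_wbm_err_process() below
 * reads it back with dp_rx_get_wbm_err_info_from_nbuf() before
 * dispatching on wbm_err_src / psh_rsn / err_code.
 */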

uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_soc_handle_t hal_soc;
	uint32_t rx_bufs_used = 0;
	struct dp_pdev *dp_pdev;
	uint8_t *rx_tlv_hdr;
	bool is_tkip_mic_err;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf, next;
	union hal_wbm_err_info_u wbm_err = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;
	uint8_t link_id = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	nbuf_head = soc->arch_ops.dp_rx_wbm_err_reap_desc(int_ctx, soc,
							  hal_ring_hdl,
							  quota,
							  &rx_bufs_used);
	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_txrx_peer *txrx_peer;
		struct dp_peer *peer;
		uint16_t peer_id;
		uint8_t err_code;
		uint8_t *tlv_hdr;
		dp_txrx_ref_handle txrx_ref_handle = NULL;
		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * retrieve the wbm desc info from nbuf CB/TLV, so we can
		 * handle error cases appropriately
		 */
		dp_rx_get_wbm_err_info_from_nbuf(soc, nbuf,
						 rx_tlv_hdr,
						 &wbm_err);

		peer_id = dp_rx_get_peer_id(soc,
					    rx_tlv_hdr,
					    nbuf);
		txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
							   &txrx_ref_handle,
							   DP_MOD_ID_RX_ERR);

		if (!txrx_peer)
			dp_info_rl("peer is null peer_id %u err_src %u, "
				   "REO: push_rsn %u err_code %u, "
				   "RXDMA: push_rsn %u err_code %u",
				   peer_id, wbm_err.info_bit.wbm_err_src,
				   wbm_err.info_bit.reo_psh_rsn,
				   wbm_err.info_bit.reo_err_code,
				   wbm_err.info_bit.rxdma_psh_rsn,
				   wbm_err.info_bit.rxdma_err_code);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;
		/*
		 * Form the SG for msdu continued buffers;
		 * QCN9000 has this support
		 */
		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;
			/*
			 * SG error handling is not done correctly,
			 * drop SG frames for now.
			 */
			dp_rx_nbuf_free(nbuf);
			dp_info_rl("scattered msdu dropped");
			nbuf = next;
			if (txrx_peer)
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_RX_ERR);
			continue;
		}

		pool_id = wbm_err.info_bit.pool_id;
		dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);

		if (dp_pdev && dp_pdev->link_peer_stats &&
		    txrx_peer && txrx_peer->is_mld_peer) {
			link_id = dp_rx_get_stats_arr_idx_from_link_id(
								nbuf,
								txrx_peer);
		} else {
			link_id = 0;
		}

		if (wbm_err.info_bit.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err.info_bit.reo_psh_rsn
					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err.info_bit.reo_err_code], 1);
				/* increment @pdev level */
				if (dp_pdev)
					DP_STATS_INC(dp_pdev, err.reo_error,
						     1);

				switch (wbm_err.info_bit.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err.info_bit.pool_id;
					soc->arch_ops.dp_rx_null_q_desc_handle(
								soc, nbuf,
								rx_tlv_hdr,
								pool_id,
								txrx_peer,
								FALSE,
								link_id);
					break;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.jump_2k_err,
									  1,
									  link_id);

					pool_id = wbm_err.info_bit.pool_id;

					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					hal_rx_msdu_start_msdu_len_get(
							soc->hal_soc, rx_tlv_hdr);
					nbuf->next = NULL;
					dp_2k_jump_handle(soc, nbuf,
							  rx_tlv_hdr,
							  peer_id, tid);
					break;
				case HAL_REO_ERR_REGULAR_FRAME_OOR:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.oor_err,
									  1,
									  link_id);
					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
						hal_rx_msdu_start_msdu_len_get(
							soc->hal_soc, rx_tlv_hdr);
					nbuf->next = NULL;
					dp_rx_oor_handle(soc, nbuf,
							 peer_id,
							 rx_tlv_hdr);
					break;
				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
				case HAL_REO_ERR_BAR_FRAME_OOR:
					peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
					if (peer) {
						dp_rx_err_handle_bar(soc, peer,
								     nbuf);
						dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
					}
					dp_rx_nbuf_free(nbuf);
					break;
				case HAL_REO_ERR_PN_CHECK_FAILED:
				case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.pn_err,
									  1,
									  link_id);
					dp_rx_nbuf_free(nbuf);
					break;
				default:
					dp_info_rl("Got pkt with REO ERROR: %d",
						   wbm_err.info_bit.
						   reo_err_code);
					dp_rx_nbuf_free(nbuf);
				}
			} else if (wbm_err.info_bit.reo_psh_rsn
					== HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
						    rx_tlv_hdr,
						    HAL_RX_WBM_ERR_SRC_REO,
						    link_id);
			} else {
				/* should not enter here */
				dp_rx_err_alert("invalid reo push reason %u",
						wbm_err.info_bit.reo_psh_rsn);
				dp_rx_nbuf_free(nbuf);
				qdf_assert_always(0);
			}
		} else if (wbm_err.info_bit.wbm_err_src ==
					HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err.info_bit.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err.info_bit.rxdma_err_code], 1);
				/* increment @pdev level */
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err.info_bit.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
				case HAL_RXDMA_ERR_WIFI_PARSE:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.rxdma_wifi_parse_err,
									  1,
									  link_id);

					pool_id = wbm_err.info_bit.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								txrx_peer,
								wbm_err.
								info_bit.
								rxdma_err_code,
								pool_id,
								link_id);
					break;
				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								txrx_peer);
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.mic_err,
									  1,
									  link_id);
					break;
				case HAL_RXDMA_ERR_DECRYPT:
					/*
					 * All the TKIP-MIC failures are
					 * treated as Decrypt Errors for
					 * QCN9224 targets
					 */
					is_tkip_mic_err = hal_rx_msdu_end_is_tkip_mic_err(hal_soc, rx_tlv_hdr);

					if (is_tkip_mic_err && txrx_peer) {
						dp_rx_process_mic_error(soc, nbuf,
									rx_tlv_hdr,
									txrx_peer);
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.mic_err,
									  1,
									  link_id);
						break;
					}

					if (txrx_peer) {
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.decrypt_err,
									  1,
									  link_id);
						dp_rx_nbuf_free(nbuf);
						break;
					}

					if (!dp_handle_rxdma_decrypt_err()) {
						dp_rx_nbuf_free(nbuf);
						break;
					}

					pool_id = wbm_err.info_bit.pool_id;
					err_code = wbm_err.info_bit.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr, NULL,
								err_code,
								pool_id,
								link_id);
					break;
				case HAL_RXDMA_MULTICAST_ECHO:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
									      rx.mec_drop, 1,
									      qdf_nbuf_len(nbuf),
									      link_id);
					dp_rx_nbuf_free(nbuf);
					break;
				case HAL_RXDMA_UNAUTHORIZED_WDS:
					pool_id = wbm_err.info_bit.pool_id;
					err_code = wbm_err.info_bit.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr,
								txrx_peer,
								err_code,
								pool_id,
								link_id);
					break;
				default:
					dp_rx_nbuf_free(nbuf);
					dp_err_rl("RXDMA error %d",
						  wbm_err.info_bit.rxdma_err_code);
				}
			} else if (wbm_err.info_bit.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
						    rx_tlv_hdr,
						    HAL_RX_WBM_ERR_SRC_RXDMA,
						    link_id);
			} else if (wbm_err.info_bit.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) {
				dp_rx_err_err("rxdma push reason %u",
					      wbm_err.info_bit.rxdma_psh_rsn);
				DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
				dp_rx_nbuf_free(nbuf);
			} else {
				/* should not enter here */
				dp_rx_err_alert("invalid rxdma push reason %u",
						wbm_err.info_bit.rxdma_psh_rsn);
				dp_rx_nbuf_free(nbuf);
				qdf_assert_always(0);
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (txrx_peer)
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX_ERR);

		nbuf = next;
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc found
 *
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
			 hal_rxdma_desc_t rxdma_dst_ring_desc,
			 void *rx_desc)
{
	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
	dp_rx_dump_info_and_assert(
			soc,
			soc->rx_rel_ring.hal_srng,
			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
			rx_desc);
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdu in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	hal_rxdma_desc_t ring_desc;
	struct rx_desc_pool *rx_desc_pool;

	if (!pdev) {
		dp_rx_err_debug("%pK: pdev is null for mac_id = %d",
				soc, mac_id);
		return rx_bufs_used;
	}

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
				     &buf_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert_always(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/*
			 * If the msdus belong to an NSS offloaded radio and
			 * the rbm is not SW1_BM, return the msdu_link
			 * descriptor without freeing the msdus (nbufs); let
			 * these buffers be given to the NSS completion ring
			 * for NSS to free them.
			 * Else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] !=
				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						soc->arch_ops.
						dp_rx_desc_cookie_2_va(
							soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;
					/*
					 * this is an unlikely scenario
					 * where the host is reaping
					 * a descriptor which
					 * it already reaped just a while ago
					 * but is yet to replenish
					 * it back to HW.
					 * In this case host will dump
					 * the last 128 descriptors
					 * including the software descriptor
					 * rx_desc and assert.
					 */
					ring_desc = rxdma_dst_ring_desc;
					if (qdf_unlikely(!rx_desc->in_use)) {
						dup_desc_dbg(soc,
							     ring_desc,
							     rx_desc);
						continue;
					}

					if (rx_desc->unmapped == 0) {
						rx_desc_pool =
							&soc->rx_desc_buf[rx_desc->pool_id];
						dp_ipa_rx_buf_smmu_mapping_lock(soc);
						dp_rx_nbuf_unmap_pool(soc,
								      rx_desc_pool,
								      msdu);
						rx_desc->unmapped = 1;
						dp_ipa_rx_buf_smmu_mapping_unlock(soc);
					}

					dp_rx_err_debug("%pK: msdu_nbuf=%pK ",
							soc, msdu);

					dp_rx_buffer_pool_nbuf_free(soc, msdu,
							rx_desc->pool_id);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
					     buf_info.paddr, buf_info.sw_cookie,
					     buf_info.rbm);

		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
					      &buf_info);
		dp_rx_link_desc_return_by_addr(soc,
					       (hal_buff_addrinfo_t)
						rx_link_buf_info,
					       bm_action);
	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
	if (pdev)
		DP_STATS_INC(pdev, err.rxdma_error, 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		dp_rx_err_err("%pK: Packet received with Decrypt error", soc);
	}

	return rx_bufs_used;
}

uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;

	if (!err_dst_srng) {
		dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
			      soc, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
		dp_rx_err_err("%pK: HAL Monitor Destination Ring access Failed -- %pK",
			      soc, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	dp_srng_access_end(int_ctx, soc, err_dst_srng);

	if (rx_bufs_used) {
		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];
		} else {
			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
			rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
		}

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail, false);

		work_done += rx_bufs_used;
	}

	return work_done;
}
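
/*
 * Call-site sketch (an assumption for illustration; the service loop is
 * not part of this file): this poll routine is expected to be driven per
 * lmac from the interrupt service path, e.g.
 *
 *	work_done = dp_rxdma_err_process(int_ctx, soc, mac_id, budget);
 */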

#ifndef QCA_HOST_MODE_WIFI_DISABLED

static inline void
dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
			hal_rxdma_desc_t rxdma_dst_ring_desc,
			union dp_rx_desc_list_elem_t **head,
			union dp_rx_desc_list_elem_t **tail,
			uint32_t *rx_bufs_used)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t msdu_cnt, i;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	struct rx_desc_pool *rx_desc_pool;
	struct dp_rx_desc *rx_desc;

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
				     &buf_info, &msdu_cnt);

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		if (!rx_msdu_link_desc) {
			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
			break;
		}

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			for (i = 0; i < num_msdus; i++) {
				if (!dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[i])) {
					dp_rx_err_info_rl("Invalid MSDU info cookie: 0x%x",
							  msdu_list.sw_cookie[i]);
					continue;
				}

				rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
							soc,
							msdu_list.sw_cookie[i]);
				qdf_assert_always(rx_desc);
				rx_desc_pool =
					&soc->rx_desc_buf[rx_desc->pool_id];
				msdu = rx_desc->nbuf;

				/*
				 * this is an unlikely scenario where the host
				 * is reaping a descriptor which it already
				 * reaped just a while ago but is yet to
				 * replenish it back to HW.
				 */
				if (qdf_unlikely(!rx_desc->in_use) ||
				    qdf_unlikely(!msdu)) {
					dp_rx_err_info_rl("Reaping rx_desc not in use!");
					continue;
				}

				dp_ipa_rx_buf_smmu_mapping_lock(soc);
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu);
				rx_desc->unmapped = 1;
				dp_ipa_rx_buf_smmu_mapping_unlock(soc);

				dp_rx_buffer_pool_nbuf_free(soc, msdu,
							    rx_desc->pool_id);
				rx_bufs_used[rx_desc->pool_id]++;
				dp_rx_add_to_free_desc_list(head,
							    tail, rx_desc);
			}
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
					     buf_info.paddr, buf_info.sw_cookie,
					     buf_info.rbm);

		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
					      &buf_info);
		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
					       rx_link_buf_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	} while (buf_info.paddr);
}

void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type)
{
	struct hal_buf_info buf_info = {0};
	struct dp_rx_desc *rx_desc = NULL;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = {0};
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;
	uint8_t mac_id;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info);

	if (!buf_info.paddr) {
		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
		return;
	}

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc,
				  &buf_info);

	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
							soc,
							buf_info.sw_cookie);

		if (rx_desc && rx_desc->nbuf) {
			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
			dp_ipa_rx_buf_smmu_mapping_lock(soc);
			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
					      rx_desc->nbuf);
			rx_desc->unmapped = 1;
			dp_ipa_rx_buf_smmu_mapping_unlock(soc);

			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
						    rx_desc->pool_id);
			dp_rx_add_to_free_desc_list(&head,
						    &tail,
						    rx_desc);
			rx_bufs_reaped[rx_desc->pool_id]++;
		}
	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
		pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie);

		dp_wbm_int_err_mpdu_pop(soc, pool_id, hal_desc,
					&head, &tail, rx_bufs_reaped);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		struct rx_desc_pool *rx_desc_pool;
		struct dp_srng *dp_rxdma_srng;

		if (!rx_bufs_reaped[mac_id])
			continue;

		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool,
					rx_bufs_reaped[mac_id],
					&head, &tail, false);
	}
}
#endif /* QCA_HOST_MODE_WIFI_DISABLED */