/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_htt.h"
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h> /* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"
#include "dp_rx_buffer_pool.h"

#define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
#define dp_rx_err_info_rl(params...) \
	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
#define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params)

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/* Max regular Rx packet routing error */
#define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD	20
#define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT	10
#define DP_RX_ERR_ROUTE_TIMEOUT_US	(5 * 1000 * 1000) /* micro seconds */
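
/*
 * Bounds for regular Rx packet routing errors: at most
 * DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD errors are tolerated overall, and
 * DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT within one
 * DP_RX_ERR_ROUTE_TIMEOUT_US (5 s) window. These limits are consumed by
 * the routing-error handling later in this file.
 */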

#ifdef FEATURE_MEC
bool dp_rx_mcast_echo_check(struct dp_soc *soc,
			    struct dp_txrx_peer *txrx_peer,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = txrx_peer->vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_mec_entry *mecentry = NULL;
	struct dp_ast_entry *ase = NULL;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);

	/*
	 * if the received pkt's src mac addr matches the vdev
	 * mac address, then drop the pkt as it is looped back
	 */
	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
			  vdev->mac_addr.raw,
			  QDF_MAC_ADDR_SIZE)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * if the received pkt's src mac addr matches the MAC addr of a
	 * wired PC behind this STA, or of a wireless STA behind the
	 * repeater, then drop the pkt as it is looped back
	 */
	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		qdf_spin_lock_bh(&soc->ast_lock);
		ase = soc->ast_table[sa_idx];

		/*
		 * this check was not needed since MEC is not dependent on AST,
		 * but if we don't have this check SON has some issues in the
		 * dual backhaul scenario. in APS SON mode, a client connected
		 * to RE 2G sends multicast packets. the RE sends them to CAP
		 * over the 5G backhaul. the CAP loops them back on 2G to RE.
		 * On receiving them on the 2G STA vap, we would assume that
		 * the client has roamed and kick out the client.
		 */
		if (ase && (ase->peer_id != txrx_peer->peer_id)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			goto drop;
		}

		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	qdf_spin_lock_bh(&soc->mec_lock);

	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
						   &data[QDF_MAC_ADDR_SIZE]);
	if (!mecentry) {
		qdf_spin_unlock_bh(&soc->mec_lock);
		return false;
	}

	qdf_spin_unlock_bh(&soc->mec_lock);

drop:
	dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT,
		       soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));

	return true;
}
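
/*
 * Typical use (a sketch; the call site is assumed, not part of this file):
 * the Rx forwarding path runs the MEC check on mcast/bcast frames received
 * on a STA vdev and silently drops frames flagged as echoes, e.g.:
 *
 *	if (dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
 *		dp_rx_nbuf_free(nbuf);
 *		return;
 *	}
 */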

#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
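
/*
 * dp_rx_link_desc_refill_duplicate_check() - the caller hands in the
 * hal_buf_info of the link descriptor it released last time (e.g.
 * soc->last_op_info.wbm_rel_link_desc below); if the descriptor now being
 * released has the same physical address, a duplicate refill is counted in
 * rx.err.dup_refill_link_desc. On return, *buf_info holds the current
 * descriptor so the next call compares against it.
 */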
void dp_rx_link_desc_refill_duplicate_check(
				struct dp_soc *soc,
				struct hal_buf_info *buf_info,
				hal_buff_addrinfo_t ring_buf_info)
{
	struct hal_buf_info current_link_desc_buf_info = { 0 };

	/* do duplicate link desc address check */
	hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
					  &current_link_desc_buf_info);

	/*
	 * TODO - Check if the hal soc api call can be removed
	 * since the cookie is just used for print.
	 * buffer_addr_info is the first element of ring_desc
	 */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
				  (uint32_t *)ring_buf_info,
				  &current_link_desc_buf_info);

	if (qdf_unlikely(current_link_desc_buf_info.paddr ==
			 buf_info->paddr)) {
		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
			   current_link_desc_buf_info.paddr,
			   current_link_desc_buf_info.sw_cookie);
		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
	}
	*buf_info = current_link_desc_buf_info;
}

QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc);
		return status;
	}

	/* do duplicate link desc address check */
	dp_rx_link_desc_refill_duplicate_check(
				soc,
				&soc->last_op_info.wbm_rel_link_desc,
				link_desc_addr);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK",
			      soc, wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);

		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
			   srng->ring_id,
			   soc->stats.rx.err.hal_ring_access_full_fail);
		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			   *srng->u.src_ring.hp_addr,
			   srng->u.src_ring.reap_hp,
			   *srng->u.src_ring.tp_addr,
			   srng->u.src_ring.cached_tp);
		QDF_BUG(0);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

qdf_export_symbol(dp_rx_link_desc_return_by_addr);

QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}
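
/*
 * Both helpers above hand a link descriptor back to hardware through the
 * SW2WBM release ring; bm_action selects the buffer-manager disposition,
 * e.g. HAL_BM_ACTION_PUT_IN_IDLE_LIST (used throughout this file) to put
 * the link descriptor back on the idle list.
 */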

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac ID
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		 uint8_t *mac_id,
		 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_rx_desc *rx_desc;
	/* First field in REO Dst ring Desc is buffer_addr_info */
	void *buf_addr_info = ring_desc;
	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
	struct buffer_addr_info next_link_desc_addr_info = { 0 };

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info);

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
				  (uint32_t *)ring_desc,
				  &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
	if (!link_desc_va) {
		dp_rx_err_debug("link desc va is null, soc %pK", soc);
		return rx_bufs_used;
	}

more_msdu_link_desc:
	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc, msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from an MSDU link belong to same pdev */
		*mac_id = rx_desc->pool_id;
		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
		if (!pdev) {
			dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
					soc, rx_desc->pool_id);
			return rx_bufs_used;
		}

		if (!dp_rx_desc_check_magic(rx_desc)) {
			dp_rx_err_err("%pK: Invalid rx_desc cookie=%d",
				      soc, msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		dp_ipa_rx_buf_smmu_mapping_lock(soc);
		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
		rx_desc->unmapped = 1;
		dp_ipa_rx_buf_smmu_mapping_unlock(soc);

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		dp_rx_err_err("%pK: Packet received with PN error for tid :%d",
			      soc, tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);

		dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info,
				      rx_desc->nbuf,
				      QDF_TX_RX_STATUS_DROP, true);
		/* Just free the buffers */
		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/*
	 * If the MSDUs are spread across multiple link descriptors,
	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
	 * spread across multiple buffers). Hence, it is necessary to check
	 * the next link descriptor and release all the MSDUs that are part
	 * of it.
	 */
	hal_rx_get_next_msdu_link_desc_buf_addr_info(
			link_desc_va,
			&next_link_desc_addr_info);

	if (hal_rx_is_buf_addr_info_valid(
				&next_link_desc_addr_info)) {
		/* Clear the next link desc info for the current link_desc */
		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);

		dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
		hal_rx_buffer_addr_info_get_paddr(
				&next_link_desc_addr_info,
				&buf_info);
		/* buffer_addr_info is the first element of ring_desc */
		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
					  (uint32_t *)&next_link_desc_addr_info,
					  &buf_info);
		cur_link_desc_addr_info = next_link_desc_addr_info;
		buf_addr_info = &cur_link_desc_addr_info;

		link_desc_va =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		goto more_msdu_link_desc;
	}
	quota--;
	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac ID
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_txrx_peer *txrx_peer;
	bool peer_pn_policy = false;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
						  mpdu_desc_info->peer_meta_data);

	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
						   &txrx_ref_handle,
						   DP_MOD_ID_RX_ERR);

	if (qdf_likely(txrx_peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		dp_err_rl("discard rx due to PN error for peer %pK",
			  txrx_peer);

		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
	}
	dp_rx_err_err("%pK: Packet received with PN error", soc);

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}
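
/*
 * Note: peer_pn_policy is never set today (see the TODO above), so every
 * MPDU flagged with a PN error currently ends up in dp_rx_msdus_drop().
 */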

#ifdef DP_RX_DELIVER_ALL_OOR_FRAMES
/**
 * dp_rx_deliver_oor_frame() - deliver OOR frames to stack
 * @soc: Datapath soc handler
 * @txrx_peer: pointer to DP peer
 * @nbuf: pointer to the skb of RX frame
 * @frame_mask: the mask for special frame needed
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Note: msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
 * a single nbuf is expected.
 *
 * Return: true - nbuf has been delivered to stack, false - not.
 */
static bool
dp_rx_deliver_oor_frame(struct dp_soc *soc,
			struct dp_txrx_peer *txrx_peer,
			qdf_nbuf_t nbuf, uint32_t frame_mask,
			uint8_t *rx_tlv_hdr)
{
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint32_t skip_len;

	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
		skip_len = l2_hdr_offset;
	} else {
		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
	}

	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
	qdf_nbuf_pull_head(nbuf, skip_len);
	qdf_nbuf_set_exc_frame(nbuf, 1);

	dp_info_rl("OOR frame, mpdu sn 0x%x",
		   hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
	dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, nbuf, NULL);
	return true;
}
#else
static bool
dp_rx_deliver_oor_frame(struct dp_soc *soc,
			struct dp_txrx_peer *txrx_peer,
			qdf_nbuf_t nbuf, uint32_t frame_mask,
			uint8_t *rx_tlv_hdr)
{
	return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
					   rx_tlv_hdr);
}
#endif
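
/*
 * With DP_RX_DELIVER_ALL_OOR_FRAMES, every OOR frame is handed to the stack
 * and frame_mask is ignored; otherwise only "special" frames matching
 * frame_mask (ARP/DHCP/EAPOL etc.) are delivered via
 * dp_rx_deliver_special_frame() and the rest are dropped by the caller.
 */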

/**
 * dp_rx_oor_handle() - Handles the msdu which is OOR error
 *
 * @soc: core txrx main context
 * @nbuf: pointer to msdu skb
 * @peer_id: dp peer ID
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This function processes the msdu delivered from REO2TCL
 * ring with error type OOR
 *
 * Return: None
 */
static void
dp_rx_oor_handle(struct dp_soc *soc,
		 qdf_nbuf_t nbuf,
		 uint16_t peer_id,
		 uint8_t *rx_tlv_hdr)
{
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
	struct dp_txrx_peer *txrx_peer = NULL;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
						   &txrx_ref_handle,
						   DP_MOD_ID_RX_ERR);
	if (!txrx_peer) {
		dp_info_rl("peer not found");
		goto free_nbuf;
	}

	if (dp_rx_deliver_oor_frame(soc, txrx_peer, nbuf, frame_mask,
				    rx_tlv_hdr)) {
		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
		return;
	}

free_nbuf:
	if (txrx_peer)
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);

	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
	dp_rx_nbuf_free(nbuf);
}

/**
 * dp_rx_err_nbuf_pn_check() - Check if the PN number of this current packet
 *			       is a monotonic increment of the packet number
 *			       from the previous successfully re-ordered
 *			       frame.
 * @soc: Datapath SOC handle
 * @ring_desc: REO ring descriptor
 * @nbuf: Current packet
 *
 * Return: QDF_STATUS_SUCCESS, if the pn check passes, else QDF_STATUS_E_FAILURE
 */
static inline QDF_STATUS
dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc,
			qdf_nbuf_t nbuf)
{
	uint64_t prev_pn, curr_pn[2];

	if (!hal_rx_encryption_info_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
		return QDF_STATUS_SUCCESS;

	hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn);
	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), curr_pn);

	if (curr_pn[0] > prev_pn)
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}
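
/*
 * Example: if the last in-order frame carried PN 0x1000, a new frame with
 * PN 0x1001 (or anything strictly greater) passes; PN 0x1000 or lower is
 * treated as a replay and fails. Only the low 64 bits (curr_pn[0]) are
 * compared here, and unencrypted frames pass unconditionally.
 */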

#ifdef WLAN_SKIP_BAR_UPDATE
static
void dp_rx_err_handle_bar(struct dp_soc *soc,
			  struct dp_peer *peer,
			  qdf_nbuf_t nbuf)
{
	dp_info_rl("BAR update to H.W is skipped");
	DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
}
#else
static
void dp_rx_err_handle_bar(struct dp_soc *soc,
			  struct dp_peer *peer,
			  qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr;
	unsigned char type, subtype;
	uint16_t start_seq_num;
	uint32_t tid;
	QDF_STATUS status;
	struct ieee80211_frame_bar *bar;

	/*
	 * 1. Is this a BAR frame? If not, discard it.
	 * 2. If it is, get the peer id, tid, ssn
	 * 2a. Do a tid update
	 */
	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size);

	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (!(type == IEEE80211_FC0_TYPE_CTL &&
	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
		dp_err_rl("Not a BAR frame!");
		return;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	qdf_assert_always(tid < DP_MAX_TIDS);

	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;

	dp_info_rl("tid %u window_size %u start_seq_num %u",
		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);

	status = dp_rx_tid_update_wifi3(peer, tid,
					peer->rx_tid[tid].ba_win_size,
					start_seq_num,
					true);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err_rl("failed to handle bar frame update rx tid");
		DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
	} else {
		DP_STATS_INC(soc, rx.err.ssn_update_count, 1);
	}
}
#endif
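
/*
 * A BAR (Block Ack Request) carries the transmitter's current starting
 * sequence number; moving the Rx reorder window forward to that SSN (via
 * dp_rx_tid_update_wifi3() above) lets the receiver release any frames it
 * was still holding below the new window start.
 */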

/**
 * _dp_rx_bar_frame_handle(): Core of the BAR frame handling
 * @soc: Datapath SoC handle
 * @nbuf: packet being processed
 * @mpdu_desc_info: mpdu desc info for the current packet
 * @tid: tid on which the packet arrived
 * @err_status: Flag to indicate if REO encountered an error while routing
 *		this frame
 * @error_code: REO error code
 *
 * Return: None
 */
static void
_dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			uint32_t tid, uint8_t err_status, uint32_t error_code)
{
	uint16_t peer_id;
	struct dp_peer *peer;

	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
						  mpdu_desc_info->peer_meta_data);
	peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
	if (!peer)
		return;

	dp_info_rl("BAR frame: "
		   " peer_id = %d"
		   " tid = %u"
		   " SSN = %d"
		   " error status = %d",
		   peer->peer_id,
		   tid,
		   mpdu_desc_info->mpdu_seq,
		   err_status);

	if (err_status == HAL_REO_ERROR_DETECTED) {
		switch (error_code) {
		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_BAR_FRAME_OOR:
			dp_rx_err_handle_bar(soc, peer, nbuf);
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			break;
		default:
			DP_STATS_INC(soc, rx.bar_frame, 1);
		}
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
}

/**
 * dp_rx_bar_frame_handle() - Function to handle err BAR frames
 * @soc: core DP main context
 * @ring_desc: Hal ring desc
 * @rx_desc: dp rx desc
 * @mpdu_desc_info: mpdu desc info
 * @err_status: error status
 * @err_code: error code
 *
 * Handle the error BAR frames received. Ensure the SOC level
 * stats are updated based on the REO error code. The BAR frames
 * are further processed by updating the Rx tids with the start
 * sequence number (SSN) and BA window size. Desc is returned
 * to the free desc list
 *
 * Return: none
 */
static void
dp_rx_bar_frame_handle(struct dp_soc *soc,
		       hal_ring_desc_t ring_desc,
		       struct dp_rx_desc *rx_desc,
		       struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		       uint8_t err_status,
		       uint32_t err_code)
{
	qdf_nbuf_t nbuf;
	struct dp_pdev *pdev;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	nbuf = rx_desc->nbuf;
	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
	rx_desc->unmapped = 1;
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
					rx_tlv_hdr);
	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);

	if (!pdev) {
		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
				soc, rx_desc->pool_id);
		return;
	}

	_dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status,
				err_code);
	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, nbuf,
			      QDF_TX_RX_STATUS_DROP, true);
	dp_rx_link_desc_return(soc, ring_desc,
			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
				    rx_desc->pool_id);
	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
				    &pdev->free_list_tail,
				    rx_desc);
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid)
{
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
	if (!peer) {
		dp_rx_err_info_rl("%pK: peer not found", soc);
		goto free_nbuf;
	}

	if (tid >= DP_MAX_TIDS) {
		dp_info_rl("invalid tid");
		goto nbuf_deliver;
	}

	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);

	/* send a Delba only if the BA session is active */
	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto nbuf_deliver;
	}

	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		if (soc->cdp_soc.ol_ops->send_delba) {
			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
				     1);
			soc->cdp_soc.ol_ops->send_delba(
					peer->vdev->pdev->soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					tid,
					rx_tid->delba_rcode,
					CDP_DELBA_2K_JUMP);
		}
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

nbuf_deliver:
	if (dp_rx_deliver_special_frame(soc, peer->txrx_peer, nbuf, frame_mask,
					rx_tlv_hdr)) {
		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		return;
	}

free_nbuf:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
	dp_rx_nbuf_free(nbuf);
}
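
/*
 * A "2K jump" is REO's indication that the received sequence number jumped
 * beyond 2048 of the current reorder window. The handler above tears down
 * the BA session once (DELBA with reason QOS_SETUP_REQUIRED) so the peer
 * renegotiates it, and still tries to deliver ARP frames (frame_mask) so
 * address resolution survives the reset.
 */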

#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!pdev) {
		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
				soc, pool_id);
		return false;
	}
	/*
	 * WAR - in certain types of packets, if peer_id is not correct,
	 * the driver may not be able to find the peer. Try finding the
	 * peer by addr_2 of the received MPDU.
	 */
	if (wh)
		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
	if (peer) {
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		dp_rx_nbuf_free(nbuf);
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		return true;
	}
	return false;
}
#else
bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}
#endif

bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}
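
/*
 * Returns true (i.e. "drop this frame") when the reported length exceeds
 * RX_DATA_BUFFER_SIZE, since a single Rx buffer cannot have held more than
 * that; the oversized length is accounted in rx.err.rx_invalid_pkt_len.
 */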

#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
			    qdf_nbuf_t nbuf,
			    qdf_nbuf_t tail,
			    bool is_eapol)
{
	if (is_eapol && soc->eapol_over_control_port)
		dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
	else
		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#else
void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
			    qdf_nbuf_t nbuf,
			    qdf_nbuf_t tail,
			    bool is_eapol)
{
	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#endif

#ifdef WLAN_FEATURE_11BE_MLO
int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
{
	return ((qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE) == 0) ||
		(qdf_mem_cmp(eh->ether_dhost, &vdev->mld_mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE) == 0));
}
#else
int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
{
	return (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
			    QDF_MAC_ADDR_SIZE) == 0);
}
#endif
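
/*
 * Under 11BE MLO a frame may be addressed to either the per-link vdev MAC
 * or the MLD MAC, so both are accepted as "our" destination; without MLO
 * only the vdev MAC is checked.
 */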

#ifndef QCA_HOST_MODE_WIFI_DISABLED
bool
dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
{
	struct dp_soc *soc = vdev->pdev->soc;

	if (!vdev->drop_3addr_mcast)
		return false;

	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return true;

	return false;
}

/**
 * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed
 *				    for this frame received in REO error ring.
 * @soc: Datapath SOC handle
 * @error: REO error detected or not
 * @error_code: Error code in case of REO error
 *
 * Return: true if pn check is needed in software,
 *	   false if pn check is not needed.
 */
static inline bool
dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
			     uint32_t error_code)
{
	return (soc->features.pn_in_reo_dest &&
		(error == HAL_REO_ERROR_DETECTED &&
		 (hal_rx_reo_is_2k_jump(error_code) ||
		  hal_rx_reo_is_oor_error(error_code) ||
		  hal_rx_reo_is_bar_oor_2k_jump(error_code))));
}
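
/*
 * In short: software PN validation applies only when the target places the
 * previous PN in the REO destination ring (pn_in_reo_dest) and REO flagged
 * a 2K-jump, OOR, or BAR OOR/2K-jump error; other error paths are left to
 * the normal PN handling.
 */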

#ifdef DP_WAR_INVALID_FIRST_MSDU_FLAG
static inline void
dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
				  bool first_msdu_in_mpdu_processed)
{
	if (first_msdu_in_mpdu_processed) {
		/*
		 * This is the 2nd indication of first_msdu in the same mpdu.
		 * Skip re-parsing the mpdu_desc_info and use the cached one,
		 * since this msdu is most probably from the current mpdu
		 * which is being processed
		 */
	} else {
		hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc,
						   qdf_nbuf_data(nbuf),
						   mpdu_desc_info);
	}
}
#else
static inline void
dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
				  bool first_msdu_in_mpdu_processed)
{
	hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc, qdf_nbuf_data(nbuf),
					   mpdu_desc_info);
}
#endif

/**
 * dp_rx_reo_err_entry_process() - Handler for REO error entry processing
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: pointer to mpdu level description info
 * @link_desc_va: pointer to msdu_link_desc virtual address
 * @err_code: reo error code fetched from ring entry
 *
 * Function to handle msdus fetched from msdu link desc; currently
 * supports the REO error types NULL queue, 2K jump, and OOR.
 *
 * Return: msdu count processed
 */
static uint32_t
dp_rx_reo_err_entry_process(struct dp_soc *soc,
			    void *ring_desc,
			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			    void *link_desc_va,
			    enum hal_reo_error_code err_code)
{
	uint32_t rx_bufs_used = 0;
	struct dp_pdev *pdev;
	int i;
	uint8_t *rx_tlv_hdr_first;
	uint8_t *rx_tlv_hdr_last;
	uint32_t tid = DP_MAX_TIDS;
	uint16_t peer_id;
	struct dp_rx_desc *rx_desc;
	struct rx_desc_pool *rx_desc_pool;
	qdf_nbuf_t nbuf;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
	struct buffer_addr_info next_link_desc_addr_info = { 0 };
	/* First field in REO Dst ring Desc is buffer_addr_info */
	void *buf_addr_info = ring_desc;
	qdf_nbuf_t head_nbuf = NULL;
	qdf_nbuf_t tail_nbuf = NULL;
	uint16_t msdu_processed = 0;
	QDF_STATUS status;
	bool ret, is_pn_check_needed;
	uint8_t rx_desc_pool_id;
	struct dp_txrx_peer *txrx_peer = NULL;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	hal_ring_handle_t hal_ring_hdl = soc->reo_exception_ring.hal_srng;
	bool first_msdu_in_mpdu_processed = false;
	bool msdu_dropped = false;
	uint8_t link_id = 0;

	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
						  mpdu_desc_info->peer_meta_data);
	is_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
							  HAL_REO_ERROR_DETECTED,
							  err_code);
more_msdu_link_desc:
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &num_msdus);
	for (i = 0; i < num_msdus; i++) {
		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc,
						msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);
		nbuf = rx_desc->nbuf;

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use) ||
		    qdf_unlikely(!nbuf)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			/* ignore duplicate RX desc and continue to process */
			/* Pop out the descriptor */
			msdu_dropped = true;
			continue;
		}

		ret = dp_rx_desc_paddr_sanity_check(rx_desc,
						    msdu_list.paddr[i]);
		if (!ret) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			rx_desc->in_err_state = 1;
			msdu_dropped = true;
			continue;
		}

		rx_desc_pool_id = rx_desc->pool_id;
		/* all buffers from an MSDU link belong to same pdev */
		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc_pool_id);

		rx_desc_pool = &soc->rx_desc_buf[rx_desc_pool_id];
		dp_ipa_rx_buf_smmu_mapping_lock(soc);
		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
		rx_desc->unmapped = 1;
		dp_ipa_rx_buf_smmu_mapping_unlock(soc);

		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
		rx_bufs_used++;
		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);

		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);

		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION)) {
			qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
			continue;
		}

		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
					     rx_desc_pool_id)) {
			/* MSDU queued back to the pool */
			msdu_dropped = true;
			goto process_next_msdu;
		}

		if (is_pn_check_needed) {
			if (msdu_list.msdu_info[i].msdu_flags &
			    HAL_MSDU_F_FIRST_MSDU_IN_MPDU) {
				dp_rx_err_populate_mpdu_desc_info(soc, nbuf,
					mpdu_desc_info,
					first_msdu_in_mpdu_processed);
				first_msdu_in_mpdu_processed = true;
			} else {
				if (!first_msdu_in_mpdu_processed) {
					/*
					 * If no msdu in this mpdu was dropped
					 * due to failed sanity checks, then
					 * it's not expected to hit this
					 * condition. Hence we assert here.
					 */
					if (!msdu_dropped)
						qdf_assert_always(0);

					/*
					 * We do not have valid mpdu_desc_info
					 * to process this nbuf, hence drop it.
					 */
					dp_rx_nbuf_free(nbuf);
					/* TODO - Increment stats */
					goto process_next_msdu;
				}
				/*
				 * DO NOTHING -
				 * Continue using the same mpdu_desc_info
				 * details populated from the first msdu in
				 * the mpdu.
				 */
			}

			status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf);
			if (QDF_IS_STATUS_ERROR(status)) {
				DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail,
					     1);
				dp_rx_nbuf_free(nbuf);
				goto process_next_msdu;
			}

			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
					mpdu_desc_info->peer_meta_data);

			if (mpdu_desc_info->bar_frame)
				_dp_rx_bar_frame_handle(soc, nbuf,
							mpdu_desc_info, tid,
							HAL_REO_ERROR_DETECTED,
							err_code);
		}

		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);

		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
			/*
			 * For the SG case, only the length of the last skb is
			 * valid, as HW populates msdu_len only for the last
			 * msdu in the rx link descriptor; use the length from
			 * the last skb to overwrite the head skb for further
			 * SG processing.
			 */
			QDF_NBUF_CB_RX_PKT_LEN(head_nbuf) =
				QDF_NBUF_CB_RX_PKT_LEN(tail_nbuf);
			nbuf = dp_rx_sg_create(soc, head_nbuf);
			qdf_nbuf_set_is_frag(nbuf, 1);
			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
		}

		switch (err_code) {
		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
			/*
			 * Only the first msdu carries a valid mpdu_start
			 * TLV; reuse its tid for the following msdus.
			 */
			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							   rx_tlv_hdr_last))
				tid = hal_rx_mpdu_start_tid_get(
						soc->hal_soc,
						rx_tlv_hdr_first);
			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
					  peer_id, tid);
			break;
		case HAL_REO_ERR_REGULAR_FRAME_OOR:
		case HAL_REO_ERR_BAR_FRAME_OOR:
			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
			break;
		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
			txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(
							soc, peer_id,
							&txrx_ref_handle,
							DP_MOD_ID_RX_ERR);
			if (!txrx_peer)
				dp_info_rl("txrx_peer is null peer_id %u",
					   peer_id);
			soc->arch_ops.dp_rx_null_q_desc_handle(soc, nbuf,
							       rx_tlv_hdr_last,
							       rx_desc_pool_id,
							       txrx_peer,
							       TRUE,
							       link_id);
			if (txrx_peer)
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_RX_ERR);
			break;
		default:
			dp_err_rl("Unsupported error code %d", err_code);
			dp_rx_nbuf_free(nbuf);
		}

process_next_msdu:
		msdu_processed++;
		head_nbuf = NULL;
		tail_nbuf = NULL;
	}

	/*
	 * If the MSDUs are spread across multiple link descriptors,
	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
	 * spread across multiple buffers). Hence, it is necessary to check
	 * the next link descriptor and release all the MSDUs that are part
	 * of it.
	 */
	hal_rx_get_next_msdu_link_desc_buf_addr_info(
			link_desc_va,
			&next_link_desc_addr_info);

	if (hal_rx_is_buf_addr_info_valid(
				&next_link_desc_addr_info)) {
		/* Clear the next link desc info for the current link_desc */
		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
		dp_rx_link_desc_return_by_addr(
				soc,
				buf_addr_info,
				HAL_BM_ACTION_PUT_IN_IDLE_LIST);

		hal_rx_buffer_addr_info_get_paddr(
				&next_link_desc_addr_info,
				&buf_info);
		/* buffer_addr_info is the first element of ring_desc */
		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
					  (uint32_t *)&next_link_desc_addr_info,
					  &buf_info);
		link_desc_va =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
		cur_link_desc_addr_info = next_link_desc_addr_info;
		buf_addr_info = &cur_link_desc_addr_info;

		goto more_msdu_link_desc;
	}

	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);

	return rx_bufs_used;
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */
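
/*
 * dp_rx_process_rxdma_err() - handle one nbuf reaped with an RXDMA error
 * code: validate msdu_done and the packet length, strip the Rx TLVs, then
 * either hand the frame to the mesh/raw/stack delivery paths or drop it,
 * depending on the error code and the vdev configuration.
 */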
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
			uint8_t err_code, uint8_t mac_id, uint8_t link_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;

	if (dp_rx_check_pkt_len(soc, pkt_len)) {
		/* Drop & free packet */
		dp_rx_nbuf_free(nbuf);
		return;
	}

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!txrx_peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
		return;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc,
				  vdev);
		/* Drop & free packet */
		dp_rx_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by the total size of
	 * the pre-header TLVs
	 */
	dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset);

	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
							htons(QDF_LLC_STP)) {
				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
				goto process_mesh;
			} else {
				goto process_rx;
			}
		}
	}
	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report unencrypted frame error.
	 * To pass WAPI cert case, SW needs to pass unencrypted
	 * rekey frame to stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		goto process_rx;
	}
	/*
	 * In the dynamic WEP case rekey frames are not encrypted,
	 * similar to WAPI. Allow EAPOL when 802.1X + WEP is enabled
	 * and the key install is already done.
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		dp_rx_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				      == QDF_STATUS_SUCCESS) {
			dp_rx_err_info("%pK: mesh pkt filtered", soc);
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			dp_rx_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer);
	}
process_rx:
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
				htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
					      qdf_nbuf_len(nbuf), link_id);
		if (is_broadcast) {
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1,
						      qdf_nbuf_len(nbuf),
						      link_id);
		}
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.unicast, 1,
					      qdf_nbuf_len(nbuf),
					      link_id);
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, txrx_peer, link_id);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
		DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
					    qdf_nbuf_is_ipv4_eapol_pkt(nbuf));
	}
	return;
}
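/**
 * dp_rx_process_mic_error() - Handle TKIP MIC failures reported by RXDMA
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @txrx_peer: txrx peer handle
 *
 * For raw frames a trailing fragment is handed to the defrag path;
 * otherwise the DA/TA addresses are extracted from the MPDU header and
 * the MIC failure is indicated through the ol_ops->rx_mic_error callback.
 * The nbuf is freed on all paths except the defrag hand-off.
 *
 * Return: None
 */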
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr,
			     struct dp_txrx_peer *txrx_peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	uint16_t rx_seq, fragno;
	uint8_t is_raw;
	unsigned int tid;
	QDF_STATUS status;
	struct cdp_rx_mic_err_info mic_failure_info;

	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
					    rx_tlv_hdr))
		return;

	if (!txrx_peer) {
		dp_info_rl("txrx_peer not found");
		goto fail;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_info_rl("VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		dp_info_rl("PDEV not found");
		goto fail;
	}

	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
	if (is_raw) {
		fragno = dp_rx_frag_get_mpdu_frag_number(soc,
							 qdf_nbuf_data(nbuf));
		/* Can get only last fragment */
		if (fragno) {
			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
							qdf_nbuf_data(nbuf));
			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
							qdf_nbuf_data(nbuf));

			status = dp_rx_defrag_add_last_frag(soc, txrx_peer,
							    tid, rx_seq, nbuf);
			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
				   "status %d !", rx_seq, fragno, status);
			return;
		}
	}

	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.da_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get da_mac_addr");
		goto fail;
	}

	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.ta_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get ta_mac_addr");
		goto fail;
	}

	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = NULL;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);

fail:
	dp_rx_nbuf_free(nbuf);
	return;
}
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_MCAST_MLO)
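/**
 * dp_rx_igmp_handler() - Pass a multicast frame to the arch-specific MLO
 *			  multicast handler, if one is registered
 * @soc: core DP main context
 * @vdev: vdev on which the packet arrived
 * @peer: txrx peer handle
 * @nbuf: buffer pointer
 * @link_id: link id on which the packet is received
 *
 * Return: true if the frame was consumed by the handler, false otherwise
 */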
static bool dp_rx_igmp_handler(struct dp_soc *soc,
			       struct dp_vdev *vdev,
			       struct dp_txrx_peer *peer,
			       qdf_nbuf_t nbuf,
			       uint8_t link_id)
{
	if (soc->arch_ops.dp_rx_mcast_handler) {
		if (soc->arch_ops.dp_rx_mcast_handler(soc, vdev, peer,
						      nbuf, link_id))
			return true;
	}
	return false;
}
#else
static bool dp_rx_igmp_handler(struct dp_soc *soc,
			       struct dp_vdev *vdev,
			       struct dp_txrx_peer *peer,
			       qdf_nbuf_t nbuf,
			       uint8_t link_id)
{
	return false;
}
#endif
/**
 * dp_rx_err_route_hdl() - Deliver EAPOL frames routed to the WBM error
 *			   ring to the stack and free any other packet
 *			   that arrives on this path
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @txrx_peer: txrx peer handle
 * @rx_tlv_hdr: start of rx tlv header
 * @err_src: rxdma/reo
 * @link_id: link id on which the packet is received
 *
 * This function indicates an EAPOL frame received in the WBM error ring
 * to the stack. Any other frame is dropped.
 *
 * Return: None
 */
static void
dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
		    struct dp_txrx_peer *txrx_peer, uint8_t *rx_tlv_hdr,
		    enum hal_rx_wbm_error_source err_src,
		    uint8_t link_id)
{
	uint32_t pkt_len;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	struct hal_rx_msdu_metadata msdu_metadata;
	bool is_eapol;

	qdf_nbuf_set_rx_chfrag_start(
				nbuf,
				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							       rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
								 rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_check_pkt_len(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(
			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!txrx_peer)
		goto drop_nbuf;

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by the total size of
	 * the pre-header TLVs
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
	else
		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
				   soc->rx_pkt_tlv_size));

	QDF_NBUF_CB_RX_PEER_ID(nbuf) = txrx_peer->peer_id;

	if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf, link_id))
		return;

	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

	/*
	 * Indicate EAPOL frame to stack only when vap mac address
	 * matches the destination address.
	 */
	is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
	if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		qdf_ether_header_t *eh =
			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		if (dp_rx_err_match_dhost(eh, vdev)) {
			DP_STATS_INC_PKT(vdev, rx_i.routed_eapol_pkt, 1,
					 qdf_nbuf_len(nbuf));

			/*
			 * Update the protocol tag in SKB based on
			 * CCE metadata.
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID,
						  true, true);
			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
					      true);
			DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
						  qdf_nbuf_len(nbuf),
						  vdev->pdev->enhanced_stats_en);
			qdf_nbuf_set_exc_frame(nbuf, 1);
			qdf_nbuf_set_next(nbuf, NULL);

			dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf,
						    NULL, is_eapol);
			return;
		}
	}

drop_nbuf:
	DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
		      err_src == HAL_RX_WBM_ERR_SRC_REO);
	DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
		      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);
	dp_rx_nbuf_free(nbuf);
}
#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef DP_RX_DESC_COOKIE_INVALIDATE
/**
 * dp_rx_link_cookie_check() - Validate link desc cookie
 * @ring_desc: ring descriptor
 *
 * Return: qdf status
 */
static inline QDF_STATUS
dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
{
	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
 * @ring_desc: ring descriptor
 *
 * Return: None
 */
static inline void
dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
{
	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
}
#else
static inline QDF_STATUS
dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
{
}
#endif
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_err_ring_record_entry() - Record rx err ring history
 * @soc: Datapath soc structure
 * @paddr: paddr of the buffer in RX err ring
 * @sw_cookie: SW cookie of the buffer in RX err ring
 * @rbm: Return buffer manager of the buffer in RX err ring
 *
 * Return: None
 */
static inline void
dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
			    uint32_t sw_cookie, uint8_t rbm)
{
	struct dp_buf_info_record *record;
	uint32_t idx;

	if (qdf_unlikely(!soc->rx_err_ring_history))
		return;

	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
					DP_RX_ERR_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->rx_err_ring_history->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->hbi.paddr = paddr;
	record->hbi.sw_cookie = sw_cookie;
	record->hbi.rbm = rbm;
}
#else
static inline void
dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
			    uint32_t sw_cookie, uint8_t rbm)
{
}
#endif
#ifdef HANDLE_RX_REROUTE_ERR
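/**
 * dp_rx_err_handle_msdu_buf() - Reclaim an MSDU buffer wrongly routed to
 *				 the REO error ring
 * @soc: core DP main context
 * @ring_desc: REO error ring descriptor carrying an MSDU buffer address
 *
 * Unmaps and frees the nbuf referenced by the descriptor and returns the
 * rx descriptor to the pdev free list so it can be replenished.
 *
 * Return: lmac id of the reaped buffer on success,
 *	   DP_INVALID_LMAC_ID otherwise
 */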
static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc,
				     hal_ring_desc_t ring_desc)
{
	int lmac_id = DP_INVALID_LMAC_ID;
	struct dp_rx_desc *rx_desc;
	struct hal_buf_info hbi;
	struct dp_pdev *pdev;
	struct rx_desc_pool *rx_desc_pool;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);

	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie);

	/* sanity */
	if (!rx_desc) {
		DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1);
		goto assert_return;
	}

	if (!rx_desc->nbuf)
		goto assert_return;

	dp_rx_err_ring_record_entry(soc, hbi.paddr,
				    hbi.sw_cookie,
				    hal_rx_ret_buf_manager_get(soc->hal_soc,
							       ring_desc));
	if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) {
		DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
		rx_desc->in_err_state = 1;
		goto assert_return;
	}

	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	/* After this point the rx_desc and nbuf are valid */
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	qdf_assert_always(!rx_desc->unmapped);
	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
	rx_desc->unmapped = 1;
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
				    rx_desc->pool_id);
	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
	lmac_id = rx_desc->pool_id;
	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
				    &pdev->free_list_tail,
				    rx_desc);
	return lmac_id;

assert_return:
	qdf_assert(0);
	return lmac_id;
}
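/**
 * dp_rx_err_exception() - Handle an unexpected MSDU buffer in the REO
 *			   error ring and trigger self-recovery when the
 *			   routing-error count or rate breaches the
 *			   configured thresholds
 * @soc: core DP main context
 * @ring_desc: REO error ring descriptor
 *
 * Return: lmac id of the reaped buffer, DP_INVALID_LMAC_ID otherwise
 */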
static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
{
	int ret;
	uint64_t cur_time_stamp;

	DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1);

	/* Recover if the overall error count exceeds the threshold */
	if (soc->stats.rx.err.reo_err_msdu_buf_rcved >
			DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) {
		dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
		       soc->stats.rx.err.reo_err_msdu_buf_rcved,
		       soc->rx_route_err_start_pkt_ts);
		qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR);
	}

	cur_time_stamp = qdf_get_log_timestamp_usecs();
	if (!soc->rx_route_err_start_pkt_ts)
		soc->rx_route_err_start_pkt_ts = cur_time_stamp;

	/* Recover if a threshold number of packets arrived within the window */
	if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) >
			DP_RX_ERR_ROUTE_TIMEOUT_US) {
		soc->rx_route_err_start_pkt_ts = cur_time_stamp;
		if (soc->rx_route_err_in_window >
				DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) {
			qdf_trigger_self_recovery(NULL,
						  QDF_RX_REG_PKT_ROUTE_ERR);
			dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
			       soc->stats.rx.err.reo_err_msdu_buf_rcved,
			       soc->rx_route_err_start_pkt_ts);
		} else {
			soc->rx_route_err_in_window = 1;
		}
	} else {
		soc->rx_route_err_in_window++;
	}

	ret = dp_rx_err_handle_msdu_buf(soc, ring_desc);

	return ret;
}
#else /* HANDLE_RX_REROUTE_ERR */
static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
{
	qdf_assert_always(0);

	return DP_INVALID_LMAC_ID;
}
#endif /* HANDLE_RX_REROUTE_ERR */
#ifdef WLAN_MLO_MULTI_CHIP
/**
 * dp_idle_link_bm_id_check() - WAR for a HW issue
 *
 * @soc: DP SOC handle
 * @rbm: idle link RBM value
 * @ring_desc: reo error link descriptor
 *
 * This is a workaround for a HW issue where a link descriptor belonging
 * to a partner SOC is received because packets were wrongly interpreted
 * as fragments.
 *
 * Return: true in case the link desc is consumed,
 *	   false in other cases
 */
static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
				     void *ring_desc)
{
	struct dp_soc *replenish_soc = NULL;

	/* return ok in case of a link desc of the same soc */
	if (rbm == soc->idle_link_bm_id)
		return false;

	if (soc->arch_ops.dp_soc_get_by_idle_bm_id)
		replenish_soc =
			soc->arch_ops.dp_soc_get_by_idle_bm_id(soc, rbm);

	qdf_assert_always(replenish_soc);

	/*
	 * For the WIN use case we should only get fragment packets in
	 * this ring; since fragmentation is not supported for the MLO
	 * case, we should not see links from another soc.
	 *
	 * Drop all packets from the partner soc and replenish the
	 * descriptors.
	 */
	dp_handle_wbm_internal_error(replenish_soc, ring_desc,
				     HAL_WBM_RELEASE_RING_2_DESC_TYPE);

	return true;
}
#else
static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
				     void *ring_desc)
{
	return false;
}
#endif
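/**
 * dp_rx_err_process() - Process errors reported in the REO error ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core DP main context
 * @hal_ring_hdl: HAL handle of the REO error ring
 * @quota: upper limit of descriptors to be reaped in one call
 *
 * Walks the REO error ring, handles PN check failures, 2k-jump, OOR,
 * BAR-frame and fragment errors, drops the remaining error MPDUs, and
 * replenishes the reaped buffers back to the refill rings.
 *
 * Return: number of rx buffers used
 */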
uint32_t
dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t err_status;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	uint16_t num_msdus;
	struct dp_rx_desc *rx_desc = NULL;
	QDF_STATUS status;
	bool ret;
	uint32_t error_code = 0;
	bool sw_pn_check_needed;
	int max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
	int i, rx_bufs_reaped_total;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc,
			      hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_peek(hal_soc,
						  hal_ring_hdl)))) {
		DP_STATS_INC(soc, rx.err_ring_pkts, 1);
		err_status = hal_rx_err_status_get(hal_soc, ring_desc);
		buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc);

		if (err_status == HAL_REO_ERROR_DETECTED)
			error_code = hal_rx_get_reo_error_code(hal_soc,
							       ring_desc);

		qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0);
		sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
								  err_status,
								  error_code);
		if (!sw_pn_check_needed) {
			/*
			 * MPDU desc info will be present in the REO desc
			 * only in the below scenarios
			 * 1) pn_in_dest_disabled: always
			 * 2) pn_in_dest enabled: all cases except 2k-jump
			 *			  and OOR errors
			 */
			hal_rx_mpdu_desc_info_get(hal_soc, ring_desc,
						  &mpdu_desc_info);
		}

		if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0)
			goto next_entry;

		/*
		 * For REO error ring, only MSDU LINK DESC is expected.
		 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case.
		 */
		if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) {
			int lmac_id;

			lmac_id = dp_rx_err_exception(soc, ring_desc);
			if (lmac_id >= 0)
				rx_bufs_reaped[lmac_id] += 1;
			goto next_entry;
		}

		hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
					  &hbi);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) &
					soc->link_desc_id_start);

		if (dp_idle_link_bm_id_check(soc, hbi.rbm, ring_desc)) {
			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
			goto next_entry;
		}

		status = dp_rx_link_cookie_check(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
			break;
		}

		hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);
		if (!num_msdus ||
		    !dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[0])) {
			dp_rx_err_info_rl("Invalid MSDU info num_msdus %u cookie: 0x%x",
					  num_msdus, msdu_list.sw_cookie[0]);
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
			goto next_entry;
		}

		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
					    msdu_list.sw_cookie[0],
					    msdu_list.rbm[0]);
		// TODO - BE- Check if the RBM is to be checked for all chips
		if (qdf_unlikely((msdu_list.rbm[0] !=
					dp_rx_get_rx_bm_id(soc)) &&
				 (msdu_list.rbm[0] !=
				  soc->idle_link_bm_id) &&
				 (msdu_list.rbm[0] !=
					dp_rx_get_defrag_bm_id(soc)))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				dp_rx_err_err("%pK: Invalid RBM %d",
					      soc, msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM)*/
			dp_rx_link_desc_return(soc, ring_desc,
					HAL_BM_ACTION_RELEASE_MSDU_LIST);
			goto next_entry;
		}

		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc,
						msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		if (sw_pn_check_needed) {
			goto process_reo_error_code;
		}

		if (mpdu_desc_info.bar_frame) {
			qdf_assert_always(mpdu_desc_info.msdu_count == 1);

			dp_rx_bar_frame_handle(soc, ring_desc, rx_desc,
					       &mpdu_desc_info, err_status,
					       error_code);

			rx_bufs_reaped[mac_id] += 1;
			goto next_entry;
		}

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for the
			 * fragmented case. If there is more than one msdu
			 * in the link desc, we drop the msdus and release
			 * the link desc back.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				goto next_entry;
			}

			/*
			 * this is an unlikely scenario where the host is
			 * reaping a descriptor which it already reaped
			 * just a while ago but is yet to replenish it back
			 * to HW.
			 * In this case host will dump the last 128 descriptors
			 * including the software descriptor rx_desc and assert.
			 */
			if (qdf_unlikely(!rx_desc->in_use)) {
				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
				dp_info_rl("Reaping rx_desc not in use!");
				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
							   ring_desc, rx_desc);
				/* ignore duplicate RX desc and continue */
				/* Pop out the descriptor */
				goto next_entry;
			}

			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
							    msdu_list.paddr[0]);
			if (!ret) {
				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
				rx_desc->in_err_state = 1;
				goto next_entry;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			goto next_entry;
		}

process_reo_error_code:
		/*
		 * Expect REO errors to be handled after this point
		 */
		qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED);

		dp_info_rl("Got pkt with REO ERROR: %d", error_code);

		switch (error_code) {
		case HAL_REO_ERR_PN_CHECK_FAILED:
		case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);

			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			break;
		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_REGULAR_FRAME_OOR:
		case HAL_REO_ERR_BAR_FRAME_OOR:
		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);

			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_reo_err_entry_process(
					soc,
					ring_desc,
					&mpdu_desc_info,
					link_desc_va,
					error_code);

			rx_bufs_reaped[mac_id] += count;
			break;
		case HAL_REO_ERR_QUEUE_DESC_INVALID:
		case HAL_REO_ERR_AMPDU_IN_NON_BA:
		case HAL_REO_ERR_NON_BA_DUPLICATE:
		case HAL_REO_ERR_BA_DUPLICATE:
		case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION:
		case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN:
		case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			count = dp_rx_msdus_drop(soc, ring_desc,
						 &mpdu_desc_info,
						 &mac_id, quota);
			rx_bufs_reaped[mac_id] += count;
			break;
		default:
			/* Assert if unexpected error type */
			qdf_assert_always(0);
		}

next_entry:
		dp_rx_link_cookie_invalidate(ring_desc);
		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);

		rx_bufs_reaped_total = 0;
		for (i = 0; i < MAX_PDEV_CNT; i++)
			rx_bufs_reaped_total += rx_bufs_reaped[i];

		if (dp_rx_reap_loop_pkt_limit_hit(soc, rx_bufs_reaped_total,
						  max_reap_limit))
			break;
	}

done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail,
						false);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
#ifdef DROP_RXDMA_DECRYPT_ERR
/**
 * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
 *
 * Return: true if rxdma decrypt err frames are handled and false otherwise
 */
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return false;
}
#else
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return true;
}
#endif
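/**
 * dp_rx_wbm_sg_list_last_msdu_war() - WAR for targets where the MSDU
 *				       length is valid only in the last
 *				       MSDU of an SG list
 * @soc: core DP main context
 *
 * Copies the MSDU length read from the tail nbuf's TLVs into the packet
 * length field of every nbuf in the current WBM SG list.
 *
 * Return: None
 */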
void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
{
	if (soc->wbm_sg_last_msdu_war) {
		uint32_t len;
		qdf_nbuf_t temp = soc->wbm_sg_param.wbm_sg_nbuf_tail;

		len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
						     qdf_nbuf_data(temp));
		temp = soc->wbm_sg_param.wbm_sg_nbuf_head;
		while (temp) {
			QDF_NBUF_CB_RX_PKT_LEN(temp) = len;
			temp = temp->next;
		}
	}
}
#ifdef RX_DESC_DEBUG_CHECK
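/**
 * dp_rx_wbm_desc_nbuf_sanity_check() - Verify that the buffer paddr in
 *					the WBM release descriptor matches
 *					the paddr cached in the SW rx desc
 * @soc: core DP main context
 * @hal_ring_hdl: WBM release ring handle
 * @ring_desc: WBM release ring descriptor
 * @rx_desc: SW rx descriptor
 *
 * Return: QDF_STATUS_SUCCESS if the paddr matches,
 *	   QDF_STATUS_E_FAILURE (after dumping the ring desc) otherwise
 */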
QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
					    hal_ring_handle_t hal_ring_hdl,
					    hal_ring_desc_t ring_desc,
					    struct dp_rx_desc *rx_desc)
{
	struct hal_buf_info hbi;

	hal_rx_wbm_rel_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
	/* Sanity check for possible buffer paddr corruption */
	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
		return QDF_STATUS_SUCCESS;

	hal_srng_dump_ring_desc(soc->hal_soc, hal_ring_hdl, ring_desc);

	return QDF_STATUS_E_FAILURE;
}
#else
QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
					    hal_ring_handle_t hal_ring_hdl,
					    hal_ring_desc_t ring_desc,
					    struct dp_rx_desc *rx_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif
bool
dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
{
	/*
	 * Currently only the Null Queue and Unencrypted error handlers
	 * have SG support. The other error handlers do not deal with SG
	 * buffers.
	 */
	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
		return true;

	return false;
}
#ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
			      qdf_nbuf_t nbuf)
{
	/*
	 * With fast recycle, the TX driver can skip cache invalidation
	 * of the buffer for the SFE forwarding case, so the TLV headers
	 * must be invalidated here after writing to this location.
	 */
	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
				      (void *)(nbuf->data +
					       soc->rx_pkt_tlv_size +
					       L3_HEADER_PAD));
}
#else
void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
			      qdf_nbuf_t nbuf)
{
}
#endif
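/**
 * dp_rx_wbm_err_process() - Process frames released to the WBM error ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core DP main context
 * @hal_ring_hdl: HAL handle of the WBM error release ring
 * @quota: upper limit of descriptors to be reaped in one call
 *
 * Reaps the error nbufs through the arch-specific reap handler, then
 * dispatches each frame based on the REO/RXDMA push reason and error
 * code recorded in the nbuf TLVs.
 *
 * Return: number of rx buffers used
 */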
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_soc_handle_t hal_soc;
	uint32_t rx_bufs_used = 0;
	struct dp_pdev *dp_pdev;
	uint8_t *rx_tlv_hdr;
	bool is_tkip_mic_err;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;
	uint8_t link_id = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	nbuf_head = soc->arch_ops.dp_rx_wbm_err_reap_desc(int_ctx, soc,
							  hal_ring_hdl,
							  quota,
							  &rx_bufs_used);
	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_txrx_peer *txrx_peer;
		struct dp_peer *peer;
		uint16_t peer_id;
		uint8_t err_code;
		uint8_t *tlv_hdr;
		uint32_t peer_meta_data;
		dp_txrx_ref_handle txrx_ref_handle = NULL;
		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		wbm_err_info = dp_rx_get_err_info(soc, nbuf);
		peer_meta_data = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
							       rx_tlv_hdr);
		peer_id = dp_rx_peer_metadata_peer_id_get(soc, peer_meta_data);
		txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
							   &txrx_ref_handle,
							   DP_MOD_ID_RX_ERR);

		if (!txrx_peer)
			dp_info_rl("peer is null peer_id %u err_src %u, "
				   "REO: push_rsn %u err_code %u, "
				   "RXDMA: push_rsn %u err_code %u",
				   peer_id, wbm_err_info.wbm_err_src,
				   wbm_err_info.reo_psh_rsn,
				   wbm_err_info.reo_err_code,
				   wbm_err_info.rxdma_psh_rsn,
				   wbm_err_info.rxdma_err_code);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;
		/*
		 * Form the SG for msdu continued buffers;
		 * QCN9000 has this support
		 */
		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;
			/*
			 * SG error handling is not done correctly,
			 * drop SG frames for now.
			 */
			dp_rx_nbuf_free(nbuf);
			dp_info_rl("scattered msdu dropped");
			nbuf = next;
			if (txrx_peer)
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_RX_ERR);
			continue;
		}

		pool_id = wbm_err_info.pool_id;
		dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);

		if (dp_pdev && dp_pdev->link_peer_stats &&
		    txrx_peer && txrx_peer->is_mld_peer) {
			link_id = dp_rx_peer_mdata_link_id_get(
							soc,
							peer_meta_data);
			if (!link_id) {
				DP_PEER_PER_PKT_STATS_INC(
						txrx_peer,
						rx.inval_link_id_pkt_cnt,
						1, link_id);
			}
		} else {
			link_id = 0;
		}

		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err_info.reo_err_code], 1);
				/* increment @pdev level */
				if (dp_pdev)
					DP_STATS_INC(dp_pdev, err.reo_error,
						     1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					soc->arch_ops.dp_rx_null_q_desc_handle(
								soc, nbuf,
								rx_tlv_hdr,
								pool_id,
								txrx_peer,
								FALSE,
								link_id);
					break;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.jump_2k_err,
									  1,
									  link_id);

					pool_id = wbm_err_info.pool_id;

					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					hal_rx_msdu_start_msdu_len_get(
						soc->hal_soc, rx_tlv_hdr);
					nbuf->next = NULL;
					dp_2k_jump_handle(soc, nbuf,
							  rx_tlv_hdr,
							  peer_id, tid);
					break;
				case HAL_REO_ERR_REGULAR_FRAME_OOR:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.oor_err,
									  1,
									  link_id);
					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					hal_rx_msdu_start_msdu_len_get(
						soc->hal_soc, rx_tlv_hdr);
					nbuf->next = NULL;
					dp_rx_oor_handle(soc, nbuf,
							 peer_id,
							 rx_tlv_hdr);
					break;
				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
				case HAL_REO_ERR_BAR_FRAME_OOR:
					peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
					if (peer) {
						dp_rx_err_handle_bar(soc, peer,
								     nbuf);
						dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
					}
					dp_rx_nbuf_free(nbuf);
					break;
				case HAL_REO_ERR_PN_CHECK_FAILED:
				case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.pn_err,
									  1,
									  link_id);
					dp_rx_nbuf_free(nbuf);
					break;
				default:
					dp_info_rl("Got pkt with REO ERROR: %d",
						   wbm_err_info.reo_err_code);
					dp_rx_nbuf_free(nbuf);
				}
			} else if (wbm_err_info.reo_psh_rsn
					== HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
						    rx_tlv_hdr,
						    HAL_RX_WBM_ERR_SRC_REO,
						    link_id);
			} else {
				/* should not enter here */
				dp_rx_err_alert("invalid reo push reason %u",
						wbm_err_info.reo_psh_rsn);
				dp_rx_nbuf_free(nbuf);
				qdf_assert_always(0);
			}
		} else if (wbm_err_info.wbm_err_src ==
					HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);
				/* increment @pdev level */
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
				case HAL_RXDMA_ERR_WIFI_PARSE:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.rxdma_wifi_parse_err,
									  1,
									  link_id);

					pool_id = wbm_err_info.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								txrx_peer,
								wbm_err_info.
								rxdma_err_code,
								pool_id,
								link_id);
					break;
				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								txrx_peer);
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.mic_err,
									  1,
									  link_id);
					break;
				case HAL_RXDMA_ERR_DECRYPT:
					/*
					 * All the TKIP-MIC failures are
					 * treated as Decrypt Errors for
					 * QCN9224 targets
					 */
					is_tkip_mic_err =
					hal_rx_msdu_end_is_tkip_mic_err(hal_soc, rx_tlv_hdr);

					if (is_tkip_mic_err && txrx_peer) {
						dp_rx_process_mic_error(soc, nbuf,
									rx_tlv_hdr,
									txrx_peer);
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.mic_err,
									  1,
									  link_id);
						break;
					}

					if (txrx_peer) {
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.decrypt_err,
									  1,
									  link_id);
						dp_rx_nbuf_free(nbuf);
						break;
					}

					if (!dp_handle_rxdma_decrypt_err()) {
						dp_rx_nbuf_free(nbuf);
						break;
					}

					pool_id = wbm_err_info.pool_id;
					err_code = wbm_err_info.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr, NULL,
								err_code,
								pool_id,
								link_id);
					break;
				case HAL_RXDMA_MULTICAST_ECHO:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
									      rx.mec_drop, 1,
									      qdf_nbuf_len(nbuf),
									      link_id);
					dp_rx_nbuf_free(nbuf);
					break;
				case HAL_RXDMA_UNAUTHORIZED_WDS:
					pool_id = wbm_err_info.pool_id;
					err_code = wbm_err_info.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr,
								txrx_peer,
								err_code,
								pool_id,
								link_id);
					break;
				default:
					dp_rx_nbuf_free(nbuf);
					dp_err_rl("RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			} else if (wbm_err_info.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
						    rx_tlv_hdr,
						    HAL_RX_WBM_ERR_SRC_RXDMA,
						    link_id);
			} else if (wbm_err_info.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) {
				dp_rx_err_err("rxdma push reason %u",
					      wbm_err_info.rxdma_psh_rsn);
				DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
				dp_rx_nbuf_free(nbuf);
			} else {
				/* should not enter here */
				dp_rx_err_alert("invalid rxdma push reason %u",
						wbm_err_info.rxdma_psh_rsn);
				dp_rx_nbuf_free(nbuf);
				qdf_assert_always(0);
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (txrx_peer)
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX_ERR);

		nbuf = next;
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc found
 *
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
			 hal_rxdma_desc_t rxdma_dst_ring_desc,
			 void *rx_desc)
{
	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
	dp_rx_dump_info_and_assert(
			soc,
			soc->rx_rel_ring.hal_srng,
			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
			rx_desc);
}
/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdu in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	hal_rxdma_desc_t ring_desc;
	struct rx_desc_pool *rx_desc_pool;

	if (!pdev) {
		dp_rx_err_debug("%pK: pdev is null for mac_id = %d",
				soc, mac_id);
		return rx_bufs_used;
	}

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
				     &buf_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert_always(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* If the msdus belong to an NSS offloaded radio &&
			 * the rbm is not SW1_BM, then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). Let
			 * these buffers be given to the NSS completion ring
			 * for NSS to free them.
			 * Else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] !=
				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						soc->arch_ops.
						dp_rx_desc_cookie_2_va(
							soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;
					/*
					 * this is an unlikely scenario
					 * where the host is reaping
					 * a descriptor which
					 * it already reaped just a while ago
					 * but is yet to replenish
					 * it back to HW.
					 * In this case host will dump
					 * the last 128 descriptors
					 * including the software descriptor
					 * rx_desc and assert.
					 */
					ring_desc = rxdma_dst_ring_desc;
					if (qdf_unlikely(!rx_desc->in_use)) {
						dup_desc_dbg(soc,
							     ring_desc,
							     rx_desc);
						continue;
					}

					if (rx_desc->unmapped == 0) {
						rx_desc_pool =
							&soc->rx_desc_buf[rx_desc->pool_id];
						dp_ipa_rx_buf_smmu_mapping_lock(soc);
						dp_rx_nbuf_unmap_pool(soc,
								      rx_desc_pool,
								      msdu);
						rx_desc->unmapped = 1;
						dp_ipa_rx_buf_smmu_mapping_unlock(soc);
					}

					dp_rx_err_debug("%pK: msdu_nbuf=%pK ",
							soc, msdu);

					dp_rx_buffer_pool_nbuf_free(soc, msdu,
							rx_desc->pool_id);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail, rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purposes.
		 */
		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
					     buf_info.paddr, buf_info.sw_cookie,
					     buf_info.rbm);

		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
					      &buf_info);
		dp_rx_link_desc_return_by_addr(soc,
					       (hal_buff_addrinfo_t)
						rx_link_buf_info,
					       bm_action);
	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
	if (pdev)
		DP_STATS_INC(pdev, err.rxdma_error, 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		dp_rx_err_err("%pK: Packet received with Decrypt error", soc);
	}

	return rx_bufs_used;
}
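/**
 * dp_rxdma_err_process() - Process errors reported in the RXDMA error
 *			    destination ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: upper limit of descriptors to be reaped in one call
 *
 * Pops the error MPDUs from the RXDMA error destination ring and
 * replenishes the reaped buffers back to the refill ring.
 *
 * Return: number of rx buffers reaped
 */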
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;

	if (!err_dst_srng) {
		dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
			      soc, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
		dp_rx_err_err("%pK: HAL Monitor Destination Ring access failed -- %pK",
			      soc, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	dp_srng_access_end(int_ctx, soc, err_dst_srng);

	if (rx_bufs_used) {
		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];
		} else {
			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
			rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
		}

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail, false);

		work_done += rx_bufs_used;
	}

	return work_done;
}
#ifndef QCA_HOST_MODE_WIFI_DISABLED
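/**
 * dp_wbm_int_err_mpdu_pop() - Free the MSDUs referenced by a link
 *			       descriptor received as a WBM internal error
 * @soc: core DP main context
 * @mac_id: lmac id / rx desc pool id
 * @rxdma_dst_ring_desc: link descriptor buf addr info
 * @head: head of the rx desc list to be freed
 * @tail: tail of the rx desc list to be freed
 * @rx_bufs_used: per-pool count of reaped buffers, updated in place
 *
 * Return: None
 */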
static inline void
dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
			hal_rxdma_desc_t rxdma_dst_ring_desc,
			union dp_rx_desc_list_elem_t **head,
			union dp_rx_desc_list_elem_t **tail,
			uint32_t *rx_bufs_used)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t msdu_cnt, i;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	struct rx_desc_pool *rx_desc_pool;
	struct dp_rx_desc *rx_desc;

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
				     &buf_info, &msdu_cnt);

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		if (!rx_msdu_link_desc) {
			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
			break;
		}

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			for (i = 0; i < num_msdus; i++) {
				if (!dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[i])) {
					dp_rx_err_info_rl("Invalid MSDU info cookie: 0x%x",
							  msdu_list.sw_cookie[i]);
					continue;
				}

				rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
							soc,
							msdu_list.sw_cookie[i]);
				qdf_assert_always(rx_desc);
				rx_desc_pool =
					&soc->rx_desc_buf[rx_desc->pool_id];
				msdu = rx_desc->nbuf;

				/*
				 * this is an unlikely scenario where the host
				 * is reaping a descriptor which it already
				 * reaped just a while ago but is yet to
				 * replenish it back to HW.
				 */
				if (qdf_unlikely(!rx_desc->in_use) ||
				    qdf_unlikely(!msdu)) {
					dp_rx_err_info_rl("Reaping rx_desc not in use!");
					continue;
				}

				dp_ipa_rx_buf_smmu_mapping_lock(soc);
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu);
				rx_desc->unmapped = 1;
				dp_ipa_rx_buf_smmu_mapping_unlock(soc);

				dp_rx_buffer_pool_nbuf_free(soc, msdu,
							    rx_desc->pool_id);
				rx_bufs_used[rx_desc->pool_id]++;
				dp_rx_add_to_free_desc_list(head,
							    tail, rx_desc);
			}
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purposes.
		 */
		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
					     buf_info.paddr, buf_info.sw_cookie,
					     buf_info.rbm);

		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
					      &buf_info);
		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
					       rx_link_buf_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	} while (buf_info.paddr);
}
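/**
 * dp_handle_wbm_internal_error() - Reclaim the buffer or link descriptor
 *				    flagged by WBM as an internal error
 * @soc: core DP main context
 * @hal_desc: WBM release ring descriptor
 * @buf_type: type released by WBM (MSDU buffer or MSDU link descriptor)
 *
 * Frees the nbuf (buffer type) or every MSDU referenced by the link
 * descriptor (desc type) and replenishes the reaped buffers back to the
 * refill rings.
 *
 * Return: None
 */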
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type)
{
	struct hal_buf_info buf_info = {0};
	struct dp_rx_desc *rx_desc = NULL;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = {0};
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;
	uint8_t mac_id;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info);

	if (!buf_info.paddr) {
		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
		return;
	}

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc,
				  &buf_info);

	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
							soc,
							buf_info.sw_cookie);

		if (rx_desc && rx_desc->nbuf) {
			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
			dp_ipa_rx_buf_smmu_mapping_lock(soc);
			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
					      rx_desc->nbuf);
			rx_desc->unmapped = 1;
			dp_ipa_rx_buf_smmu_mapping_unlock(soc);

			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
						    rx_desc->pool_id);
			dp_rx_add_to_free_desc_list(&head,
						    &tail,
						    rx_desc);

			rx_bufs_reaped[rx_desc->pool_id]++;
		}
	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
		pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie);

		dp_wbm_int_err_mpdu_pop(soc, pool_id, hal_desc,
					&head, &tail, rx_bufs_reaped);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		struct rx_desc_pool *rx_desc_pool;
		struct dp_srng *dp_rxdma_srng;

		if (!rx_bufs_reaped[mac_id])
			continue;

		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool,
					rx_bufs_reaped[mac_id],
					&head, &tail, false);
	}
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */