dp_rx_err.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_htt.h"
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h> /* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"
#include "dp_rx_buffer_pool.h"

#define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_info(params...) \
        __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
#define dp_rx_err_info_rl(params...) \
        __QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
#define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params)

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/* Max regular Rx packet routing error */
#define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20
#define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10
#define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* microseconds */

#ifdef FEATURE_MEC
bool dp_rx_mcast_echo_check(struct dp_soc *soc,
                            struct dp_txrx_peer *txrx_peer,
                            uint8_t *rx_tlv_hdr,
                            qdf_nbuf_t nbuf)
{
        struct dp_vdev *vdev = txrx_peer->vdev;
        struct dp_pdev *pdev = vdev->pdev;
        struct dp_mec_entry *mecentry = NULL;
        struct dp_ast_entry *ase = NULL;
        uint16_t sa_idx = 0;
        uint8_t *data;

        /*
         * Multicast Echo Check is required only if vdev is STA and
         * received pkt is a multicast/broadcast pkt. Otherwise
         * skip the MEC check.
         */
        if (vdev->opmode != wlan_op_mode_sta)
                return false;

        if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
                return false;

        data = qdf_nbuf_data(nbuf);

        /*
         * If the received pkt's src mac addr matches the vdev
         * mac address, drop the pkt as it is looped back.
         */
        if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
                          vdev->mac_addr.raw,
                          QDF_MAC_ADDR_SIZE)))
                return true;

        /*
         * In case of qwrap isolation mode, do not drop loopback packets.
         * In isolation mode, all packets from the wired stations need to go
         * to rootap and loop back to reach the wireless stations and
         * vice-versa.
         */
        if (qdf_unlikely(vdev->isolation_vdev))
                return false;

        /*
         * If the received pkt's src mac addr matches the MAC addr of a
         * wired PC behind the STA, or of a wireless STA behind the
         * repeater, drop the pkt as it is looped back.
         */
        if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
                sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

                if ((sa_idx < 0) ||
                    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                                  "invalid sa_idx: %d", sa_idx);
                        qdf_assert_always(0);
                }

                qdf_spin_lock_bh(&soc->ast_lock);
                ase = soc->ast_table[sa_idx];

                /*
                 * This check is not strictly needed since MEC is not
                 * dependent on AST, but without it SON has issues in the
                 * dual backhaul scenario. In APS SON mode, a client connected
                 * to the RE on 2G sends multicast packets; the RE sends them
                 * to the CAP over the 5G backhaul, and the CAP loops them
                 * back on 2G to the RE. On receiving them on the 2G STA vap,
                 * we would otherwise assume the client has roamed and kick
                 * it out.
                 */
                if (ase && (ase->peer_id != txrx_peer->peer_id)) {
                        qdf_spin_unlock_bh(&soc->ast_lock);
                        goto drop;
                }

                qdf_spin_unlock_bh(&soc->ast_lock);
        }

        qdf_spin_lock_bh(&soc->mec_lock);
        mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
                                                   &data[QDF_MAC_ADDR_SIZE]);
        if (!mecentry) {
                qdf_spin_unlock_bh(&soc->mec_lock);
                return false;
        }

        qdf_spin_unlock_bh(&soc->mec_lock);

drop:
        dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT,
                       soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));

        return true;
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
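
/*
 * Illustrative only, not compiled: a minimal sketch of how an Rx error path
 * consults dp_rx_mcast_echo_check() before delivering a frame, mirroring the
 * call made in dp_rx_null_q_desc_handle() later in this file. Names (soc,
 * txrx_peer, rx_tlv_hdr, nbuf) are assumed to be in scope; stats updates are
 * elided.
 */
#if 0
        if (!soc->mec_fw_offload &&
            dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
                /* looped-back MC/BC frame: free it instead of delivering */
                dp_rx_nbuf_free(nbuf);
                return QDF_STATUS_E_FAILURE;
        }
#endif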

void dp_rx_link_desc_refill_duplicate_check(
                                struct dp_soc *soc,
                                struct hal_buf_info *buf_info,
                                hal_buff_addrinfo_t ring_buf_info)
{
        struct hal_buf_info current_link_desc_buf_info = { 0 };

        /* do duplicate link desc address check */
        hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
                                          &current_link_desc_buf_info);

        /*
         * TODO - Check if the hal soc api call can be removed
         * since the cookie is just used for print.
         * buffer_addr_info is the first element of ring_desc
         */
        hal_rx_buf_cookie_rbm_get(soc->hal_soc,
                                  (uint32_t *)ring_buf_info,
                                  &current_link_desc_buf_info);
        if (qdf_unlikely(current_link_desc_buf_info.paddr ==
                         buf_info->paddr)) {
                dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
                           current_link_desc_buf_info.paddr,
                           current_link_desc_buf_info.sw_cookie);
                DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
        }
        *buf_info = current_link_desc_buf_info;
}
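
/*
 * Illustrative only, not compiled: the duplicate-refill check works by
 * keeping the previously refilled link descriptor address in a caller-owned
 * hal_buf_info and comparing each new refill against it. This is a sketch of
 * the pattern used by dp_rx_link_desc_return_by_addr() below, which passes a
 * per-soc scratch field for that purpose.
 */
#if 0
        dp_rx_link_desc_refill_duplicate_check(
                        soc,
                        &soc->last_op_info.wbm_rel_link_desc,
                        link_desc_addr);
#endif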

/**
 * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to
 * HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to program in the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
                               hal_buff_addrinfo_t link_desc_addr,
                               uint8_t bm_action)
{
        struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
        hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
        hal_soc_handle_t hal_soc = soc->hal_soc;
        QDF_STATUS status = QDF_STATUS_E_FAILURE;
        void *src_srng_desc;

        if (!wbm_rel_srng) {
                dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc);
                return status;
        }

        /* do duplicate link desc address check */
        dp_rx_link_desc_refill_duplicate_check(
                                soc,
                                &soc->last_op_info.wbm_rel_link_desc,
                                link_desc_addr);

        if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
                /* TODO */
                /*
                 * Need API to convert from hal_ring pointer to
                 * Ring Type / Ring Id combo
                 */
                dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK",
                              soc, wbm_rel_srng);
                DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
                goto done;
        }
        src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
        if (qdf_likely(src_srng_desc)) {
                /* Return link descriptor through WBM ring (SW2WBM) */
                hal_rx_msdu_link_desc_set(hal_soc,
                                          src_srng_desc, link_desc_addr,
                                          bm_action);
                status = QDF_STATUS_SUCCESS;
        } else {
                struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

                DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);

                dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
                           srng->ring_id,
                           soc->stats.rx.err.hal_ring_access_full_fail);
                dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
                           *srng->u.src_ring.hp_addr,
                           srng->u.src_ring.reap_hp,
                           *srng->u.src_ring.tp_addr,
                           srng->u.src_ring.cached_tp);
                QDF_BUG(0);
        }
done:
        hal_srng_access_end(hal_soc, wbm_rel_srng);
        return status;
}

qdf_export_symbol(dp_rx_link_desc_return_by_addr);
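
/*
 * Illustrative only, not compiled: error-path callers typically recycle a
 * consumed MPDU link descriptor back to the idle list, as dp_rx_msdus_drop()
 * and dp_rx_bar_frame_handle() do later in this file.
 */
#if 0
        dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
                                       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
#endif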

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 * (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to program in the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
                       uint8_t bm_action)
{
        void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

        return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id / pool id of the buffers, filled in from the rx descriptor
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDU in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
                 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
                 uint8_t *mac_id,
                 uint32_t quota)
{
        uint32_t rx_bufs_used = 0;
        void *link_desc_va;
        struct hal_buf_info buf_info;
        struct dp_pdev *pdev;
        struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
        int i;
        uint8_t *rx_tlv_hdr;
        uint32_t tid;
        struct rx_desc_pool *rx_desc_pool;
        struct dp_rx_desc *rx_desc;
        /* First field in REO Dst ring Desc is buffer_addr_info */
        void *buf_addr_info = ring_desc;
        struct buffer_addr_info cur_link_desc_addr_info = { 0 };
        struct buffer_addr_info next_link_desc_addr_info = { 0 };

        hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info);

        /* buffer_addr_info is the first element of ring_desc */
        hal_rx_buf_cookie_rbm_get(soc->hal_soc,
                                  (uint32_t *)ring_desc,
                                  &buf_info);

        link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
        if (!link_desc_va) {
                dp_rx_err_debug("link desc va is null, soc %pK", soc);
                return rx_bufs_used;
        }

more_msdu_link_desc:
        /* No UNMAP required -- this is "malloc_consistent" memory */
        hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
                             &mpdu_desc_info->msdu_count);

        for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
                rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
                                        soc, msdu_list.sw_cookie[i]);

                qdf_assert_always(rx_desc);

                /* all buffers from an MSDU link belong to the same pdev */
                *mac_id = rx_desc->pool_id;
                pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
                if (!pdev) {
                        dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
                                        soc, rx_desc->pool_id);
                        return rx_bufs_used;
                }

                if (!dp_rx_desc_check_magic(rx_desc)) {
                        dp_rx_err_err("%pK: Invalid rx_desc cookie=%d",
                                      soc, msdu_list.sw_cookie[i]);
                        return rx_bufs_used;
                }

                rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
                dp_ipa_rx_buf_smmu_mapping_lock(soc);
                dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
                rx_desc->unmapped = 1;
                dp_ipa_rx_buf_smmu_mapping_unlock(soc);

                rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

                rx_bufs_used++;
                tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
                                                rx_desc->rx_buf_start);
                dp_rx_err_err("%pK: Packet received with PN error for tid :%d",
                              soc, tid);

                rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
                if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
                        hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);

                dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info,
                                      rx_desc->nbuf,
                                      QDF_TX_RX_STATUS_DROP, true);
                /* Just free the buffers */
                dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);

                dp_rx_add_to_free_desc_list(&pdev->free_list_head,
                                            &pdev->free_list_tail, rx_desc);
        }

        /*
         * If the MSDUs are spread across multiple link descriptors, we
         * cannot depend solely on the msdu_count (e.g., if an msdu is
         * spread across multiple buffers). Hence, it is necessary to check
         * the next link descriptor and release all the MSDUs that are part
         * of it.
         */
        hal_rx_get_next_msdu_link_desc_buf_addr_info(
                        link_desc_va,
                        &next_link_desc_addr_info);

        if (hal_rx_is_buf_addr_info_valid(
                                &next_link_desc_addr_info)) {
                /* Clear the next link desc info for the current link_desc */
                hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);

                dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
                                               HAL_BM_ACTION_PUT_IN_IDLE_LIST);
                hal_rx_buffer_addr_info_get_paddr(
                                &next_link_desc_addr_info,
                                &buf_info);
                /* buffer_addr_info is the first element of ring_desc */
                hal_rx_buf_cookie_rbm_get(soc->hal_soc,
                                          (uint32_t *)&next_link_desc_addr_info,
                                          &buf_info);
                cur_link_desc_addr_info = next_link_desc_addr_info;
                buf_addr_info = &cur_link_desc_addr_info;

                link_desc_va =
                        dp_rx_cookie_2_link_desc_va(soc, &buf_info);

                goto more_msdu_link_desc;
        }
        quota--;
        dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
                                       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
        return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id / pool id of the buffers, filled in by dp_rx_msdus_drop()
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
                      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
                      uint8_t *mac_id,
                      uint32_t quota)
{
        uint16_t peer_id;
        uint32_t rx_bufs_used = 0;
        struct dp_txrx_peer *txrx_peer;
        bool peer_pn_policy = false;
        dp_txrx_ref_handle txrx_ref_handle = NULL;

        peer_id = dp_rx_peer_metadata_peer_id_get(soc,
                                               mpdu_desc_info->peer_meta_data);

        txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
                                                   &txrx_ref_handle,
                                                   DP_MOD_ID_RX_ERR);

        if (qdf_likely(txrx_peer)) {
                /*
                 * TODO: Check for peer specific policies & set peer_pn_policy
                 */
                dp_err_rl("discard rx due to PN error for peer %pK",
                          txrx_peer);

                dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
        }
        dp_rx_err_err("%pK: Packet received with PN error", soc);

        /* No peer PN policy -- definitely drop */
        if (!peer_pn_policy)
                rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
                                                mpdu_desc_info,
                                                mac_id, quota);

        return rx_bufs_used;
}

#ifdef DP_RX_DELIVER_ALL_OOR_FRAMES
/**
 * dp_rx_deliver_oor_frame() - deliver OOR frames to stack
 * @soc: Datapath soc handler
 * @txrx_peer: pointer to the txrx peer object
 * @nbuf: pointer to the skb of RX frame
 * @frame_mask: the mask for special frame needed
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Note: msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
 * a single nbuf is expected.
 *
 * Return: true - nbuf has been delivered to stack, false - not.
 */
static bool
dp_rx_deliver_oor_frame(struct dp_soc *soc,
                        struct dp_txrx_peer *txrx_peer,
                        qdf_nbuf_t nbuf, uint32_t frame_mask,
                        uint8_t *rx_tlv_hdr)
{
        uint32_t l2_hdr_offset = 0;
        uint16_t msdu_len = 0;
        uint32_t skip_len;

        l2_hdr_offset =
                hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

        if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
                skip_len = l2_hdr_offset;
        } else {
                msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
                skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
                qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
        }

        QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
        dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
        qdf_nbuf_pull_head(nbuf, skip_len);
        qdf_nbuf_set_exc_frame(nbuf, 1);

        dp_info_rl("OOR frame, mpdu sn 0x%x",
                   hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
        dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, nbuf, NULL);
        return true;
}
#else
static bool
dp_rx_deliver_oor_frame(struct dp_soc *soc,
                        struct dp_txrx_peer *txrx_peer,
                        qdf_nbuf_t nbuf, uint32_t frame_mask,
                        uint8_t *rx_tlv_hdr)
{
        return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
                                           rx_tlv_hdr);
}
#endif

/**
 * dp_rx_oor_handle() - Handles the msdu which is OOR error
 *
 * @soc: core txrx main context
 * @nbuf: pointer to msdu skb
 * @peer_id: dp peer ID
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This function processes the msdu delivered from REO2TCL
 * ring with error type OOR
 *
 * Return: None
 */
static void
dp_rx_oor_handle(struct dp_soc *soc,
                 qdf_nbuf_t nbuf,
                 uint16_t peer_id,
                 uint8_t *rx_tlv_hdr)
{
        uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
                              FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
        struct dp_txrx_peer *txrx_peer = NULL;
        dp_txrx_ref_handle txrx_ref_handle = NULL;

        txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
                                                   &txrx_ref_handle,
                                                   DP_MOD_ID_RX_ERR);
        if (!txrx_peer) {
                dp_info_rl("peer not found");
                goto free_nbuf;
        }

        if (dp_rx_deliver_oor_frame(soc, txrx_peer, nbuf, frame_mask,
                                    rx_tlv_hdr)) {
                DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
                dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
                return;
        }

free_nbuf:
        if (txrx_peer)
                dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);

        DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
        dp_rx_nbuf_free(nbuf);
}
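
/*
 * Illustrative only, not compiled: a worked example of the skip_len math in
 * dp_rx_deliver_oor_frame() above, with hypothetical numbers. Assuming a
 * 128-byte rx_pkt_tlv_size, a 2-byte L3 header padding and a 1400-byte MSDU
 * on a non-fragmented nbuf:
 */
#if 0
        skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size; /* 2 + 128 = 130  */
        qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);  /* 1400 + 130     */
        qdf_nbuf_pull_head(nbuf, skip_len);              /* data -> eth hdr */
#endif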

/**
 * dp_rx_err_nbuf_pn_check() - Check if the PN number of this current packet
 *                             is a monotonic increment of the packet number
 *                             from the previous successfully re-ordered
 *                             frame.
 * @soc: Datapath SOC handle
 * @ring_desc: REO ring descriptor
 * @nbuf: Current packet
 *
 * Return: QDF_STATUS_SUCCESS if the PN check passes, else QDF_STATUS_E_FAILURE
 */
static inline QDF_STATUS
dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc,
                        qdf_nbuf_t nbuf)
{
        uint64_t prev_pn, curr_pn[2];

        if (!hal_rx_encryption_info_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
                return QDF_STATUS_SUCCESS;

        hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn);
        hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), curr_pn);

        if (curr_pn[0] > prev_pn)
                return QDF_STATUS_SUCCESS;

        return QDF_STATUS_E_FAILURE;
}
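
/*
 * Illustrative only, not compiled: the check above passes only when the
 * first 64-bit word of the current PN strictly exceeds the PN of the last
 * frame REO re-ordered successfully. For example, prev_pn = 0x1000 with
 * curr_pn[0] = 0x1001 passes, while a replayed curr_pn[0] = 0x0fff fails.
 */
#if 0
        if (curr_pn[0] > prev_pn)
                return QDF_STATUS_SUCCESS;      /* monotonic increment */
        return QDF_STATUS_E_FAILURE;            /* stale or replayed PN */
#endif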

#ifdef WLAN_SKIP_BAR_UPDATE
static
void dp_rx_err_handle_bar(struct dp_soc *soc,
                          struct dp_peer *peer,
                          qdf_nbuf_t nbuf)
{
        dp_info_rl("BAR update to H.W is skipped");
        DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
}
#else
static
void dp_rx_err_handle_bar(struct dp_soc *soc,
                          struct dp_peer *peer,
                          qdf_nbuf_t nbuf)
{
        uint8_t *rx_tlv_hdr;
        unsigned char type, subtype;
        uint16_t start_seq_num;
        uint32_t tid;
        QDF_STATUS status;
        struct ieee80211_frame_bar *bar;

        /*
         * 1. Is this a BAR frame? If not, discard it.
         * 2. If it is, get the peer id, tid, ssn.
         * 2a. Do a tid update.
         */
        rx_tlv_hdr = qdf_nbuf_data(nbuf);
        bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size);

        type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
        subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

        if (!(type == IEEE80211_FC0_TYPE_CTL &&
              subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
                dp_err_rl("Not a BAR frame!");
                return;
        }

        tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
        qdf_assert_always(tid < DP_MAX_TIDS);

        start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;

        dp_info_rl("tid %u window_size %u start_seq_num %u",
                   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);

        status = dp_rx_tid_update_wifi3(peer, tid,
                                        peer->rx_tid[tid].ba_win_size,
                                        start_seq_num,
                                        true);
        if (status != QDF_STATUS_SUCCESS) {
                dp_err_rl("failed to handle bar frame update rx tid");
                DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
        } else {
                DP_STATS_INC(soc, rx.err.ssn_update_count, 1);
        }
}
#endif
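
/*
 * Illustrative only, not compiled: the SSN extraction above in numbers.
 * With IEEE80211_SEQ_SEQ_SHIFT being 4 in the usual ieee80211 definitions,
 * an i_seq field reading 0x01e0 after le16toh() yields start_seq_num
 * 0x1e (30); the low 4 bits carry the fragment number and are shifted out.
 */
#if 0
        start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
#endif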

/**
 * _dp_rx_bar_frame_handle(): Core of the BAR frame handling
 * @soc: Datapath SoC handle
 * @nbuf: packet being processed
 * @mpdu_desc_info: mpdu desc info for the current packet
 * @tid: tid on which the packet arrived
 * @err_status: Flag to indicate if REO encountered an error while routing
 *              this frame
 * @error_code: REO error code
 *
 * Return: None
 */
static void
_dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
                        struct hal_rx_mpdu_desc_info *mpdu_desc_info,
                        uint32_t tid, uint8_t err_status, uint32_t error_code)
{
        uint16_t peer_id;
        struct dp_peer *peer;

        peer_id = dp_rx_peer_metadata_peer_id_get(soc,
                                               mpdu_desc_info->peer_meta_data);
        peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
        if (!peer)
                return;

        dp_info_rl("BAR frame: "
                   " peer_id = %d"
                   " tid = %u"
                   " SSN = %d"
                   " error status = %d",
                   peer->peer_id,
                   tid,
                   mpdu_desc_info->mpdu_seq,
                   err_status);

        if (err_status == HAL_REO_ERROR_DETECTED) {
                switch (error_code) {
                case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
                case HAL_REO_ERR_BAR_FRAME_OOR:
                        dp_rx_err_handle_bar(soc, peer, nbuf);
                        DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
                        break;
                default:
                        DP_STATS_INC(soc, rx.bar_frame, 1);
                }
        }

        dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
}

/**
 * dp_rx_bar_frame_handle() - Function to handle err BAR frames
 * @soc: core DP main context
 * @ring_desc: Hal ring desc
 * @rx_desc: dp rx desc
 * @mpdu_desc_info: mpdu desc info
 * @err_status: Flag to indicate if REO encountered an error while routing
 *              this frame
 * @err_code: REO error code
 *
 * Handle the error BAR frames received. Ensure the SOC level
 * stats are updated based on the REO error code. The BAR frames
 * are further processed by updating the Rx tids with the start
 * sequence number (SSN) and BA window size. Desc is returned
 * to the free desc list
 *
 * Return: none
 */
static void
dp_rx_bar_frame_handle(struct dp_soc *soc,
                       hal_ring_desc_t ring_desc,
                       struct dp_rx_desc *rx_desc,
                       struct hal_rx_mpdu_desc_info *mpdu_desc_info,
                       uint8_t err_status,
                       uint32_t err_code)
{
        qdf_nbuf_t nbuf;
        struct dp_pdev *pdev;
        struct rx_desc_pool *rx_desc_pool;
        uint8_t *rx_tlv_hdr;
        uint32_t tid;

        nbuf = rx_desc->nbuf;
        rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
        dp_ipa_rx_buf_smmu_mapping_lock(soc);
        dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
        rx_desc->unmapped = 1;
        dp_ipa_rx_buf_smmu_mapping_unlock(soc);
        rx_tlv_hdr = qdf_nbuf_data(nbuf);
        tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
                                        rx_tlv_hdr);
        pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);

        if (!pdev) {
                dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
                                soc, rx_desc->pool_id);
                return;
        }

        _dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status,
                                err_code);

        dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, nbuf,
                              QDF_TX_RX_STATUS_DROP, true);
        dp_rx_link_desc_return(soc, ring_desc,
                               HAL_BM_ACTION_PUT_IN_IDLE_LIST);
        dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
                                    rx_desc->pool_id);
        dp_rx_add_to_free_desc_list(&pdev->free_list_head,
                                    &pdev->free_list_tail,
                                    rx_desc);
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *                       on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in non BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while Rx window size is still initialized to value of 2. Or
 * it may also happen if negotiated window size is 1 but peer
 * sends aggregates.
 *
 * Return: None
 */
void
dp_2k_jump_handle(struct dp_soc *soc,
                  qdf_nbuf_t nbuf,
                  uint8_t *rx_tlv_hdr,
                  uint16_t peer_id,
                  uint8_t tid)
{
        struct dp_peer *peer = NULL;
        struct dp_rx_tid *rx_tid = NULL;
        uint32_t frame_mask = FRAME_MASK_IPV4_ARP;

        peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
        if (!peer) {
                dp_rx_err_info_rl("%pK: peer not found", soc);
                goto free_nbuf;
        }

        if (tid >= DP_MAX_TIDS) {
                dp_info_rl("invalid tid");
                goto nbuf_deliver;
        }

        rx_tid = &peer->rx_tid[tid];
        qdf_spin_lock_bh(&rx_tid->tid_lock);

        /* only if BA session is active, allow send Delba */
        if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
                qdf_spin_unlock_bh(&rx_tid->tid_lock);
                goto nbuf_deliver;
        }

        if (!rx_tid->delba_tx_status) {
                rx_tid->delba_tx_retry++;
                rx_tid->delba_tx_status = 1;
                rx_tid->delba_rcode =
                        IEEE80211_REASON_QOS_SETUP_REQUIRED;
                qdf_spin_unlock_bh(&rx_tid->tid_lock);
                if (soc->cdp_soc.ol_ops->send_delba) {
                        DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
                                     1);
                        soc->cdp_soc.ol_ops->send_delba(
                                        peer->vdev->pdev->soc->ctrl_psoc,
                                        peer->vdev->vdev_id,
                                        peer->mac_addr.raw,
                                        tid,
                                        rx_tid->delba_rcode,
                                        CDP_DELBA_2K_JUMP);
                }
        } else {
                qdf_spin_unlock_bh(&rx_tid->tid_lock);
        }

nbuf_deliver:
        if (dp_rx_deliver_special_frame(soc, peer->txrx_peer, nbuf, frame_mask,
                                        rx_tlv_hdr)) {
                DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
                dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
                return;
        }

free_nbuf:
        if (peer)
                dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);

        DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
        dp_rx_nbuf_free(nbuf);
}
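
/*
 * Illustrative only, not compiled: the control-plane half of the 2k-jump
 * recovery above. When a BA session is active, the host asks the target to
 * tear the session down so that the peer re-negotiates ADDBA; the reason
 * code is IEEE80211_REASON_QOS_SETUP_REQUIRED with CDP_DELBA_2K_JUMP as the
 * CDP-level cause, exactly as invoked in dp_2k_jump_handle().
 */
#if 0
        soc->cdp_soc.ol_ops->send_delba(peer->vdev->pdev->soc->ctrl_psoc,
                                        peer->vdev->vdev_id,
                                        peer->mac_addr.raw, tid,
                                        rx_tid->delba_rcode,
                                        CDP_DELBA_2K_JUMP);
#endif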

#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
    defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - to handle the invalid
 *                                                   peer id exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * In certain types of packets, if the peer_id is not correct, the
 * driver may not be able to find the peer. Try finding the peer by
 * addr_2 of the received MPDU. If the peer is found, then most likely
 * sw_peer_id & ast_idx are corrupted.
 *
 * Return: True if the peer is found by addr_2 of the received MPDU,
 *         else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
                                              uint8_t pool_id,
                                              uint8_t *rx_tlv_hdr,
                                              qdf_nbuf_t nbuf)
{
        struct dp_peer *peer = NULL;
        uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
        struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
        struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

        if (!pdev) {
                dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
                                soc, pool_id);
                return false;
        }
        /*
         * WAR: In certain types of packets, if the peer_id is not correct,
         * the driver may not be able to find the peer. Try finding the peer
         * by addr_2 of the received MPDU.
         */
        if (wh)
                peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
                                              DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
        if (peer) {
                dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
                hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
                                     QDF_TRACE_LEVEL_DEBUG);
                DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
                                 1, qdf_nbuf_len(nbuf));
                dp_rx_nbuf_free(nbuf);
                dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);

                return true;
        }
        return false;
}
#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
                                              uint8_t pool_id,
                                              uint8_t *rx_tlv_hdr,
                                              qdf_nbuf_t nbuf)
{
        return false;
}
#endif

/**
 * dp_rx_check_pkt_len() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pktlen > RX_DATA_BUFFER_SIZE, else return false
 *
 */
static inline
bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
        if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
                DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
                                 1, pkt_len);
                return true;
        } else {
                return false;
        }
}
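
/*
 * Illustrative only, not compiled: dp_rx_check_pkt_len() is used as an early
 * guard before committing a length to the nbuf, as dp_rx_null_q_desc_handle()
 * does below. Oversized frames are dropped and accounted under
 * rx.err.rx_invalid_pkt_len.
 */
#if 0
        pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
        if (dp_rx_check_pkt_len(soc, pkt_len))
                goto drop_nbuf;
#endif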

/**
 * dp_rx_deliver_to_osif_stack() - function to deliver rx pkts to stack
 * @soc: DP soc
 * @vdev: DP vdev handle
 * @txrx_peer: pointer to the txrx_peer object
 * @nbuf: skb list head
 * @tail: skb list tail
 * @is_eapol: eapol pkt check
 *
 * Return: None
 */
#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
static inline void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
                            struct dp_vdev *vdev,
                            struct dp_txrx_peer *txrx_peer,
                            qdf_nbuf_t nbuf,
                            qdf_nbuf_t tail,
                            bool is_eapol)
{
        if (is_eapol && soc->eapol_over_control_port)
                dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
        else
                dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#else
static inline void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
                            struct dp_vdev *vdev,
                            struct dp_txrx_peer *txrx_peer,
                            qdf_nbuf_t nbuf,
                            qdf_nbuf_t tail,
                            bool is_eapol)
{
        dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#endif

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_err_match_dhost() - function to check whether dest-mac is correct
 * @eh: Ethernet header of incoming packet
 * @vdev: dp_vdev object of the VAP on which this data packet is received
 *
 * Return: 1 if the destination mac is correct,
 *         0 if this frame is not correctly destined to this VAP/MLD
 */
int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
{
        return ((qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
                             QDF_MAC_ADDR_SIZE) == 0) ||
                (qdf_mem_cmp(eh->ether_dhost, &vdev->mld_mac_addr.raw[0],
                             QDF_MAC_ADDR_SIZE) == 0));
}
#else
int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
{
        return (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
                            QDF_MAC_ADDR_SIZE) == 0);
}
#endif
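
/*
 * Illustrative only, not compiled: with WLAN_FEATURE_11BE_MLO, a frame is
 * accepted when its destination matches either the vdev link address or the
 * MLD address. This sketch mirrors the unauthorized-peer EAPOL check in
 * dp_rx_null_q_desc_handle() below; eh and vdev are assumed in scope.
 */
#if 0
        if (!dp_rx_err_match_dhost(eh, vdev))
                goto drop_nbuf; /* not destined to this VAP/MLD */
#endif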

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_err_drop_3addr_mcast() - Check if feature drop_3addr_mcast is enabled
 *                                If so, drop the multicast frame.
 * @vdev: datapath vdev
 * @rx_tlv_hdr: TLV header
 *
 * Return: true if packet is to be dropped,
 *         false, if packet is not dropped.
 */
static bool
dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
{
        struct dp_soc *soc = vdev->pdev->soc;

        if (!vdev->drop_3addr_mcast)
                return false;

        if (vdev->opmode != wlan_op_mode_sta)
                return false;

        if (hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
                return true;

        return false;
}

/**
 * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed
 *                                  for this frame received in REO error ring.
 * @soc: Datapath SOC handle
 * @error: REO error detected or not
 * @error_code: Error code in case of REO error
 *
 * Return: true if PN check is needed in software,
 *         false if PN check is not needed.
 */
static inline bool
dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
                             uint32_t error_code)
{
        return (soc->features.pn_in_reo_dest &&
                (error == HAL_REO_ERROR_DETECTED &&
                 (hal_rx_reo_is_2k_jump(error_code) ||
                  hal_rx_reo_is_oor_error(error_code) ||
                  hal_rx_reo_is_bar_oor_2k_jump(error_code))));
}
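
/*
 * Illustrative only, not compiled: dp_rx_reo_err_entry_process() below uses
 * this predicate once per MPDU to decide whether the host must verify PN
 * continuity itself (2k-jump/OOR style errors on targets that publish the
 * previous PN in the REO destination ring). The argument names here stand in
 * for the caller's REO error status and error code.
 */
#if 0
        is_pn_check_needed = dp_rx_err_is_pn_check_needed(soc, err_status,
                                                          err_code);
#endif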
  945. /**
  946. * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
  947. * descriptor violation on either a
  948. * REO or WBM ring
  949. *
  950. * @soc: core DP main context
  951. * @nbuf: buffer pointer
  952. * @rx_tlv_hdr: start of rx tlv header
  953. * @pool_id: mac id
  954. * @txrx_peer: txrx peer handle
  955. *
  956. * This function handles NULL queue descriptor violations arising out
  957. * a missing REO queue for a given peer or a given TID. This typically
  958. * may happen if a packet is received on a QOS enabled TID before the
  959. * ADDBA negotiation for that TID, when the TID queue is setup. Or
  960. * it may also happen for MC/BC frames if they are not routed to the
  961. * non-QOS TID queue, in the absence of any other default TID queue.
  962. * This error can show up both in a REO destination or WBM release ring.
  963. *
  964. * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
  965. * if nbuf could not be handled or dropped.
  966. */
  967. static QDF_STATUS
  968. dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
  969. uint8_t *rx_tlv_hdr, uint8_t pool_id,
  970. struct dp_txrx_peer *txrx_peer)
  971. {
  972. uint32_t pkt_len;
  973. uint16_t msdu_len;
  974. struct dp_vdev *vdev;
  975. uint8_t tid;
  976. qdf_ether_header_t *eh;
  977. struct hal_rx_msdu_metadata msdu_metadata;
  978. uint16_t sa_idx = 0;
  979. bool is_eapol = 0;
  980. bool enh_flag;
  981. qdf_nbuf_set_rx_chfrag_start(nbuf,
  982. hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
  983. rx_tlv_hdr));
  984. qdf_nbuf_set_rx_chfrag_end(nbuf,
  985. hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
  986. rx_tlv_hdr));
  987. qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
  988. rx_tlv_hdr));
  989. qdf_nbuf_set_da_valid(nbuf,
  990. hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
  991. rx_tlv_hdr));
  992. qdf_nbuf_set_sa_valid(nbuf,
  993. hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
  994. rx_tlv_hdr));
  995. hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
  996. msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
  997. pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
  998. if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
  999. if (dp_rx_check_pkt_len(soc, pkt_len))
  1000. goto drop_nbuf;
  1001. /* Set length in nbuf */
  1002. qdf_nbuf_set_pktlen(
  1003. nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
  1004. qdf_assert_always(nbuf->data == rx_tlv_hdr);
  1005. }
  1006. /*
  1007. * Check if DMA completed -- msdu_done is the last bit
  1008. * to be written
  1009. */
  1010. if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
  1011. dp_err_rl("MSDU DONE failure");
  1012. hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
  1013. QDF_TRACE_LEVEL_INFO);
  1014. qdf_assert(0);
  1015. }
  1016. if (!txrx_peer &&
  1017. dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
  1018. rx_tlv_hdr, nbuf))
  1019. return QDF_STATUS_E_FAILURE;
  1020. if (!txrx_peer) {
  1021. bool mpdu_done = false;
  1022. struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
  1023. if (!pdev) {
  1024. dp_err_rl("pdev is null for pool_id = %d", pool_id);
  1025. return QDF_STATUS_E_FAILURE;
  1026. }
  1027. dp_err_rl("txrx_peer is NULL");
  1028. DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
  1029. qdf_nbuf_len(nbuf));
  1030. /* QCN9000 has the support enabled */
  1031. if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
  1032. mpdu_done = true;
  1033. nbuf->next = NULL;
  1034. /* Trigger invalid peer handler wrapper */
  1035. dp_rx_process_invalid_peer_wrapper(soc,
  1036. nbuf, mpdu_done, pool_id);
  1037. } else {
  1038. mpdu_done = soc->arch_ops.dp_rx_chain_msdus(soc, nbuf,
  1039. rx_tlv_hdr,
  1040. pool_id);
  1041. /* Trigger invalid peer handler wrapper */
  1042. dp_rx_process_invalid_peer_wrapper(soc,
  1043. pdev->invalid_peer_head_msdu,
  1044. mpdu_done, pool_id);
  1045. }
  1046. if (mpdu_done) {
  1047. pdev->invalid_peer_head_msdu = NULL;
  1048. pdev->invalid_peer_tail_msdu = NULL;
  1049. }
  1050. return QDF_STATUS_E_FAILURE;
  1051. }
  1052. vdev = txrx_peer->vdev;
  1053. if (!vdev) {
  1054. dp_err_rl("Null vdev!");
  1055. DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
  1056. goto drop_nbuf;
  1057. }
  1058. /*
  1059. * Advance the packet start pointer by total size of
  1060. * pre-header TLV's
  1061. */
  1062. if (qdf_nbuf_is_frag(nbuf))
  1063. qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
  1064. else
  1065. qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
  1066. soc->rx_pkt_tlv_size));
  1067. DP_STATS_INC_PKT(vdev, rx_i.null_q_desc_pkt, 1, qdf_nbuf_len(nbuf));
  1068. dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
  1069. if (dp_rx_err_drop_3addr_mcast(vdev, rx_tlv_hdr)) {
  1070. DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.mcast_3addr_drop, 1);
  1071. goto drop_nbuf;
  1072. }
  1073. if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
  1074. sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);
  1075. if ((sa_idx < 0) ||
  1076. (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
  1077. DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
  1078. goto drop_nbuf;
  1079. }
  1080. }
  1081. if ((!soc->mec_fw_offload) &&
  1082. dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
  1083. /* this is a looped back MCBC pkt, drop it */
  1084. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
  1085. qdf_nbuf_len(nbuf));
  1086. goto drop_nbuf;
  1087. }
  1088. /*
  1089. * In qwrap mode if the received packet matches with any of the vdev
  1090. * mac addresses, drop it. Donot receive multicast packets originated
  1091. * from any proxysta.
  1092. */
  1093. if (check_qwrap_multicast_loopback(vdev, nbuf)) {
  1094. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.mec_drop, 1,
  1095. qdf_nbuf_len(nbuf));
  1096. goto drop_nbuf;
  1097. }
  1098. if (qdf_unlikely(txrx_peer->nawds_enabled &&
  1099. hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
  1100. rx_tlv_hdr))) {
  1101. dp_err_rl("free buffer for multicast packet");
  1102. DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.nawds_mcast_drop, 1);
  1103. goto drop_nbuf;
  1104. }
  1105. if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
  1106. dp_err_rl("mcast Policy Check Drop pkt");
  1107. DP_PEER_PER_PKT_STATS_INC(txrx_peer, rx.policy_check_drop, 1);
  1108. goto drop_nbuf;
  1109. }
  1110. /* WDS Source Port Learning */
  1111. if (!soc->ast_offload_support &&
  1112. qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
  1113. vdev->wds_enabled))
  1114. dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, txrx_peer, nbuf,
  1115. msdu_metadata);
  1116. if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
  1117. struct dp_peer *peer;
  1118. struct dp_rx_tid *rx_tid;
  1119. tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
  1120. peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
  1121. DP_MOD_ID_RX_ERR);
  1122. if (peer) {
  1123. rx_tid = &peer->rx_tid[tid];
  1124. qdf_spin_lock_bh(&rx_tid->tid_lock);
  1125. if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
  1126. dp_rx_tid_setup_wifi3(peer, tid, 1,
  1127. IEEE80211_SEQ_MAX);
  1128. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  1129. /* IEEE80211_SEQ_MAX indicates invalid start_seq */
  1130. dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
  1131. }
  1132. }
  1133. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  1134. if (!txrx_peer->authorize) {
  1135. is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
  1136. qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
  1137. if (is_eapol) {
  1138. if (!dp_rx_err_match_dhost(eh, vdev))
  1139. goto drop_nbuf;
  1140. } else {
  1141. goto drop_nbuf;
  1142. }
  1143. }
	/*
	 * Drop packets in this path if cce_match is found. Packets will come
	 * in the following path depending on whether tidQ is set up:
	 * 1. If tidQ is set up: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE and
	 *    cce_match = 1.
	 *    Packets with WIFILI_HAL_RX_WBM_REO_PSH_RSN_ROUTE are already
	 *    dropped.
	 * 2. If tidQ is not set up: WIFILI_HAL_RX_WBM_REO_PSH_RSN_ERROR and
	 *    cce_match = 1.
	 *    These packets need to be dropped and should not get delivered
	 *    to the stack.
	 */
	if (qdf_unlikely(dp_rx_err_cce_drop(soc, vdev, nbuf, rx_tlv_hdr))) {
		goto drop_nbuf;
	}
	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, txrx_peer);
	} else {
		enh_flag = vdev->pdev->enhanced_stats_en;
		qdf_nbuf_set_next(nbuf, NULL);
		DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
					  enh_flag);
		/*
		 * Update the protocol tag in SKB based on
		 * CCE metadata
		 */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID,
					  true, true);
		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf,
				      rx_tlv_hdr, true);

		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
				 soc->hal_soc, rx_tlv_hdr) &&
				 (vdev->rx_decap_type ==
				  htt_cmn_pkt_type_ethernet))) {
			DP_PEER_MC_INCC_PKT(txrx_peer, 1, qdf_nbuf_len(nbuf),
					    enh_flag);

			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
				DP_PEER_BC_INCC_PKT(txrx_peer, 1,
						    qdf_nbuf_len(nbuf),
						    enh_flag);
		}

		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
					    is_eapol);
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	dp_rx_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}
/**
 * dp_rx_reo_err_entry_process() - Handler for REO error entry processing
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: pointer to mpdu level description info
 * @link_desc_va: pointer to msdu_link_desc virtual address
 * @err_code: reo error code fetched from ring entry
 *
 * Function to handle msdus fetched from the msdu link desc; currently
 * supports the REO error NULL queue, 2K jump, and OOR cases.
 *
 * Return: msdu count processed
 */
static uint32_t
dp_rx_reo_err_entry_process(struct dp_soc *soc,
			    void *ring_desc,
			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			    void *link_desc_va,
			    enum hal_reo_error_code err_code)
{
	uint32_t rx_bufs_used = 0;
	struct dp_pdev *pdev;
	int i;
	uint8_t *rx_tlv_hdr_first;
	uint8_t *rx_tlv_hdr_last;
	uint32_t tid = DP_MAX_TIDS;
	uint16_t peer_id;
	struct dp_rx_desc *rx_desc;
	struct rx_desc_pool *rx_desc_pool;
	qdf_nbuf_t nbuf;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
	struct buffer_addr_info next_link_desc_addr_info = { 0 };
	/* First field in REO Dst ring Desc is buffer_addr_info */
	void *buf_addr_info = ring_desc;
	qdf_nbuf_t head_nbuf = NULL;
	qdf_nbuf_t tail_nbuf = NULL;
	uint16_t msdu_processed = 0;
	QDF_STATUS status;
	bool ret, is_pn_check_needed;
	uint8_t rx_desc_pool_id;
	struct dp_txrx_peer *txrx_peer = NULL;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	hal_ring_handle_t hal_ring_hdl = soc->reo_exception_ring.hal_srng;

	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
					mpdu_desc_info->peer_meta_data);

	is_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
							  HAL_REO_ERROR_DETECTED,
							  err_code);
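	/*
	 * Reap every MSDU hanging off the current MSDU link descriptor;
	 * once the list is exhausted, follow the next-link pointer (checked
	 * after the loop) and repeat until the whole chain is released.
	 */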
more_msdu_link_desc:
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &num_msdus);
	for (i = 0; i < num_msdus; i++) {
		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc,
						msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		nbuf = rx_desc->nbuf;

		/*
		 * This is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use) ||
		    qdf_unlikely(!nbuf)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			/* ignore duplicate RX desc and continue to process */
			/* Pop out the descriptor */
			continue;
		}

		ret = dp_rx_desc_paddr_sanity_check(rx_desc,
						    msdu_list.paddr[i]);
		if (!ret) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			rx_desc->in_err_state = 1;
			continue;
		}
		rx_desc_pool_id = rx_desc->pool_id;
		/* all buffers from a MSDU link belong to same pdev */
		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc_pool_id);

		rx_desc_pool = &soc->rx_desc_buf[rx_desc_pool_id];
		dp_ipa_rx_buf_smmu_mapping_lock(soc);
		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
		rx_desc->unmapped = 1;
		dp_ipa_rx_buf_smmu_mapping_unlock(soc);

		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
		rx_bufs_used++;
		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);

		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);

		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION))
			continue;

		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
					     rx_desc_pool_id)) {
			/* MSDU queued back to the pool */
			goto process_next_msdu;
		}

		if (is_pn_check_needed) {
			if (msdu_list.msdu_info[i].msdu_flags &
			    HAL_MSDU_F_FIRST_MSDU_IN_MPDU) {
				hal_rx_tlv_populate_mpdu_desc_info(
						soc->hal_soc,
						qdf_nbuf_data(nbuf),
						mpdu_desc_info);
			} else {
				/*
				 * DO NOTHING -
				 * Continue using the same mpdu_desc_info
				 * details populated from the first msdu in
				 * the mpdu.
				 */
			}

			status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf);
			if (QDF_IS_STATUS_ERROR(status)) {
				DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail,
					     1);
				dp_rx_nbuf_free(nbuf);
				goto process_next_msdu;
			}

			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
					mpdu_desc_info->peer_meta_data);

			if (mpdu_desc_info->bar_frame)
				_dp_rx_bar_frame_handle(soc, nbuf,
							mpdu_desc_info, tid,
							HAL_REO_ERROR_DETECTED,
							err_code);
		}

		if (qdf_unlikely(mpdu_desc_info->mpdu_flags &
				 HAL_MPDU_F_RAW_AMPDU)) {
			dp_err_rl("RAW ampdu in REO error not expected");
			DP_STATS_INC(soc, rx.err.reo_err_raw_mpdu_drop, 1);
			qdf_nbuf_list_free(head_nbuf);
			goto process_next_msdu;
		}

		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);

		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
			nbuf = dp_rx_sg_create(soc, head_nbuf);
			qdf_nbuf_set_is_frag(nbuf, 1);
			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
		}
		switch (err_code) {
		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
			/*
			 * Only the first msdu carries a valid mpdu start
			 * description tlv; reuse its tid for the following
			 * msdus.
			 */
			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							   rx_tlv_hdr_last))
				tid = hal_rx_mpdu_start_tid_get(
							soc->hal_soc,
							rx_tlv_hdr_first);

			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
					  peer_id, tid);
			break;
		case HAL_REO_ERR_REGULAR_FRAME_OOR:
		case HAL_REO_ERR_BAR_FRAME_OOR:
			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
			break;
		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
			txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(
							soc, peer_id,
							&txrx_ref_handle,
							DP_MOD_ID_RX_ERR);
			if (!txrx_peer)
				dp_info_rl("txrx_peer is null peer_id %u",
					   peer_id);
			dp_rx_null_q_desc_handle(soc, nbuf, rx_tlv_hdr_last,
						 rx_desc_pool_id, txrx_peer);
			if (txrx_peer)
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_RX_ERR);
			break;
		default:
			dp_err_rl("Unsupported error code %d", err_code);
			dp_rx_nbuf_free(nbuf);
		}

process_next_msdu:
		msdu_processed++;
		head_nbuf = NULL;
		tail_nbuf = NULL;
	}
	/*
	 * If the msdus are spread across multiple link descriptors,
	 * we cannot depend solely on the msdu_count (e.g., when an msdu
	 * is spread across multiple buffers). Hence, it is necessary to
	 * check the next link descriptor and release all the msdus that
	 * are part of it.
	 */
	hal_rx_get_next_msdu_link_desc_buf_addr_info(
			link_desc_va,
			&next_link_desc_addr_info);

	if (hal_rx_is_buf_addr_info_valid(
				&next_link_desc_addr_info)) {
		/* Clear the next link desc info for the current link_desc */
		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
		dp_rx_link_desc_return_by_addr(
				soc,
				buf_addr_info,
				HAL_BM_ACTION_PUT_IN_IDLE_LIST);

		hal_rx_buffer_addr_info_get_paddr(
				&next_link_desc_addr_info,
				&buf_info);
		/* buffer_addr_info is the first element of ring_desc */
		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
					  (uint32_t *)&next_link_desc_addr_info,
					  &buf_info);
		link_desc_va =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
		cur_link_desc_addr_info = next_link_desc_addr_info;
		buf_addr_info = &cur_link_desc_addr_info;

		goto more_msdu_link_desc;
	}

	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);

	return rx_bufs_used;
}
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
 *			       frames to OS or wifi parse errors.
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @txrx_peer: peer reference
 * @err_code: rxdma err code
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * Return: None
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
			uint8_t err_code, uint8_t mac_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;

	if (dp_rx_check_pkt_len(soc, pkt_len)) {
		/* Drop & free packet */
		dp_rx_nbuf_free(nbuf);
		return;
	}
	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!txrx_peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
		return;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc,
				  vdev);
		/* Drop & free packet */
		dp_rx_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset);
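	/*
	 * For wifi-parse errors, peek at the ethertype: a VLAN-tagged STP
	 * frame is counted and sent down the mesh path, while any other
	 * VLAN-tagged frame is delivered through the normal rx path.
	 */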
	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
							htons(QDF_LLC_STP)) {
				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
				goto process_mesh;
			} else {
				goto process_rx;
			}
		}
	}
	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report unencrypted frame error.
	 * To pass WAPI cert case, SW needs to pass unencrypted
	 * rekey frame to stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		goto process_rx;
	}
	/*
	 * In the dynamic WEP case, rekey frames are not encrypted,
	 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled
	 * and key install is already done.
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;
process_mesh:

	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		dp_rx_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				      == QDF_STATUS_SUCCESS) {
			dp_rx_err_info("%pK: mesh pkt filtered", soc);
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			dp_rx_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer);
	}
process_rx:
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
					      qdf_nbuf_len(nbuf));

		if (is_broadcast) {
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1,
						      qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, txrx_peer);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
		DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
					    qdf_nbuf_is_ipv4_eapol_pkt(nbuf));
	}
	return;
}
/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @txrx_peer: txrx peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr,
			     struct dp_txrx_peer *txrx_peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	uint16_t rx_seq, fragno;
	uint8_t is_raw;
	unsigned int tid;
	QDF_STATUS status;
	struct cdp_rx_mic_err_info mic_failure_info;

	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
					    rx_tlv_hdr))
		return;

	if (!txrx_peer) {
		dp_info_rl("txrx_peer not found");
		goto fail;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_info_rl("VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		dp_info_rl("PDEV not found");
		goto fail;
	}

	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
	if (is_raw) {
		fragno = dp_rx_frag_get_mpdu_frag_number(soc,
							 qdf_nbuf_data(nbuf));
		/* Can get only last fragment */
		if (fragno) {
			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
							qdf_nbuf_data(nbuf));
			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
							qdf_nbuf_data(nbuf));

			status = dp_rx_defrag_add_last_frag(soc, txrx_peer,
							    tid, rx_seq, nbuf);
			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
				   "status %d !", rx_seq, fragno, status);
			return;
		}
	}
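	/*
	 * Populate the MIC failure info (DA/TA pulled from the 802.11
	 * header) that is indicated to umac via the rx_mic_error callback.
	 */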
	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.da_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get da_mac_addr");
		goto fail;
	}

	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.ta_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get ta_mac_addr");
		goto fail;
	}

	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = NULL;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);

fail:
	dp_rx_nbuf_free(nbuf);
	return;
}
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_MCAST_MLO)
static bool dp_rx_igmp_handler(struct dp_soc *soc,
			       struct dp_vdev *vdev,
			       struct dp_txrx_peer *peer,
			       qdf_nbuf_t nbuf)
{
	if (soc->arch_ops.dp_rx_mcast_handler) {
		if (soc->arch_ops.dp_rx_mcast_handler(soc, vdev, peer, nbuf))
			return true;
	}
	return false;
}
#else
static bool dp_rx_igmp_handler(struct dp_soc *soc,
			       struct dp_vdev *vdev,
			       struct dp_txrx_peer *peer,
			       qdf_nbuf_t nbuf)
{
	return false;
}
#endif
/**
 * dp_rx_err_route_hdl() - Function to send EAPOL frames to stack.
 *			   Free any other packet which comes in
 *			   this path.
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @txrx_peer: txrx peer handle
 * @rx_tlv_hdr: start of rx tlv header
 * @err_src: rxdma/reo
 *
 * This function indicates EAPOL frames received in the wbm error ring
 * to the stack. Any other frame is dropped.
 *
 * Return: None
 */
static void
dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
		    struct dp_txrx_peer *txrx_peer, uint8_t *rx_tlv_hdr,
		    enum hal_rx_wbm_error_source err_src)
{
	uint32_t pkt_len;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	struct hal_rx_msdu_metadata msdu_metadata;
	bool is_eapol;

	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_check_pkt_len(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(
			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!txrx_peer)
		goto drop_nbuf;

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
	else
		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
					  soc->rx_pkt_tlv_size));

	if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf))
		return;

	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

	/*
	 * Indicate EAPOL frame to stack only when vap mac address
	 * matches the destination address.
	 */
	is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
	if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		qdf_ether_header_t *eh =
			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		if (dp_rx_err_match_dhost(eh, vdev)) {
			DP_STATS_INC_PKT(vdev, rx_i.routed_eapol_pkt, 1,
					 qdf_nbuf_len(nbuf));

			/*
			 * Update the protocol tag in SKB based on
			 * CCE metadata.
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID,
						  true, true);
			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
					      true);
			DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
						  qdf_nbuf_len(nbuf),
						  vdev->pdev->enhanced_stats_en);
			qdf_nbuf_set_exc_frame(nbuf, 1);
			qdf_nbuf_set_next(nbuf, NULL);

			dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf,
						    NULL, is_eapol);
			return;
		}
	}

drop_nbuf:
	DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
		      err_src == HAL_RX_WBM_ERR_SRC_REO);
	DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
		      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);
	dp_rx_nbuf_free(nbuf);
}
#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef DP_RX_DESC_COOKIE_INVALIDATE
/**
 * dp_rx_link_cookie_check() - Validate link desc cookie
 * @ring_desc: ring descriptor
 *
 * Return: qdf status
 */
static inline QDF_STATUS
dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
{
	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
 * @ring_desc: ring descriptor
 *
 * Return: None
 */
static inline void
dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
{
	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
}
#else
static inline QDF_STATUS
dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
{
}
#endif
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_err_ring_record_entry() - Record rx err ring history
 * @soc: Datapath soc structure
 * @paddr: paddr of the buffer in RX err ring
 * @sw_cookie: SW cookie of the buffer in RX err ring
 * @rbm: Return buffer manager of the buffer in RX err ring
 *
 * Return: None
 */
static inline void
dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
			    uint32_t sw_cookie, uint8_t rbm)
{
	struct dp_buf_info_record *record;
	uint32_t idx;

	if (qdf_unlikely(!soc->rx_err_ring_history))
		return;

	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
					DP_RX_ERR_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->rx_err_ring_history->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->hbi.paddr = paddr;
	record->hbi.sw_cookie = sw_cookie;
	record->hbi.rbm = rbm;
}
#else
static inline void
dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
			    uint32_t sw_cookie, uint8_t rbm)
{
}
#endif
#ifdef HANDLE_RX_REROUTE_ERR
static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc,
				     hal_ring_desc_t ring_desc)
{
	int lmac_id = DP_INVALID_LMAC_ID;
	struct dp_rx_desc *rx_desc;
	struct hal_buf_info hbi;
	struct dp_pdev *pdev;
	struct rx_desc_pool *rx_desc_pool;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);

	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie);

	/* sanity */
	if (!rx_desc) {
		DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1);
		goto assert_return;
	}

	if (!rx_desc->nbuf)
		goto assert_return;

	dp_rx_err_ring_record_entry(soc, hbi.paddr,
				    hbi.sw_cookie,
				    hal_rx_ret_buf_manager_get(soc->hal_soc,
							       ring_desc));
	if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) {
		DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
		rx_desc->in_err_state = 1;
		goto assert_return;
	}

	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	/* After this point the rx_desc and nbuf are valid */
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	qdf_assert_always(!rx_desc->unmapped);
	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
	rx_desc->unmapped = 1;
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
				    rx_desc->pool_id);
	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
	lmac_id = rx_desc->pool_id;
	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
				    &pdev->free_list_tail,
				    rx_desc);
	return lmac_id;

assert_return:
	qdf_assert(0);
	return lmac_id;
}
static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
{
	int ret;
	uint64_t cur_time_stamp;
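	/*
	 * Two self-recovery triggers guard against reroute-error storms:
	 * an absolute count of rerouted MSDU buffers, and a rate check over
	 * a DP_RX_ERR_ROUTE_TIMEOUT_US window.
	 */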
	DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1);

	/* Recover if overall error count exceeds threshold */
	if (soc->stats.rx.err.reo_err_msdu_buf_rcved >
	    DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) {
		dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
		       soc->stats.rx.err.reo_err_msdu_buf_rcved,
		       soc->rx_route_err_start_pkt_ts);
		qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR);
	}

	cur_time_stamp = qdf_get_log_timestamp_usecs();
	if (!soc->rx_route_err_start_pkt_ts)
		soc->rx_route_err_start_pkt_ts = cur_time_stamp;

	/* Recover if threshold number of packets received in threshold time */
	if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) >
	    DP_RX_ERR_ROUTE_TIMEOUT_US) {
		soc->rx_route_err_start_pkt_ts = cur_time_stamp;
		if (soc->rx_route_err_in_window >
		    DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) {
			qdf_trigger_self_recovery(NULL,
						  QDF_RX_REG_PKT_ROUTE_ERR);
			dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
			       soc->stats.rx.err.reo_err_msdu_buf_rcved,
			       soc->rx_route_err_start_pkt_ts);
		} else {
			soc->rx_route_err_in_window = 1;
		}
	} else {
		soc->rx_route_err_in_window++;
	}

	ret = dp_rx_err_handle_msdu_buf(soc, ring_desc);

	return ret;
}
#else /* HANDLE_RX_REROUTE_ERR */
static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
{
	qdf_assert_always(0);

	return DP_INVALID_LMAC_ID;
}
#endif /* HANDLE_RX_REROUTE_ERR */
#ifdef WLAN_MLO_MULTI_CHIP
static void dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm)
{
	/*
	 * For the WIN use case we should only get fragment packets in
	 * this ring; since fragmentation is not supported for the MLO
	 * case, we should not see link descriptors from another soc.
	 *
	 * Assert that the link descriptors come from the local soc.
	 */
	qdf_assert_always(rbm == soc->idle_link_bm_id);
}
#else
static void dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm)
{
}
#endif
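
/**
 * dp_rx_err_process() - Processes error frames routed to the REO error ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring
 * @quota: number of units (packets) that can be serviced in one shot
 *
 * Implements error processing and the top-level demultiplexer for frames
 * routed to the REO error ring: per REO error code, entries are handed to
 * the fragment, BAR, PN, 2K-jump/OOR/NULL-queue, or drop handlers.
 *
 * Return: number of rx buffers reaped
 */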
uint32_t
dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t err_status;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	uint16_t num_msdus;
	struct dp_rx_desc *rx_desc = NULL;
	QDF_STATUS status;
	bool ret;
	uint32_t error_code = 0;
	bool sw_pn_check_needed;
	int max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
	int i, rx_bufs_reaped_total;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc,
			      hal_ring_hdl);
		goto done;
	}
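	/*
	 * Reap one REO exception ring entry per iteration: classify the
	 * error, validate the MSDU link descriptor, and dispatch to the
	 * fragment, BAR, PN, or REO error-code handlers below.
	 */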
	while (qdf_likely(quota-- && (ring_desc =
				      hal_srng_dst_peek(hal_soc,
							hal_ring_hdl)))) {
		DP_STATS_INC(soc, rx.err_ring_pkts, 1);
		err_status = hal_rx_err_status_get(hal_soc, ring_desc);
		buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc);

		if (err_status == HAL_REO_ERROR_DETECTED)
			error_code = hal_rx_get_reo_error_code(hal_soc,
							       ring_desc);

		qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0);
		sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
								  err_status,
								  error_code);
		if (!sw_pn_check_needed) {
			/*
			 * MPDU desc info will be present in the REO desc
			 * only in the below scenarios
			 * 1) pn_in_dest_disabled: always
			 * 2) pn_in_dest enabled: all cases except 2k-jump
			 *    and OOR errors
			 */
			hal_rx_mpdu_desc_info_get(hal_soc, ring_desc,
						  &mpdu_desc_info);
		}
		if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0)
			goto next_entry;

		/*
		 * For REO error ring, only MSDU LINK DESC is expected.
		 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case.
		 */
		if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) {
			int lmac_id;

			lmac_id = dp_rx_err_exception(soc, ring_desc);
			if (lmac_id >= 0)
				rx_bufs_reaped[lmac_id] += 1;
			goto next_entry;
		}

		hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
					  &hbi);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) &
				  soc->link_desc_id_start);

		dp_idle_link_bm_id_check(soc, hbi.rbm);

		status = dp_rx_link_cookie_check(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
			break;
		}

		hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);
		if (!num_msdus ||
		    !dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[0])) {
			dp_rx_err_info_rl("Invalid MSDU info num_msdus %u cookie: 0x%x",
					  num_msdus, msdu_list.sw_cookie[0]);
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
			goto next_entry;
		}

		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
					    msdu_list.sw_cookie[0],
					    msdu_list.rbm[0]);
		// TODO - BE- Check if the RBM is to be checked for all chips
		if (qdf_unlikely((msdu_list.rbm[0] !=
				  dp_rx_get_rx_bm_id(soc)) &&
				 (msdu_list.rbm[0] !=
				  soc->idle_link_bm_id) &&
				 (msdu_list.rbm[0] !=
				  dp_rx_get_defrag_bm_id(soc)))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				dp_rx_err_err("%pK: Invalid RBM %d",
					      soc, msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM)*/
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			goto next_entry;
		}

		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc,
						msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		if (sw_pn_check_needed) {
			goto process_reo_error_code;
		}
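		/*
		 * BAR frames are expected to carry exactly one MSDU
		 * (asserted below); hand them to the dedicated BAR handler.
		 */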
		if (mpdu_desc_info.bar_frame) {
			qdf_assert_always(mpdu_desc_info.msdu_count == 1);

			dp_rx_bar_frame_handle(soc, ring_desc, rx_desc,
					       &mpdu_desc_info, err_status,
					       error_code);

			rx_bufs_reaped[mac_id] += 1;
			goto next_entry;
		}

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for the
			 * fragmented case. We drop the msdus and release the
			 * link desc back if there is more than one msdu in
			 * the link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				goto next_entry;
			}

			/*
			 * This is an unlikely scenario where the host is
			 * reaping a descriptor which it already reaped just
			 * a while ago but is yet to replenish it back to HW.
			 * In this case host will dump the last 128 descriptors
			 * including the software descriptor rx_desc and assert.
			 */
			if (qdf_unlikely(!rx_desc->in_use)) {
				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
				dp_info_rl("Reaping rx_desc not in use!");
				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
							   ring_desc, rx_desc);
				/* ignore duplicate RX desc and continue */
				/* Pop out the descriptor */
				goto next_entry;
			}

			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
							    msdu_list.paddr[0]);
			if (!ret) {
				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
				rx_desc->in_err_state = 1;
				goto next_entry;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);
			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			goto next_entry;
		}
process_reo_error_code:
		/*
		 * Expect REO errors to be handled after this point
		 */
		qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED);
		dp_info_rl("Got pkt with REO ERROR: %d", error_code);

		switch (error_code) {
		case HAL_REO_ERR_PN_CHECK_FAILED:
		case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			break;
		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_REGULAR_FRAME_OOR:
		case HAL_REO_ERR_BAR_FRAME_OOR:
		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_reo_err_entry_process(
					soc,
					ring_desc,
					&mpdu_desc_info,
					link_desc_va,
					error_code);

			rx_bufs_reaped[mac_id] += count;
			break;
		case HAL_REO_ERR_QUEUE_DESC_INVALID:
		case HAL_REO_ERR_AMPDU_IN_NON_BA:
		case HAL_REO_ERR_NON_BA_DUPLICATE:
		case HAL_REO_ERR_BA_DUPLICATE:
		case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION:
		case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN:
		case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			count = dp_rx_msdus_drop(soc, ring_desc,
						 &mpdu_desc_info,
						 &mac_id, quota);
			rx_bufs_reaped[mac_id] += count;
			break;
		default:
			/* Assert if unexpected error type */
			qdf_assert_always(0);
		}

next_entry:
		dp_rx_link_cookie_invalidate(ring_desc);
		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);

		rx_bufs_reaped_total = 0;
		for (i = 0; i < MAX_PDEV_CNT; i++)
			rx_bufs_reaped_total += rx_bufs_reaped[i];

		if (dp_rx_reap_loop_pkt_limit_hit(soc, rx_bufs_reaped_total,
						  max_reap_limit))
			break;
	}
done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail,
						false);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
#ifdef DROP_RXDMA_DECRYPT_ERR
/**
 * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
 *
 * Return: true if rxdma decrypt err frames are handled and false otherwise
 */
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return false;
}
#else
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return true;
}
#endif
/*
 * dp_rx_wbm_sg_list_last_msdu_war() - workaround for a HW issue
 * @soc: DP SOC handle
 *
 * This is a workaround for a HW issue where the length is valid only
 * in the last msdu of an SG list.
 */
static inline void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
{
	if (soc->wbm_sg_last_msdu_war) {
		uint32_t len;
		qdf_nbuf_t temp = soc->wbm_sg_param.wbm_sg_nbuf_tail;

		len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
						     qdf_nbuf_data(temp));
		temp = soc->wbm_sg_param.wbm_sg_nbuf_head;
		while (temp) {
			QDF_NBUF_CB_RX_PKT_LEN(temp) = len;
			temp = temp->next;
		}
	}
}
#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_wbm_desc_nbuf_sanity_check() - sanity check for WBM rx_desc paddr
 *					corruption
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring
 * @ring_desc: REO ring descriptor
 * @rx_desc: Rx descriptor
 *
 * Return: QDF_STATUS
 */
static
QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
					    hal_ring_handle_t hal_ring_hdl,
					    hal_ring_desc_t ring_desc,
					    struct dp_rx_desc *rx_desc)
{
	struct hal_buf_info hbi;

	hal_rx_wbm_rel_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
	/* Sanity check for possible buffer paddr corruption */
	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
		return QDF_STATUS_SUCCESS;

	hal_srng_dump_ring_desc(soc->hal_soc, hal_ring_hdl, ring_desc);

	return QDF_STATUS_E_FAILURE;
}
#else
static
QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
					    hal_ring_handle_t hal_ring_hdl,
					    hal_ring_desc_t ring_desc,
					    struct dp_rx_desc *rx_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif
static inline bool
dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
{
	/*
	 * Currently only the Null Queue and Unencrypted error handlers
	 * have support for SG. The other error handlers do not deal with
	 * SG buffers.
	 */
	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
		return true;

	return false;
}
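
/**
 * dp_rx_wbm_err_process() - Processes error frames routed to the WBM
 *			     release ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the WBM-to-host release ring
 * @quota: number of units (packets) that can be serviced in one shot
 *
 * Implements error processing and the top-level demultiplexer for frames
 * that RXDMA or REO pushed to the WBM-to-host release ring.
 *
 * Return: number of rx buffers reaped
 */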
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	bool is_tkip_mic_err;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;
	uint8_t msdu_continuation = 0;
	bool process_sg_buf = false;
	uint32_t wbm_err_src;
	QDF_STATUS status;
	struct hal_rx_mpdu_desc_info mpdu_desc_info = { 0 };

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK",
			      soc, hal_ring_hdl);
		goto done;
	}
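	/*
	 * First pass: reap WBM release ring entries, unmap their buffers,
	 * stash the WBM error info in each nbuf's TLV area, and queue the
	 * nbufs (including any SG chains) for the processing pass below.
	 */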
	while (qdf_likely(quota)) {
		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
		if (qdf_unlikely(!ring_desc))
			break;

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		wbm_err_src = hal_rx_wbm_err_src_get(hal_soc, ring_desc);
		qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));

		if (soc->arch_ops.dp_wbm_get_rx_desc_from_hal_desc(soc,
								   ring_desc,
								   &rx_desc)) {
			dp_rx_err_err("get rx desc from hal_desc failed");
			continue;
		}

		qdf_assert_always(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			dp_rx_err_err("%pK: Invalid rx_desc %pK",
				      soc, rx_desc);
			continue;
		}

		/*
		 * This is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			continue;
		}

		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
		nbuf = rx_desc->nbuf;

		status = dp_rx_wbm_desc_nbuf_sanity_check(soc, hal_ring_hdl,
							  ring_desc, rx_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			dp_info_rl("Rx error Nbuf %pK sanity check failure!",
				   nbuf);
			rx_desc->in_err_state = 1;
			rx_desc->unmapped = 1;
			rx_bufs_reaped[rx_desc->pool_id]++;
			dp_rx_add_to_free_desc_list(
						&head[rx_desc->pool_id],
						&tail[rx_desc->pool_id],
						rx_desc);
			continue;
		}
		/* Get MPDU DESC info */
		hal_rx_mpdu_desc_info_get(hal_soc, ring_desc, &mpdu_desc_info);

		if (qdf_likely(mpdu_desc_info.mpdu_flags &
			       HAL_MPDU_F_QOS_CONTROL_VALID))
			qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);

		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		dp_ipa_rx_buf_smmu_mapping_lock(soc);
		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
		rx_desc->unmapped = 1;
		dp_ipa_rx_buf_smmu_mapping_unlock(soc);

		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support &&
				 dp_rx_is_sg_formation_required(&wbm_err_info))) {
			/* SG is detected from continuation bit */
			msdu_continuation =
				hal_rx_wbm_err_msdu_continuation_get(hal_soc,
								     ring_desc);
			if (msdu_continuation &&
			    !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
				/* Update length from first buffer in SG */
				soc->wbm_sg_param.wbm_sg_desc_msdu_len =
					hal_rx_msdu_start_msdu_len_get(
						soc->hal_soc,
						qdf_nbuf_data(nbuf));
				soc->wbm_sg_param.wbm_is_first_msdu_in_sg = true;
			}

			if (msdu_continuation) {
				/* MSDU continued packets */
				qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
			} else {
				/* This is the terminal packet in SG */
				qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
				qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
				process_sg_buf = true;
			}
		}

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_priv_info_set_in_tlv(soc->hal_soc,
					    qdf_nbuf_data(nbuf),
					    (uint8_t *)&wbm_err_info,
					    sizeof(wbm_err_info));

		rx_bufs_reaped[rx_desc->pool_id]++;

		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
			DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
					  soc->wbm_sg_param.wbm_sg_nbuf_tail,
					  nbuf);
			if (process_sg_buf) {
				if (!dp_rx_buffer_pool_refill(
					soc,
					soc->wbm_sg_param.wbm_sg_nbuf_head,
					rx_desc->pool_id))
					DP_RX_MERGE_TWO_LIST(
						nbuf_head, nbuf_tail,
						soc->wbm_sg_param.wbm_sg_nbuf_head,
						soc->wbm_sg_param.wbm_sg_nbuf_tail);
				dp_rx_wbm_sg_list_last_msdu_war(soc);
				dp_rx_wbm_sg_list_reset(soc);
				process_sg_buf = false;
			}
		} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
						     rx_desc->pool_id)) {
			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
		}

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);

		/*
		 * if continuation bit is set then we have MSDU spread
		 * across multiple buffers, let us not decrement quota
		 * till we reap all buffers of that MSDU.
		 */
		if (qdf_likely(!msdu_continuation))
			quota -= 1;
	}
done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&head[mac_id], &tail[mac_id],
						false);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}
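	/*
	 * Second pass: walk the reaped nbuf list and dispatch each frame
	 * based on the WBM error info stashed in its TLV area.
	 */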
	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_txrx_peer *txrx_peer;
		struct dp_peer *peer;
		uint16_t peer_id;
		uint8_t err_code;
		uint8_t *tlv_hdr;
		uint32_t peer_meta_data;
		dp_txrx_ref_handle txrx_ref_handle = NULL;
		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr,
					      (uint8_t *)&wbm_err_info,
					      sizeof(wbm_err_info));

		peer_meta_data = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
							       rx_tlv_hdr);
		peer_id = dp_rx_peer_metadata_peer_id_get(soc, peer_meta_data);
		txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
							   &txrx_ref_handle,
							   DP_MOD_ID_RX_ERR);

		if (!txrx_peer)
			dp_info_rl("peer is null peer_id %u err_src %u err_rsn %u",
				   peer_id, wbm_err_info.wbm_err_src,
				   wbm_err_info.reo_psh_rsn);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;

		/*
		 * Form the SG for msdu continued buffers
		 * (QCN9000 has this support).
		 */
		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;
			/*
			 * SG error handling is not done correctly,
			 * drop SG frames for now.
			 */
			dp_rx_nbuf_free(nbuf);
			dp_info_rl("scattered msdu dropped");
			nbuf = next;
			if (txrx_peer)
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_RX_ERR);
			continue;
		}
		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err_info.reo_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev, err.reo_error,
						     1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					dp_rx_null_q_desc_handle(soc, nbuf,
								 rx_tlv_hdr,
								 pool_id,
								 txrx_peer);
					break;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.jump_2k_err,
									  1);

					pool_id = wbm_err_info.pool_id;

					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
						hal_rx_msdu_start_msdu_len_get(
							soc->hal_soc, rx_tlv_hdr);
					nbuf->next = NULL;
					dp_2k_jump_handle(soc, nbuf,
							  rx_tlv_hdr,
							  peer_id, tid);
					break;
				case HAL_REO_ERR_REGULAR_FRAME_OOR:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.oor_err,
									  1);
					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
						hal_rx_msdu_start_msdu_len_get(
							soc->hal_soc, rx_tlv_hdr);
					nbuf->next = NULL;
					dp_rx_oor_handle(soc, nbuf,
							 peer_id,
							 rx_tlv_hdr);
					break;
				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
				case HAL_REO_ERR_BAR_FRAME_OOR:
					peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
					if (peer) {
						dp_rx_err_handle_bar(soc, peer,
								     nbuf);
						dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
					}
					dp_rx_nbuf_free(nbuf);
					break;
				case HAL_REO_ERR_PN_CHECK_FAILED:
				case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.pn_err,
									  1);
					dp_rx_nbuf_free(nbuf);
					break;
				default:
					dp_info_rl("Got pkt with REO ERROR: %d",
						   wbm_err_info.reo_err_code);
					dp_rx_nbuf_free(nbuf);
				}
			} else if (wbm_err_info.reo_psh_rsn
					== HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
						    rx_tlv_hdr,
						    HAL_RX_WBM_ERR_SRC_REO);
			} else {
				/* should not enter here */
				dp_rx_err_alert("invalid reo push reason %u",
						wbm_err_info.reo_psh_rsn);
				dp_rx_nbuf_free(nbuf);
				qdf_assert_always(0);
			}
        } else if (wbm_err_info.wbm_err_src ==
                   HAL_RX_WBM_ERR_SRC_RXDMA) {
            if (wbm_err_info.rxdma_psh_rsn ==
                HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
                DP_STATS_INC(soc,
                             rx.err.rxdma_error
                             [wbm_err_info.rxdma_err_code], 1);
                /* increment @pdev level */
                pool_id = wbm_err_info.pool_id;
                dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
                if (dp_pdev)
                    DP_STATS_INC(dp_pdev, err.rxdma_error, 1);

                switch (wbm_err_info.rxdma_err_code) {
                case HAL_RXDMA_ERR_UNENCRYPTED:
                case HAL_RXDMA_ERR_WIFI_PARSE:
                    if (txrx_peer)
                        DP_PEER_PER_PKT_STATS_INC(txrx_peer,
                                                  rx.err.rxdma_wifi_parse_err,
                                                  1);
                    pool_id = wbm_err_info.pool_id;
                    dp_rx_process_rxdma_err(soc, nbuf, rx_tlv_hdr,
                                            txrx_peer,
                                            wbm_err_info.rxdma_err_code,
                                            pool_id);
                    break;
                case HAL_RXDMA_ERR_TKIP_MIC:
                    dp_rx_process_mic_error(soc, nbuf, rx_tlv_hdr,
                                            txrx_peer);
                    if (txrx_peer)
                        DP_PEER_PER_PKT_STATS_INC(txrx_peer,
                                                  rx.err.mic_err, 1);
                    break;
                case HAL_RXDMA_ERR_DECRYPT:
                    /*
                     * All the TKIP-MIC failures are treated as
                     * Decrypt Errors for QCN9224 targets.
                     */
                    is_tkip_mic_err =
                        hal_rx_msdu_end_is_tkip_mic_err(hal_soc,
                                                        rx_tlv_hdr);
                    if (is_tkip_mic_err && txrx_peer) {
                        dp_rx_process_mic_error(soc, nbuf,
                                                rx_tlv_hdr,
                                                txrx_peer);
                        DP_PEER_PER_PKT_STATS_INC(txrx_peer,
                                                  rx.err.mic_err, 1);
                        break;
                    }

                    if (txrx_peer) {
                        DP_PEER_PER_PKT_STATS_INC(txrx_peer,
                                                  rx.err.decrypt_err,
                                                  1);
                        dp_rx_nbuf_free(nbuf);
                        break;
                    }

                    if (!dp_handle_rxdma_decrypt_err()) {
                        dp_rx_nbuf_free(nbuf);
                        break;
                    }

                    pool_id = wbm_err_info.pool_id;
                    err_code = wbm_err_info.rxdma_err_code;
                    tlv_hdr = rx_tlv_hdr;
                    dp_rx_process_rxdma_err(soc, nbuf, tlv_hdr,
                                            NULL, err_code, pool_id);
                    break;
                case HAL_RXDMA_MULTICAST_ECHO:
                    if (txrx_peer)
                        DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
                                                      rx.mec_drop, 1,
                                                      qdf_nbuf_len(nbuf));
                    dp_rx_nbuf_free(nbuf);
                    break;
                case HAL_RXDMA_UNAUTHORIZED_WDS:
                    pool_id = wbm_err_info.pool_id;
                    err_code = wbm_err_info.rxdma_err_code;
                    tlv_hdr = rx_tlv_hdr;
                    dp_rx_process_rxdma_err(soc, nbuf, tlv_hdr,
                                            txrx_peer, err_code,
                                            pool_id);
                    break;
                default:
                    dp_rx_nbuf_free(nbuf);
                    dp_err_rl("RXDMA error %d",
                              wbm_err_info.rxdma_err_code);
                }
            } else if (wbm_err_info.rxdma_psh_rsn ==
                       HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
                dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
                                    rx_tlv_hdr,
                                    HAL_RX_WBM_ERR_SRC_RXDMA);
            } else if (wbm_err_info.rxdma_psh_rsn ==
                       HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) {
                dp_rx_err_err("rxdma push reason %u",
                              wbm_err_info.rxdma_psh_rsn);
                DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
                dp_rx_nbuf_free(nbuf);
            } else {
                /* should not enter here */
                dp_rx_err_alert("invalid rxdma push reason %u",
                                wbm_err_info.rxdma_psh_rsn);
                dp_rx_nbuf_free(nbuf);
                qdf_assert_always(0);
            }
        } else {
            /* Should not come here */
            qdf_assert(0);
        }

        if (txrx_peer)
            dp_txrx_peer_unref_delete(txrx_ref_handle,
                                      DP_MOD_ID_RX_ERR);

        nbuf = next;
    }

    return rx_bufs_used; /* Assume no scale factor for now */
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */
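/*
 * The reap loop above is a three-level dispatch: WBM release source
 * (REO vs RXDMA), then push reason, then the per-source error code.
 * A minimal standalone sketch of that shape follows; the enums, struct
 * and printf handlers are hypothetical stand-ins, not driver
 * definitions.
 */
#if 0 /* illustrative only, not compiled */
#include <assert.h>
#include <stdio.h>

enum err_src { ERR_SRC_REO, ERR_SRC_RXDMA };
enum psh_rsn { PSH_RSN_ERROR, PSH_RSN_ROUTE, PSH_RSN_FLUSH };

struct wbm_err { enum err_src src; enum psh_rsn rsn; int code; };

static void triage(const struct wbm_err *e)
{
    if (e->src == ERR_SRC_REO) {
        if (e->rsn == PSH_RSN_ERROR)
            printf("REO error %d\n", e->code);   /* 2K jump, OOR, PN ... */
        else if (e->rsn == PSH_RSN_ROUTE)
            printf("REO routed frame\n");
        else
            assert(0);                           /* invalid push reason */
    } else {
        if (e->rsn == PSH_RSN_ERROR)
            printf("RXDMA error %d\n", e->code); /* MIC, decrypt, WDS ... */
        else if (e->rsn == PSH_RSN_ROUTE)
            printf("RXDMA routed frame\n");
        else if (e->rsn == PSH_RSN_FLUSH)
            printf("RXDMA flush, drop\n");
        else
            assert(0);                           /* invalid push reason */
    }
}
#endif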
/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc found
 *
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
                         hal_rxdma_desc_t rxdma_dst_ring_desc,
                         void *rx_desc)
{
    DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
    dp_rx_dump_info_and_assert(
            soc,
            soc->rx_rel_ring.hal_srng,
            hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
            rx_desc);
}
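/*
 * The in_use flag is what makes the duplicate check in
 * dp_rx_err_mpdu_pop() below possible: a descriptor handed back by HW
 * must still be marked in_use by SW, so seeing it cleared means the
 * same cookie came back twice. A minimal sketch of that ownership
 * handshake; the struct and helpers are hypothetical stand-ins, not
 * driver definitions.
 */
#if 0 /* illustrative only, not compiled */
#include <stdbool.h>
#include <stddef.h>

struct sw_rx_desc {
    bool in_use;   /* true while the descriptor is posted to HW */
    void *nbuf;
};

/* Reaping a descriptor whose in_use is already false means HW returned
 * the same cookie twice; this is the case dup_desc_dbg() traps. */
static bool reap(struct sw_rx_desc *d)
{
    if (!d->in_use)
        return false;   /* duplicate: dump state and assert */
    d->in_use = false;  /* ownership moves to the host */
    return true;
}

/* Replenish hands the descriptor back to HW and re-arms the flag. */
static void replenish(struct sw_rx_desc *d)
{
    d->in_use = true;
}
#endif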
/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
                   hal_rxdma_desc_t rxdma_dst_ring_desc,
                   union dp_rx_desc_list_elem_t **head,
                   union dp_rx_desc_list_elem_t **tail)
{
    void *rx_msdu_link_desc;
    qdf_nbuf_t msdu;
    qdf_nbuf_t last;
    struct hal_rx_msdu_list msdu_list;
    uint16_t num_msdus;
    struct hal_buf_info buf_info;
    uint32_t rx_bufs_used = 0;
    uint32_t msdu_cnt;
    uint32_t i;
    uint8_t push_reason;
    uint8_t rxdma_error_code = 0;
    uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
    struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
    uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
    hal_rxdma_desc_t ring_desc;
    struct rx_desc_pool *rx_desc_pool;

    if (!pdev) {
        dp_rx_err_debug("%pK: pdev is null for mac_id = %d",
                        soc, mac_id);
        return rx_bufs_used;
    }

    msdu = 0;
    last = NULL;

    hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
                                 &buf_info, &msdu_cnt);

    push_reason =
        hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
    if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
        rxdma_error_code =
            hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
    }

    do {
        rx_msdu_link_desc =
            dp_rx_cookie_2_link_desc_va(soc, &buf_info);
        qdf_assert_always(rx_msdu_link_desc);

        hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
                             &msdu_list, &num_msdus);

        if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
            /*
             * If the msdus belong to an NSS offloaded radio and the
             * rbm is not SW1_BM, return the msdu_link descriptor
             * without freeing the msdus (nbufs): these buffers are
             * handed to the NSS completion ring for NSS to free.
             * Otherwise, iterate through the msdu link desc list and
             * free each msdu in the list.
             */
            if (msdu_list.rbm[0] !=
                HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) &&
                wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
                bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
            } else {
                for (i = 0; i < num_msdus; i++) {
                    struct dp_rx_desc *rx_desc =
                        soc->arch_ops.dp_rx_desc_cookie_2_va(
                                soc, msdu_list.sw_cookie[i]);

                    qdf_assert_always(rx_desc);
                    msdu = rx_desc->nbuf;

                    /*
                     * This is an unlikely scenario where the host is
                     * reaping a descriptor that it already reaped just
                     * a while ago but has not yet replenished back to
                     * HW. In this case the host dumps the last 128
                     * descriptors, including the software descriptor
                     * rx_desc, and asserts.
                     */
                    ring_desc = rxdma_dst_ring_desc;
                    if (qdf_unlikely(!rx_desc->in_use)) {
                        dup_desc_dbg(soc, ring_desc, rx_desc);
                        continue;
                    }

                    if (rx_desc->unmapped == 0) {
                        rx_desc_pool =
                            &soc->rx_desc_buf[rx_desc->pool_id];
                        dp_ipa_rx_buf_smmu_mapping_lock(soc);
                        dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
                                              msdu);
                        rx_desc->unmapped = 1;
                        dp_ipa_rx_buf_smmu_mapping_unlock(soc);
                    }

                    dp_rx_err_debug("%pK: msdu_nbuf=%pK ",
                                    soc, msdu);

                    dp_rx_buffer_pool_nbuf_free(soc, msdu,
                                                rx_desc->pool_id);
                    rx_bufs_used++;
                    dp_rx_add_to_free_desc_list(head,
                                                tail, rx_desc);
                }
            }
        } else {
            rxdma_error_code = HAL_RXDMA_ERR_WAR;
        }

        /*
         * Store the current link buffer into the local structure
         * to be used for release purposes.
         */
        hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
                                     buf_info.paddr, buf_info.sw_cookie,
                                     buf_info.rbm);

        hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
                                      &buf_info);
        dp_rx_link_desc_return_by_addr(soc,
                                       (hal_buff_addrinfo_t)
                                       rx_link_buf_info,
                                       bm_action);
    } while (buf_info.paddr);

    DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
    if (pdev)
        DP_STATS_INC(pdev, err.rxdma_error, 1);

    if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT)
        dp_rx_err_err("%pK: Packet received with Decrypt error", soc);

    return rx_bufs_used;
}
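/*
 * dp_rx_err_mpdu_pop() above walks a chain of MSDU link descriptors:
 * process the MSDU list of the current link, record its buffer address
 * info for release, then follow the next-link address until it reads
 * back as zero. A minimal sketch of that walk; the types and the
 * paddr-to-VA stub are hypothetical, not driver definitions.
 */
#if 0 /* illustrative only, not compiled */
#include <stddef.h>
#include <stdint.h>

struct link_desc {
    uint64_t next_paddr;   /* 0 terminates the chain */
    int num_msdus;
};

static struct link_desc pool[8];

/* hypothetical cookie/address translation,
 * cf. dp_rx_cookie_2_link_desc_va() */
static struct link_desc *paddr_to_va(uint64_t paddr)
{
    return paddr ? &pool[paddr % 8] : NULL;
}

static int walk_chain(uint64_t paddr)
{
    int total = 0;

    do {
        struct link_desc *ld = paddr_to_va(paddr);

        if (!ld)
            break;
        total += ld->num_msdus;  /* free this link's MSDUs */
        paddr = ld->next_paddr;  /* then release the link and follow on */
    } while (paddr);

    return total;
}
#endif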
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
                     uint32_t mac_id, uint32_t quota)
{
    struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
    hal_rxdma_desc_t rxdma_dst_ring_desc;
    hal_soc_handle_t hal_soc;
    void *err_dst_srng;
    union dp_rx_desc_list_elem_t *head = NULL;
    union dp_rx_desc_list_elem_t *tail = NULL;
    struct dp_srng *dp_rxdma_srng;
    struct rx_desc_pool *rx_desc_pool;
    uint32_t work_done = 0;
    uint32_t rx_bufs_used = 0;

    if (!pdev)
        return 0;

    err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;

    if (!err_dst_srng) {
        dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
                      soc, err_dst_srng);
        return 0;
    }

    hal_soc = soc->hal_soc;

    qdf_assert(hal_soc);

    if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
        dp_rx_err_err("%pK: HAL Monitor Destination Ring access start failed -- %pK",
                      soc, err_dst_srng);
        return 0;
    }

    while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
            hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
        rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
                                           rxdma_dst_ring_desc,
                                           &head, &tail);
    }

    dp_srng_access_end(int_ctx, soc, err_dst_srng);

    if (rx_bufs_used) {
        if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
            dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
            rx_desc_pool = &soc->rx_desc_buf[mac_id];
        } else {
            dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
            rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
        }

        dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
                                rx_desc_pool, rx_bufs_used,
                                &head, &tail, false);

        work_done += rx_bufs_used;
    }

    return work_done;
}
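/*
 * The pattern in dp_rxdma_err_process() above is reap-then-replenish
 * under a quota: count every buffer freed while draining the error
 * destination ring, then post exactly that many fresh buffers back.
 * A minimal sketch of the accounting; the ring type and its two ops
 * are hypothetical stand-ins for the hal_srng/replenish calls.
 */
#if 0 /* illustrative only, not compiled */
#include <stdint.h>

struct ring;                                   /* opaque, hypothetical */
void *ring_next(struct ring *r);               /* cf. hal_srng_dst_get_next() */
void ring_refill(struct ring *r, uint32_t n);  /* cf. dp_rx_buffers_replenish() */

static uint32_t drain(struct ring *dst, struct ring *refill,
                      uint32_t quota)
{
    uint32_t used = 0;

    /* Reap at most `quota` entries in one pass... */
    while (quota-- && ring_next(dst))
        used++;              /* each entry returns one rx buffer */

    /* ...then hand the same number of fresh buffers back to HW. */
    if (used)
        ring_refill(refill, used);

    return used;             /* reported to the scheduler as work done */
}
#endif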
#ifndef QCA_HOST_MODE_WIFI_DISABLED

static inline void
dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
                        hal_rxdma_desc_t rxdma_dst_ring_desc,
                        union dp_rx_desc_list_elem_t **head,
                        union dp_rx_desc_list_elem_t **tail,
                        uint32_t *rx_bufs_used)
{
    void *rx_msdu_link_desc;
    qdf_nbuf_t msdu;
    qdf_nbuf_t last;
    struct hal_rx_msdu_list msdu_list;
    uint16_t num_msdus;
    struct hal_buf_info buf_info;
    uint32_t msdu_cnt, i;
    uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
    struct rx_desc_pool *rx_desc_pool;
    struct dp_rx_desc *rx_desc;

    msdu = 0;
    last = NULL;

    hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
                                 &buf_info, &msdu_cnt);

    do {
        rx_msdu_link_desc =
            dp_rx_cookie_2_link_desc_va(soc, &buf_info);
        if (!rx_msdu_link_desc) {
            DP_STATS_INC(soc,
                         tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC],
                         1);
            break;
        }

        hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
                             &msdu_list, &num_msdus);

        if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
            for (i = 0; i < num_msdus; i++) {
                if (!dp_rx_is_sw_cookie_valid(soc,
                                              msdu_list.sw_cookie[i])) {
                    dp_rx_err_info_rl("Invalid MSDU info cookie: 0x%x",
                                      msdu_list.sw_cookie[i]);
                    continue;
                }

                rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
                                soc, msdu_list.sw_cookie[i]);
                qdf_assert_always(rx_desc);
                rx_desc_pool =
                    &soc->rx_desc_buf[rx_desc->pool_id];
                msdu = rx_desc->nbuf;

                /*
                 * This is an unlikely scenario where the host is
                 * reaping a descriptor that it already reaped just a
                 * while ago but has not yet replenished back to HW.
                 */
                if (qdf_unlikely(!rx_desc->in_use) ||
                    qdf_unlikely(!msdu)) {
                    dp_rx_err_info_rl("Reaping rx_desc not in use!");
                    continue;
                }

                dp_ipa_rx_buf_smmu_mapping_lock(soc);
                dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu);
                rx_desc->unmapped = 1;
                dp_ipa_rx_buf_smmu_mapping_unlock(soc);

                dp_rx_buffer_pool_nbuf_free(soc, msdu,
                                            rx_desc->pool_id);
                rx_bufs_used[rx_desc->pool_id]++;
                dp_rx_add_to_free_desc_list(head,
                                            tail, rx_desc);
            }
        }

        /*
         * Store the current link buffer into the local structure
         * to be used for release purposes.
         */
        hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
                                     buf_info.paddr, buf_info.sw_cookie,
                                     buf_info.rbm);

        hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
                                      &buf_info);
        dp_rx_link_desc_return_by_addr(soc,
                                       (hal_buff_addrinfo_t)
                                       rx_link_buf_info,
                                       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
    } while (buf_info.paddr);
}
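/*
 * Unlike dp_rx_err_mpdu_pop(), the helper above tallies frees per
 * descriptor pool (rx_bufs_used[pool_id]++) so that the caller can
 * replenish each pool's refill ring by exactly what it lost -- the
 * pattern used by dp_handle_wbm_internal_error() below. A minimal
 * sketch of that per-pool accounting; refill_pool() and MAX_POOLS are
 * hypothetical stand-ins.
 */
#if 0 /* illustrative only, not compiled */
#include <stdint.h>

#define MAX_POOLS 3

/* hypothetical per-pool refill hook, cf. dp_rx_buffers_replenish() */
static void refill_pool(uint8_t pool_id, uint32_t n) { /* post n buffers */ }

static void replenish_per_pool(const uint32_t reaped[MAX_POOLS])
{
    uint8_t pool_id;

    for (pool_id = 0; pool_id < MAX_POOLS; pool_id++) {
        if (!reaped[pool_id])
            continue;       /* nothing reaped from this pool */
        refill_pool(pool_id, reaped[pool_id]);
    }
}
#endif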
/**
 * dp_handle_wbm_internal_error() - handles wbm_internal_error case
 *
 * @soc: core DP main context
 * @hal_desc: hal descriptor
 * @buf_type: indicates if the buffer is of type link desc or msdu
 *
 * wbm_internal_error is seen in the following scenarios:
 *
 * 1. Null pointers detected in WBM_RELEASE_RING descriptors
 * 2. Null pointers detected during delinking process
 *
 * Some null pointer cases:
 *
 * a. MSDU buffer pointer is NULL
 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
 * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
 *
 * Return: None
 */
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
                             uint32_t buf_type)
{
    struct hal_buf_info buf_info = {0};
    struct dp_rx_desc *rx_desc = NULL;
    struct rx_desc_pool *rx_desc_pool;
    uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = {0};
    union dp_rx_desc_list_elem_t *head = NULL;
    union dp_rx_desc_list_elem_t *tail = NULL;
    uint8_t pool_id;
    uint8_t mac_id;

    hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info);

    if (!buf_info.paddr) {
        DP_STATS_INC(soc,
                     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER],
                     1);
        return;
    }

    /* buffer_addr_info is the first element of ring_desc */
    hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc,
                              &buf_info);
    pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie);

    if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
        DP_STATS_INC(soc,
                     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF],
                     1);
        rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
                        soc, buf_info.sw_cookie);

        if (rx_desc && rx_desc->nbuf) {
            rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
            dp_ipa_rx_buf_smmu_mapping_lock(soc);
            dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
                                  rx_desc->nbuf);
            rx_desc->unmapped = 1;
            dp_ipa_rx_buf_smmu_mapping_unlock(soc);

            dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
                                        rx_desc->pool_id);
            dp_rx_add_to_free_desc_list(&head,
                                        &tail,
                                        rx_desc);
            rx_bufs_reaped[rx_desc->pool_id]++;
        }
    } else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
        dp_wbm_int_err_mpdu_pop(soc, pool_id, hal_desc,
                                &head, &tail, rx_bufs_reaped);
    }

    for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
        struct rx_desc_pool *rx_desc_pool;
        struct dp_srng *dp_rxdma_srng;

        if (!rx_bufs_reaped[mac_id])
            continue;

        DP_STATS_INC(soc,
                     tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED],
                     1);
        dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
        rx_desc_pool = &soc->rx_desc_buf[mac_id];

        dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
                                rx_desc_pool,
                                rx_bufs_reaped[mac_id],
                                &head, &tail, false);
    }
}
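/*
 * Both paths above start from the 32-bit sw_cookie carried in the
 * buffer address info: DP_RX_DESC_COOKIE_POOL_ID_GET() pulls the pool
 * id out of it, and dp_rx_desc_cookie_2_va() resolves the rest to a
 * software descriptor. A sketch of such a packed-cookie decode under
 * an assumed layout -- the shift/mask values here are hypothetical,
 * not the ones in the DP headers.
 */
#if 0 /* illustrative only, not compiled */
#include <stdint.h>

#define COOKIE_POOL_ID_SHIFT 18          /* assumed layout */
#define COOKIE_POOL_ID_MASK  0x001c0000u
#define COOKIE_INDEX_MASK    0x0003ffffu

static inline uint8_t cookie_pool_id(uint32_t cookie)
{
    return (uint8_t)((cookie & COOKIE_POOL_ID_MASK) >>
                     COOKIE_POOL_ID_SHIFT);
}

static inline uint32_t cookie_index(uint32_t cookie)
{
    return cookie & COOKIE_INDEX_MASK;
}
#endif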
#endif /* QCA_HOST_MODE_WIFI_DISABLED */