dp_rx_err.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_htt.h"
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"
#include "dp_rx_buffer_pool.h"

#define dp_rx_err_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_ERROR, params)
#define dp_rx_err_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
#define dp_rx_err_info_rl(params...) \
	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX_ERROR, ## params)
#define dp_rx_err_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_ERROR, params)

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/* Max regular Rx packet routing error */
#define DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD 20
#define DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT 10
#define DP_RX_ERR_ROUTE_TIMEOUT_US (5 * 1000 * 1000) /* microseconds */
#ifdef FEATURE_MEC
bool dp_rx_mcast_echo_check(struct dp_soc *soc,
			    struct dp_txrx_peer *txrx_peer,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = txrx_peer->vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_mec_entry *mecentry = NULL;
	struct dp_ast_entry *ase = NULL;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);

	/*
	 * If the received pkt's source MAC address matches the vdev
	 * MAC address, drop the pkt as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
			  vdev->mac_addr.raw,
			  QDF_MAC_ADDR_SIZE)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * If the received pkt's source MAC address matches the MAC address
	 * of a wired PC behind the STA, or of a wireless STA behind the
	 * repeater, drop the pkt as it is looped back.
	 */
	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		qdf_spin_lock_bh(&soc->ast_lock);
		ase = soc->ast_table[sa_idx];

		/*
		 * This check is not strictly needed since MEC does not depend
		 * on AST, but without it SON has issues in the dual-backhaul
		 * scenario. In APS SON mode, a client connected to the RE on
		 * 2 GHz sends multicast packets; the RE forwards them to the
		 * CAP over the 5 GHz backhaul, and the CAP loops them back on
		 * 2 GHz to the RE. On receiving them in the 2 GHz STA vap, we
		 * would otherwise assume the client has roamed and kick it
		 * out.
		 */
		if (ase && (ase->peer_id != txrx_peer->peer_id)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			goto drop;
		}

		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	qdf_spin_lock_bh(&soc->mec_lock);

	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
						   &data[QDF_MAC_ADDR_SIZE]);
	if (!mecentry) {
		qdf_spin_unlock_bh(&soc->mec_lock);
		return false;
	}

	qdf_spin_unlock_bh(&soc->mec_lock);

drop:
	dp_rx_err_info("%pK: received pkt with same src mac " QDF_MAC_ADDR_FMT,
		       soc, QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));

	return true;
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
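
/*
 * Illustrative sketch (not part of the driver): how an Rx-path caller might
 * consult dp_rx_mcast_echo_check() and drop echoed multicast frames. The
 * helper name dp_rx_example_mec_filter is hypothetical; only
 * dp_rx_mcast_echo_check(), dp_rx_nbuf_free() and the DP types are taken
 * from this file's API surface.
 */
#if 0
static inline bool
dp_rx_example_mec_filter(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer,
			 uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
{
	/* true means the frame is our own multicast echoed back: drop it */
	if (dp_rx_mcast_echo_check(soc, txrx_peer, rx_tlv_hdr, nbuf)) {
		dp_rx_nbuf_free(nbuf);
		return true;	/* consumed */
	}
	return false;		/* continue normal Rx processing */
}
#endif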
void dp_rx_link_desc_refill_duplicate_check(
				struct dp_soc *soc,
				struct hal_buf_info *buf_info,
				hal_buff_addrinfo_t ring_buf_info)
{
	struct hal_buf_info current_link_desc_buf_info = { 0 };

	/* do duplicate link desc address check */
	hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
					  &current_link_desc_buf_info);

	/*
	 * TODO - Check if the hal soc api call can be removed
	 * since the cookie is just used for print.
	 * buffer_addr_info is the first element of ring_desc
	 */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
				  (uint32_t *)ring_buf_info,
				  &current_link_desc_buf_info);

	if (qdf_unlikely(current_link_desc_buf_info.paddr ==
			 buf_info->paddr)) {
		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
			   current_link_desc_buf_info.paddr,
			   current_link_desc_buf_info.sw_cookie);
		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
	}
	*buf_info = current_link_desc_buf_info;
}

QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		dp_rx_err_err("%pK: WBM RELEASE RING not initialized", soc);
		return status;
	}

	/* do duplicate link desc address check */
	dp_rx_link_desc_refill_duplicate_check(
				soc,
				&soc->last_op_info.wbm_rel_link_desc,
				link_desc_addr);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		dp_rx_err_err("%pK: HAL RING Access For WBM Release SRNG Failed - %pK",
			      soc, wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);

		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
			   srng->ring_id,
			   soc->stats.rx.err.hal_ring_access_full_fail);
		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			   *srng->u.src_ring.hp_addr,
			   srng->u.src_ring.reap_hp,
			   *srng->u.src_ring.tp_addr,
			   srng->u.src_ring.cached_tp);
		QDF_BUG(0);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

qdf_export_symbol(dp_rx_link_desc_return_by_addr);

QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}
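
/*
 * Illustrative sketch (not part of the driver): a typical caller returns a
 * consumed msdu link descriptor to the WBM idle list once all MSDUs hanging
 * off it have been processed. The error handlers later in this file do
 * exactly this; the fragment below only restates the calling convention.
 */
#if 0
	/* ring_desc: REO destination/exception ring entry just reaped */
	if (dp_rx_link_desc_return(soc, ring_desc,
				   HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
	    QDF_STATUS_SUCCESS)
		dp_rx_err_err("%pK: link desc return failed", soc);
#endif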
#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac ID
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		 uint8_t *mac_id,
		 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_rx_desc *rx_desc;
	/* First field in REO Dst ring Desc is buffer_addr_info */
	void *buf_addr_info = ring_desc;
	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
	struct buffer_addr_info next_link_desc_addr_info = { 0 };

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &buf_info);

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc,
				  (uint32_t *)ring_desc,
				  &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
	if (!link_desc_va) {
		dp_rx_err_debug("link desc va is null, soc %pK", soc);
		return rx_bufs_used;
	}

more_msdu_link_desc:
	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc, msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from an MSDU link belong to same pdev */
		*mac_id = rx_desc->pool_id;
		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
		if (!pdev) {
			dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
					soc, rx_desc->pool_id);
			return rx_bufs_used;
		}

		if (!dp_rx_desc_check_magic(rx_desc)) {
			dp_rx_err_err("%pK: Invalid rx_desc cookie=%d",
				      soc, msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		dp_ipa_rx_buf_smmu_mapping_lock(soc);
		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
		rx_desc->unmapped = 1;
		dp_ipa_rx_buf_smmu_mapping_unlock(soc);

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		dp_rx_err_err("%pK: Packet received with PN error for tid :%d",
			      soc, tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);

		dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info,
				      rx_desc->nbuf,
				      QDF_TX_RX_STATUS_DROP, true);
		/* Just free the buffers */
		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/*
	 * If the MSDUs are spread across multiple link descriptors,
	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
	 * spread across multiple buffers). Hence, it is necessary to check
	 * the next link descriptor and release all the MSDUs that are part
	 * of it.
	 */
	hal_rx_get_next_msdu_link_desc_buf_addr_info(
			link_desc_va,
			&next_link_desc_addr_info);

	if (hal_rx_is_buf_addr_info_valid(
				&next_link_desc_addr_info)) {
		/* Clear the next link desc info for the current link_desc */
		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);

		dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
		hal_rx_buffer_addr_info_get_paddr(
				&next_link_desc_addr_info,
				&buf_info);
		/* buffer_addr_info is the first element of ring_desc */
		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
					  (uint32_t *)&next_link_desc_addr_info,
					  &buf_info);
		cur_link_desc_addr_info = next_link_desc_addr_info;
		buf_addr_info = &cur_link_desc_addr_info;

		link_desc_va =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		goto more_msdu_link_desc;
	}
	quota--;
	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	return rx_bufs_used;
}
/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac ID
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO, e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_txrx_peer *txrx_peer;
	bool peer_pn_policy = false;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
						  mpdu_desc_info->peer_meta_data);

	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
						   &txrx_ref_handle,
						   DP_MOD_ID_RX_ERR);

	if (qdf_likely(txrx_peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		dp_err_rl("discard rx due to PN error for peer %pK",
			  txrx_peer);

		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
	}
	dp_rx_err_err("%pK: Packet received with PN error", soc);

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}
#ifdef DP_RX_DELIVER_ALL_OOR_FRAMES
/**
 * dp_rx_deliver_oor_frame() - deliver OOR frames to stack
 * @soc: Datapath soc handler
 * @txrx_peer: pointer to DP peer
 * @nbuf: pointer to the skb of RX frame
 * @frame_mask: the mask for special frame needed
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Note: msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
 * a single nbuf is expected.
 *
 * Return: true - nbuf has been delivered to stack, false - not.
 */
static bool
dp_rx_deliver_oor_frame(struct dp_soc *soc,
			struct dp_txrx_peer *txrx_peer,
			qdf_nbuf_t nbuf, uint32_t frame_mask,
			uint8_t *rx_tlv_hdr)
{
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint32_t skip_len;

	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
		skip_len = l2_hdr_offset;
	} else {
		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
	}

	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
	qdf_nbuf_pull_head(nbuf, skip_len);
	qdf_nbuf_set_exc_frame(nbuf, 1);

	dp_info_rl("OOR frame, mpdu sn 0x%x",
		   hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
	dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer, nbuf, NULL);
	return true;
}
#else
static bool
dp_rx_deliver_oor_frame(struct dp_soc *soc,
			struct dp_txrx_peer *txrx_peer,
			qdf_nbuf_t nbuf, uint32_t frame_mask,
			uint8_t *rx_tlv_hdr)
{
	return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
					   rx_tlv_hdr);
}
#endif
/**
 * dp_rx_oor_handle() - Handles the msdu which is OOR error
 *
 * @soc: core txrx main context
 * @nbuf: pointer to msdu skb
 * @peer_id: dp peer ID
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This function processes the msdu delivered from the REO2TCL
 * ring with error type OOR
 *
 * Return: None
 */
static void
dp_rx_oor_handle(struct dp_soc *soc,
		 qdf_nbuf_t nbuf,
		 uint16_t peer_id,
		 uint8_t *rx_tlv_hdr)
{
	uint32_t frame_mask = wlan_cfg_get_special_frame_cfg(soc->wlan_cfg_ctx);
	struct dp_txrx_peer *txrx_peer = NULL;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
						   &txrx_ref_handle,
						   DP_MOD_ID_RX_ERR);
	if (!txrx_peer) {
		dp_info_rl("peer not found");
		goto free_nbuf;
	}

	if (dp_rx_deliver_oor_frame(soc, txrx_peer, nbuf, frame_mask,
				    rx_tlv_hdr)) {
		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);
		return;
	}

free_nbuf:
	if (txrx_peer)
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_RX_ERR);

	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
	dp_rx_nbuf_free(nbuf);
}
/**
 * dp_rx_err_nbuf_pn_check() - Check if the PN number of this current packet
 *			       is a monotonic increment of the packet number
 *			       from the previous successfully re-ordered
 *			       frame.
 * @soc: Datapath SOC handle
 * @ring_desc: REO ring descriptor
 * @nbuf: Current packet
 *
 * Return: QDF_STATUS_SUCCESS if the PN check passes, else
 *	   QDF_STATUS_E_FAILURE
 */
static inline QDF_STATUS
dp_rx_err_nbuf_pn_check(struct dp_soc *soc, hal_ring_desc_t ring_desc,
			qdf_nbuf_t nbuf)
{
	uint64_t prev_pn, curr_pn[2];

	if (!hal_rx_encryption_info_valid(soc->hal_soc, qdf_nbuf_data(nbuf)))
		return QDF_STATUS_SUCCESS;

	hal_rx_reo_prev_pn_get(soc->hal_soc, ring_desc, &prev_pn);
	hal_rx_tlv_get_pn_num(soc->hal_soc, qdf_nbuf_data(nbuf), curr_pn);

	if (curr_pn[0] > prev_pn)
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}
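
/*
 * Illustrative sketch (not part of the driver): the software PN check above
 * only requires the current PN to be strictly greater than the previous
 * successfully re-ordered PN. For prev_pn = 0x1000, a frame carrying
 * curr_pn[0] = 0x1001 passes, while a stale or replayed frame carrying
 * curr_pn[0] = 0x0fff fails. The helper name below is local to the example.
 */
#if 0
static bool example_pn_monotonic(uint64_t prev_pn, uint64_t curr_pn)
{
	/* mirrors the curr_pn[0] > prev_pn comparison used above */
	return curr_pn > prev_pn;
}
#endif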
#ifdef WLAN_SKIP_BAR_UPDATE
static
void dp_rx_err_handle_bar(struct dp_soc *soc,
			  struct dp_peer *peer,
			  qdf_nbuf_t nbuf)
{
	dp_info_rl("BAR update to H.W is skipped");
	DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
}
#else
static
void dp_rx_err_handle_bar(struct dp_soc *soc,
			  struct dp_peer *peer,
			  qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr;
	unsigned char type, subtype;
	uint16_t start_seq_num;
	uint32_t tid;
	QDF_STATUS status;
	struct ieee80211_frame_bar *bar;

	/*
	 * 1. Is this a BAR frame? If not, discard it.
	 * 2. If it is, get the peer id, tid, ssn.
	 * 2a. Do a tid update.
	 */
	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + soc->rx_pkt_tlv_size);

	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (!(type == IEEE80211_FC0_TYPE_CTL &&
	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
		dp_err_rl("Not a BAR frame!");
		return;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	qdf_assert_always(tid < DP_MAX_TIDS);

	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;

	dp_info_rl("tid %u window_size %u start_seq_num %u",
		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);

	status = dp_rx_tid_update_wifi3(peer, tid,
					peer->rx_tid[tid].ba_win_size,
					start_seq_num,
					true);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err_rl("failed to handle bar frame update rx tid");
		DP_STATS_INC(soc, rx.err.bar_handle_fail_count, 1);
	} else {
		DP_STATS_INC(soc, rx.err.ssn_update_count, 1);
	}
}
#endif
/**
 * _dp_rx_bar_frame_handle(): Core of the BAR frame handling
 * @soc: Datapath SoC handle
 * @nbuf: packet being processed
 * @mpdu_desc_info: mpdu desc info for the current packet
 * @tid: tid on which the packet arrived
 * @err_status: Flag to indicate if REO encountered an error while routing
 *		this frame
 * @error_code: REO error code
 *
 * Return: None
 */
static void
_dp_rx_bar_frame_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			uint32_t tid, uint8_t err_status, uint32_t error_code)
{
	uint16_t peer_id;
	struct dp_peer *peer;

	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
						  mpdu_desc_info->peer_meta_data);
	peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
	if (!peer)
		return;

	dp_info_rl("BAR frame: "
		   " peer_id = %d"
		   " tid = %u"
		   " SSN = %d"
		   " error status = %d",
		   peer->peer_id,
		   tid,
		   mpdu_desc_info->mpdu_seq,
		   err_status);

	if (err_status == HAL_REO_ERROR_DETECTED) {
		switch (error_code) {
		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_BAR_FRAME_OOR:
			dp_rx_err_handle_bar(soc, peer, nbuf);
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			break;
		default:
			DP_STATS_INC(soc, rx.bar_frame, 1);
		}
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
}

/**
 * dp_rx_bar_frame_handle() - Function to handle err BAR frames
 * @soc: core DP main context
 * @ring_desc: Hal ring desc
 * @rx_desc: dp rx desc
 * @mpdu_desc_info: mpdu desc info
 * @err_status: error status
 * @err_code: error code
 *
 * Handle the error BAR frames received. Ensure the SOC level
 * stats are updated based on the REO error code. The BAR frames
 * are further processed by updating the Rx tids with the start
 * sequence number (SSN) and BA window size. The desc is returned
 * to the free desc list.
 *
 * Return: none
 */
static void
dp_rx_bar_frame_handle(struct dp_soc *soc,
		       hal_ring_desc_t ring_desc,
		       struct dp_rx_desc *rx_desc,
		       struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		       uint8_t err_status,
		       uint32_t err_code)
{
	qdf_nbuf_t nbuf;
	struct dp_pdev *pdev;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	nbuf = rx_desc->nbuf;
	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
	rx_desc->unmapped = 1;
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
					rx_tlv_hdr);
	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);

	if (!pdev) {
		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
				soc, rx_desc->pool_id);
		return;
	}

	_dp_rx_bar_frame_handle(soc, nbuf, mpdu_desc_info, tid, err_status,
				err_code);
	dp_rx_err_send_pktlog(soc, pdev, mpdu_desc_info, nbuf,
			      QDF_TX_RX_STATUS_DROP, true);
	dp_rx_link_desc_return(soc, ring_desc,
			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
				    rx_desc->pool_id);
	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
				    &pdev->free_list_tail,
				    rx_desc);
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		       uint16_t peer_id, uint8_t tid)
{
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;
	struct dp_txrx_peer *txrx_peer;
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
	if (!peer) {
		dp_rx_err_info_rl("%pK: peer not found", soc);
		goto free_nbuf;
	}

	txrx_peer = dp_get_txrx_peer(peer);
	if (!txrx_peer) {
		dp_rx_err_info_rl("%pK: txrx_peer not found", soc);
		goto free_nbuf;
	}

	if (tid >= DP_MAX_TIDS) {
		dp_info_rl("invalid tid");
		goto nbuf_deliver;
	}

	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);

	/* allow sending a DELBA only if the BA session is active */
	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto nbuf_deliver;
	}

	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		if (soc->cdp_soc.ol_ops->send_delba) {
			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent,
				     1);
			soc->cdp_soc.ol_ops->send_delba(
					peer->vdev->pdev->soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					tid,
					rx_tid->delba_rcode,
					CDP_DELBA_2K_JUMP);
		}
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

nbuf_deliver:
	if (dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
					rx_tlv_hdr)) {
		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		return;
	}

free_nbuf:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);

	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
	dp_rx_nbuf_free(nbuf);
}
#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
    defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_KIWI)
bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!pdev) {
		dp_rx_err_debug("%pK: pdev is null for pool_id = %d",
				soc, pool_id);
		return false;
	}

	/*
	 * WAR: In certain types of packets, if peer_id is not correct, the
	 * driver may not be able to find the peer. Try finding the peer by
	 * addr2 of the received MPDU.
	 */
	if (wh)
		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
	if (peer) {
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		dp_rx_nbuf_free(nbuf);
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);

		return true;
	}

	return false;
}
#else
bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}
#endif

bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}
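
/*
 * Illustrative sketch (not part of the driver): callers treat a 'true'
 * return from dp_rx_check_pkt_len() as "length exceeds the Rx buffer, drop
 * the frame", as the RXDMA error path later in this file does.
 */
#if 0
	if (dp_rx_check_pkt_len(soc, pkt_len)) {
		/* oversized frame: drop and free the packet */
		dp_rx_nbuf_free(nbuf);
		return;
	}
#endif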
#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
			    qdf_nbuf_t nbuf,
			    qdf_nbuf_t tail,
			    bool is_eapol)
{
	if (is_eapol && soc->eapol_over_control_port)
		dp_rx_eapol_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
	else
		dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#else
void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
			    qdf_nbuf_t nbuf,
			    qdf_nbuf_t tail,
			    bool is_eapol)
{
	dp_rx_deliver_to_stack(soc, vdev, txrx_peer, nbuf, NULL);
}
#endif

#ifdef WLAN_FEATURE_11BE_MLO
int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
{
	return ((qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE) == 0) ||
		(qdf_mem_cmp(eh->ether_dhost, &vdev->mld_mac_addr.raw[0],
			     QDF_MAC_ADDR_SIZE) == 0));
}
#else
int dp_rx_err_match_dhost(qdf_ether_header_t *eh, struct dp_vdev *vdev)
{
	return (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
			    QDF_MAC_ADDR_SIZE) == 0);
}
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
bool
dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr)
{
	struct dp_soc *soc = vdev->pdev->soc;

	if (!vdev->drop_3addr_mcast)
		return false;

	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return true;

	return false;
}

/**
 * dp_rx_err_is_pn_check_needed() - Check if the packet number check is needed
 *				    for this frame received in REO error ring.
 * @soc: Datapath SOC handle
 * @error: REO error detected or not
 * @error_code: Error code in case of REO error
 *
 * Return: true if the PN check is needed in software,
 *	   false if the PN check is not needed.
 */
static inline bool
dp_rx_err_is_pn_check_needed(struct dp_soc *soc, uint8_t error,
			     uint32_t error_code)
{
	return (soc->features.pn_in_reo_dest &&
		(error == HAL_REO_ERROR_DETECTED &&
		 (hal_rx_reo_is_2k_jump(error_code) ||
		  hal_rx_reo_is_oor_error(error_code) ||
		  hal_rx_reo_is_bar_oor_2k_jump(error_code))));
}
#ifdef DP_WAR_INVALID_FIRST_MSDU_FLAG
static inline void
dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
				  bool first_msdu_in_mpdu_processed)
{
	if (first_msdu_in_mpdu_processed) {
		/*
		 * This is the 2nd indication of first_msdu in the same mpdu.
		 * Skip re-parsing the mpdu_desc_info and use the cached one,
		 * since this msdu is most probably from the current mpdu
		 * which is being processed.
		 */
	} else {
		hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc,
						   qdf_nbuf_data(nbuf),
						   mpdu_desc_info);
	}
}
#else
static inline void
dp_rx_err_populate_mpdu_desc_info(struct dp_soc *soc, qdf_nbuf_t nbuf,
				  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
				  bool first_msdu_in_mpdu_processed)
{
	hal_rx_tlv_populate_mpdu_desc_info(soc->hal_soc, qdf_nbuf_data(nbuf),
					   mpdu_desc_info);
}
#endif
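
/*
 * Illustrative sketch (not part of the driver): how the WAR variant above
 * behaves across the MSDUs of one MPDU. The loop fragment and nbuf names
 * here are hypothetical; only dp_rx_err_populate_mpdu_desc_info() comes
 * from this file.
 */
#if 0
	bool first_msdu_in_mpdu_processed = false;

	/* first MSDU flagged FIRST_MSDU_IN_MPDU: TLVs are parsed and cached */
	dp_rx_err_populate_mpdu_desc_info(soc, first_nbuf, &mpdu_desc_info,
					  first_msdu_in_mpdu_processed);
	first_msdu_in_mpdu_processed = true;

	/*
	 * a spurious second FIRST_MSDU indication in the same MPDU: the WAR
	 * build skips re-parsing and keeps the cached mpdu_desc_info
	 */
	dp_rx_err_populate_mpdu_desc_info(soc, next_nbuf, &mpdu_desc_info,
					  first_msdu_in_mpdu_processed);
#endif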
/**
 * dp_rx_reo_err_entry_process() - Handles REO error entry processing
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: pointer to mpdu level description info
 * @link_desc_va: pointer to msdu_link_desc virtual address
 * @err_code: reo error code fetched from ring entry
 *
 * Function to handle msdus fetched from the msdu link desc; currently
 * supports the REO errors NULL queue, 2K jump and OOR.
 *
 * Return: msdu count processed
 */
static uint32_t
dp_rx_reo_err_entry_process(struct dp_soc *soc,
			    void *ring_desc,
			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			    void *link_desc_va,
			    enum hal_reo_error_code err_code)
{
	uint32_t rx_bufs_used = 0;
	struct dp_pdev *pdev;
	int i;
	uint8_t *rx_tlv_hdr_first;
	uint8_t *rx_tlv_hdr_last;
	uint32_t tid = DP_MAX_TIDS;
	uint16_t peer_id;
	struct dp_rx_desc *rx_desc;
	struct rx_desc_pool *rx_desc_pool;
	qdf_nbuf_t nbuf;
	qdf_nbuf_t next_nbuf;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
	struct buffer_addr_info next_link_desc_addr_info = { 0 };
	/* First field in REO Dst ring Desc is buffer_addr_info */
	void *buf_addr_info = ring_desc;
	qdf_nbuf_t head_nbuf = NULL;
	qdf_nbuf_t tail_nbuf = NULL;
	uint16_t msdu_processed = 0;
	QDF_STATUS status;
	bool ret, is_pn_check_needed;
	uint8_t rx_desc_pool_id;
	struct dp_txrx_peer *txrx_peer = NULL;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	hal_ring_handle_t hal_ring_hdl = soc->reo_exception_ring.hal_srng;
	bool first_msdu_in_mpdu_processed = false;
	bool msdu_dropped = false;
	uint8_t link_id = 0;

	peer_id = dp_rx_peer_metadata_peer_id_get(soc,
						  mpdu_desc_info->peer_meta_data);
	is_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
							  HAL_REO_ERROR_DETECTED,
							  err_code);
more_msdu_link_desc:
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &num_msdus);
	for (i = 0; i < num_msdus; i++) {
		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc,
						msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);
		nbuf = rx_desc->nbuf;

		/*
		 * This is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use) ||
		    qdf_unlikely(!nbuf)) {
			DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
			dp_info_rl("Reaping rx_desc not in use!");
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
			/* ignore duplicate RX desc and continue to process */
			/* Pop out the descriptor */
			msdu_dropped = true;
			continue;
		}

		ret = dp_rx_desc_paddr_sanity_check(rx_desc,
						    msdu_list.paddr[i]);
		if (!ret) {
			DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
			rx_desc->in_err_state = 1;
			msdu_dropped = true;
			continue;
		}

		rx_desc_pool_id = rx_desc->pool_id;
		/* all buffers from an MSDU link belong to same pdev */
		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc_pool_id);

		rx_desc_pool = &soc->rx_desc_buf[rx_desc_pool_id];
		dp_ipa_rx_buf_smmu_mapping_lock(soc);
		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
		rx_desc->unmapped = 1;
		dp_ipa_rx_buf_smmu_mapping_unlock(soc);

		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
		rx_bufs_used++;
		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);

		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);

		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION)) {
			qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
			continue;
		}

		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
					     rx_desc_pool_id)) {
			/* MSDU queued back to the pool */
			msdu_dropped = true;
			head_nbuf = NULL;
			goto process_next_msdu;
		}

		if (is_pn_check_needed) {
			if (msdu_list.msdu_info[i].msdu_flags &
			    HAL_MSDU_F_FIRST_MSDU_IN_MPDU) {
				dp_rx_err_populate_mpdu_desc_info(soc, nbuf,
						mpdu_desc_info,
						first_msdu_in_mpdu_processed);
				first_msdu_in_mpdu_processed = true;
			} else {
				if (!first_msdu_in_mpdu_processed) {
					/*
					 * If no msdu in this mpdu was dropped
					 * due to failed sanity checks, then
					 * it's not expected to hit this
					 * condition. Hence we assert here.
					 */
					if (!msdu_dropped)
						qdf_assert_always(0);

					/*
					 * We do not have valid mpdu_desc_info
					 * to process this nbuf, hence drop it.
					 * TODO - Increment stats
					 */
					goto process_next_msdu;
				}
				/*
				 * DO NOTHING -
				 * Continue using the same mpdu_desc_info
				 * details populated from the first msdu in
				 * the mpdu.
				 */
			}

			status = dp_rx_err_nbuf_pn_check(soc, ring_desc, nbuf);
			if (QDF_IS_STATUS_ERROR(status)) {
				DP_STATS_INC(soc, rx.err.pn_in_dest_check_fail,
					     1);
				goto process_next_msdu;
			}

			peer_id = dp_rx_peer_metadata_peer_id_get(soc,
					mpdu_desc_info->peer_meta_data);

			if (mpdu_desc_info->bar_frame)
				_dp_rx_bar_frame_handle(soc, nbuf,
							mpdu_desc_info, tid,
							HAL_REO_ERROR_DETECTED,
							err_code);
		}

		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);

		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
			/*
			 * For the SG case, only the length of the last skb is
			 * valid, as HW only populates the msdu_len for the
			 * last msdu in the rx link descriptor; use the length
			 * from the last skb to overwrite the head skb for
			 * further SG processing.
			 */
			QDF_NBUF_CB_RX_PKT_LEN(head_nbuf) =
				QDF_NBUF_CB_RX_PKT_LEN(tail_nbuf);
			nbuf = dp_rx_sg_create(soc, head_nbuf);
			qdf_nbuf_set_is_frag(nbuf, 1);
			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
		}
		head_nbuf = NULL;

		switch (err_code) {
		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
			/*
			 * The mpdu start tlv is valid only in the first msdu;
			 * use it for the following msdus as well.
			 */
			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							   rx_tlv_hdr_last))
				tid = hal_rx_mpdu_start_tid_get(
						soc->hal_soc,
						rx_tlv_hdr_first);
			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
					  peer_id, tid);
			break;
		case HAL_REO_ERR_REGULAR_FRAME_OOR:
		case HAL_REO_ERR_BAR_FRAME_OOR:
			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
			break;
		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
			txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(
							soc, peer_id,
							&txrx_ref_handle,
							DP_MOD_ID_RX_ERR);
			if (!txrx_peer)
				dp_info_rl("txrx_peer is null peer_id %u",
					   peer_id);
			soc->arch_ops.dp_rx_null_q_desc_handle(soc, nbuf,
							       rx_tlv_hdr_last,
							       rx_desc_pool_id,
							       txrx_peer,
							       TRUE,
							       link_id);
			if (txrx_peer)
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_RX_ERR);
			break;
		default:
			dp_err_rl("Unsupported error code %d", err_code);
			dp_rx_nbuf_free(nbuf);
		}

process_next_msdu:
		nbuf = head_nbuf;
		while (nbuf) {
			next_nbuf = qdf_nbuf_next(nbuf);
			dp_rx_nbuf_free(nbuf);
			nbuf = next_nbuf;
		}
		msdu_processed++;
		head_nbuf = NULL;
		tail_nbuf = NULL;
	}

	/*
	 * If the MSDUs are spread across multiple link descriptors,
	 * we cannot depend solely on the msdu_count (e.g., if an msdu is
	 * spread across multiple buffers). Hence, it is necessary to check
	 * the next link descriptor and release all the MSDUs that are part
	 * of it.
	 */
	hal_rx_get_next_msdu_link_desc_buf_addr_info(
			link_desc_va,
			&next_link_desc_addr_info);

	if (hal_rx_is_buf_addr_info_valid(
				&next_link_desc_addr_info)) {
		/* Clear the next link desc info for the current link_desc */
		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
		dp_rx_link_desc_return_by_addr(
				soc,
				buf_addr_info,
				HAL_BM_ACTION_PUT_IN_IDLE_LIST);

		hal_rx_buffer_addr_info_get_paddr(
				&next_link_desc_addr_info,
				&buf_info);
		/* buffer_addr_info is the first element of ring_desc */
		hal_rx_buf_cookie_rbm_get(soc->hal_soc,
					  (uint32_t *)&next_link_desc_addr_info,
					  &buf_info);
		link_desc_va =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
		cur_link_desc_addr_info = next_link_desc_addr_info;
		buf_addr_info = &cur_link_desc_addr_info;

		goto more_msdu_link_desc;
	}

	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);

	return rx_bufs_used;
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */
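
/*
 * Reading aid (not part of the driver): the err_code dispatch inside
 * dp_rx_reo_err_entry_process() boils down to the mapping below. Handler
 * names come from this file; the table itself is only a summary.
 *
 *   HAL_REO_ERR_REGULAR_FRAME_2K_JUMP      -> dp_2k_jump_handle()
 *   HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET -> dp_2k_jump_handle()
 *   HAL_REO_ERR_BAR_FRAME_2K_JUMP          -> dp_2k_jump_handle()
 *   HAL_REO_ERR_REGULAR_FRAME_OOR          -> dp_rx_oor_handle()
 *   HAL_REO_ERR_BAR_FRAME_OOR              -> dp_rx_oor_handle()
 *   HAL_REO_ERR_QUEUE_DESC_ADDR_0          -> arch_ops.dp_rx_null_q_desc_handle()
 *   anything else                          -> dp_rx_nbuf_free()
 */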
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
			uint8_t err_code, uint8_t mac_id, uint8_t link_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;

	if (dp_rx_check_pkt_len(soc, pkt_len)) {
		/* Drop & free packet */
		dp_rx_nbuf_free(nbuf);
		return;
	}

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!txrx_peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "txrx_peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
		return;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_rx_err_info_rl("%pK: INVALID vdev %pK OR osif_rx", soc,
				  vdev);
		/* Drop & free packet */
		dp_rx_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	dp_rx_skip_tlvs(soc, nbuf, l2_hdr_offset);

	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
			    htons(QDF_LLC_STP)) {
				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
				goto process_mesh;
			} else {
				goto process_rx;
			}
		}
	}
	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * A WAPI cert AP sends rekey frames as unencrypted, so RXDMA
	 * will report an unencrypted frame error. To pass the WAPI
	 * cert case, SW needs to pass the unencrypted rekey frame to
	 * the stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		goto process_rx;
	}
	/*
	 * In the dynamic WEP case, rekey frames are not encrypted,
	 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled
	 * and key install is already done.
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		dp_rx_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				== QDF_STATUS_SUCCESS) {
			dp_rx_err_info("%pK: mesh pkt filtered", soc);
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			dp_rx_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, txrx_peer);
	}
process_rx:
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;

		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
					      qdf_nbuf_len(nbuf), link_id);
		if (is_broadcast) {
			DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast, 1,
						      qdf_nbuf_len(nbuf),
						      link_id);
		}
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.unicast, 1,
					      qdf_nbuf_len(nbuf),
					      link_id);
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, txrx_peer, link_id);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
		DP_PEER_STATS_FLAT_INC(txrx_peer, to_stack.num, 1);
		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf, NULL,
					    qdf_nbuf_is_ipv4_eapol_pkt(nbuf));
	}

	return;
}
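
/**
 * dp_rx_process_mic_error() - Handle TKIP MIC error frames
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @txrx_peer: txrx peer handle
 *
 * Indicates the MIC failure to the control path through the
 * rx_mic_error ol_ops callback so that countermeasures can be
 * initiated; raw-mode fragments are handed to the defrag path
 * instead.
 *
 * Return: None
 */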
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr,
			     struct dp_txrx_peer *txrx_peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	uint16_t rx_seq, fragno;
	uint8_t is_raw;
	unsigned int tid;
	QDF_STATUS status;
	struct cdp_rx_mic_err_info mic_failure_info;

	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
					    rx_tlv_hdr))
		return;

	if (!txrx_peer) {
		dp_info_rl("txrx_peer not found");
		goto fail;
	}

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_info_rl("VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		dp_info_rl("PDEV not found");
		goto fail;
	}

	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
	if (is_raw) {
		fragno = dp_rx_frag_get_mpdu_frag_number(soc,
							 qdf_nbuf_data(nbuf));
		/* Can get only last fragment */
		if (fragno) {
			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
							qdf_nbuf_data(nbuf));
			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
							qdf_nbuf_data(nbuf));

			status = dp_rx_defrag_add_last_frag(soc, txrx_peer,
							    tid, rx_seq, nbuf);
			dp_info_rl("Frag pkt seq# %d frag# %d consumed status %d!",
				   rx_seq, fragno, status);
			return;
		}
	}

	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.da_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get da_mac_addr");
		goto fail;
	}

	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.ta_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get ta_mac_addr");
		goto fail;
	}

	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = NULL;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);

fail:
	dp_rx_nbuf_free(nbuf);
	return;
}

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_MCAST_MLO)
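/*
 * dp_rx_igmp_handler() - Hand multicast frames to the arch-specific
 * MLO multicast handler when one is registered.
 *
 * Return: true if the nbuf was consumed by the handler
 */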
static bool dp_rx_igmp_handler(struct dp_soc *soc,
			       struct dp_vdev *vdev,
			       struct dp_txrx_peer *peer,
			       qdf_nbuf_t nbuf,
			       uint8_t link_id)
{
	if (soc->arch_ops.dp_rx_mcast_handler) {
		if (soc->arch_ops.dp_rx_mcast_handler(soc, vdev, peer,
						      nbuf, link_id))
			return true;
	}
	return false;
}
#else
static bool dp_rx_igmp_handler(struct dp_soc *soc,
			       struct dp_vdev *vdev,
			       struct dp_txrx_peer *peer,
			       qdf_nbuf_t nbuf,
			       uint8_t link_id)
{
	return false;
}
#endif

/**
 * dp_rx_err_route_hdl() - Function to send EAPOL frames to stack.
 *                         Any other packet which comes in this path
 *                         is freed.
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @txrx_peer: txrx peer handle
 * @rx_tlv_hdr: start of rx tlv header
 * @err_src: rxdma/reo
 * @link_id: link id on which the packet is received
 *
 * This function indicates an EAPOL frame received in the wbm error
 * ring to the stack. Any other frame is dropped.
 *
 * Return: None
 */
static void
dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
		    struct dp_txrx_peer *txrx_peer, uint8_t *rx_tlv_hdr,
		    enum hal_rx_wbm_error_source err_src,
		    uint8_t link_id)
{
	uint32_t pkt_len;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	struct hal_rx_msdu_metadata msdu_metadata;
	bool is_eapol;

	qdf_nbuf_set_rx_chfrag_start(
			nbuf,
			hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
						       rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
								 rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_check_pkt_len(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(
			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!txrx_peer)
		goto drop_nbuf;

	vdev = txrx_peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
	else
		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
					  soc->rx_pkt_tlv_size));

	QDF_NBUF_CB_RX_PEER_ID(nbuf) = txrx_peer->peer_id;

	if (dp_rx_igmp_handler(soc, vdev, txrx_peer, nbuf, link_id))
		return;

	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

	/*
	 * Indicate an EAPOL frame to the stack only when the vap mac
	 * address matches the destination address.
	 */
	is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf);
	if (is_eapol || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		qdf_ether_header_t *eh =
			(qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		if (dp_rx_err_match_dhost(eh, vdev)) {
			DP_STATS_INC_PKT(vdev, rx_i.routed_eapol_pkt, 1,
					 qdf_nbuf_len(nbuf));

			/*
			 * Update the protocol tag in SKB based on
			 * CCE metadata.
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID,
						  true, true);
			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
					      true);
			DP_PEER_TO_STACK_INCC_PKT(txrx_peer, 1,
						  qdf_nbuf_len(nbuf),
						  vdev->pdev->enhanced_stats_en);
			qdf_nbuf_set_exc_frame(nbuf, 1);
			qdf_nbuf_set_next(nbuf, NULL);

			dp_rx_deliver_to_osif_stack(soc, vdev, txrx_peer, nbuf,
						    NULL, is_eapol);
			return;
		}
	}

drop_nbuf:
	DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
		      err_src == HAL_RX_WBM_ERR_SRC_REO);
	DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
		      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);
	dp_rx_nbuf_free(nbuf);
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef DP_RX_DESC_COOKIE_INVALIDATE
/**
 * dp_rx_link_cookie_check() - Validate link desc cookie
 * @ring_desc: ring descriptor
 *
 * Return: qdf status
 */
static inline QDF_STATUS
dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
{
	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
 * @ring_desc: ring descriptor
 *
 * Return: None
 */
static inline void
dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
{
	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
}
#else
static inline QDF_STATUS
dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
{
}
#endif
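
/*
 * Usage sketch (this is the pattern used by dp_rx_err_process() below):
 * the link cookie is validated before a REO error entry is processed
 * and invalidated once the entry has been consumed, so that a stale
 * entry re-read by SW is caught on the next pass.
 *
 *	status = dp_rx_link_cookie_check(ring_desc);
 *	if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
 *		DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
 *		break;
 *	}
 *	...
 *	dp_rx_link_cookie_invalidate(ring_desc);
 */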
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_err_ring_record_entry() - Record rx err ring history
 * @soc: Datapath soc structure
 * @paddr: paddr of the buffer in RX err ring
 * @sw_cookie: SW cookie of the buffer in RX err ring
 * @rbm: Return buffer manager of the buffer in RX err ring
 *
 * Return: None
 */
static inline void
dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
			    uint32_t sw_cookie, uint8_t rbm)
{
	struct dp_buf_info_record *record;
	uint32_t idx;

	if (qdf_unlikely(!soc->rx_err_ring_history))
		return;

	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
					DP_RX_ERR_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->rx_err_ring_history->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->hbi.paddr = paddr;
	record->hbi.sw_cookie = sw_cookie;
	record->hbi.rbm = rbm;
}
#else
static inline void
dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
			    uint32_t sw_cookie, uint8_t rbm)
{
}
#endif

#ifdef HANDLE_RX_REROUTE_ERR
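/*
 * dp_rx_err_handle_msdu_buf() - Handle an MSDU-buffer type entry seen
 * on the REO error ring, where only link descriptors are expected:
 * the buffer is unmapped and freed back to its pool and the rx
 * descriptor is returned to the pdev free list.
 *
 * Return: lmac id for replenish on success, DP_INVALID_LMAC_ID otherwise
 */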
static int dp_rx_err_handle_msdu_buf(struct dp_soc *soc,
				     hal_ring_desc_t ring_desc)
{
	int lmac_id = DP_INVALID_LMAC_ID;
	struct dp_rx_desc *rx_desc;
	struct hal_buf_info hbi;
	struct dp_pdev *pdev;
	struct rx_desc_pool *rx_desc_pool;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);

	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, hbi.sw_cookie);

	/* sanity */
	if (!rx_desc) {
		DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_invalid_cookie, 1);
		goto assert_return;
	}

	if (!rx_desc->nbuf)
		goto assert_return;

	dp_rx_err_ring_record_entry(soc, hbi.paddr,
				    hbi.sw_cookie,
				    hal_rx_ret_buf_manager_get(soc->hal_soc,
							       ring_desc));
	if (hbi.paddr != qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0)) {
		DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
		rx_desc->in_err_state = 1;
		goto assert_return;
	}

	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	/* After this point the rx_desc and nbuf are valid */
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	qdf_assert_always(!rx_desc->unmapped);
	dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, rx_desc->nbuf);
	rx_desc->unmapped = 1;
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
				    rx_desc->pool_id);
	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
	lmac_id = rx_desc->pool_id;
	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
				    &pdev->free_list_tail,
				    rx_desc);
	return lmac_id;

assert_return:
	qdf_assert(0);
	return lmac_id;
}
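
/*
 * dp_rx_err_exception() - Account an unexpected MSDU-buffer entry on
 * the REO error ring and trigger self-recovery when either the total
 * count or the arrival rate of such entries crosses its threshold,
 * then reap the buffer itself.
 *
 * Return: lmac id of the reaped buffer, or DP_INVALID_LMAC_ID
 */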
static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
{
	int ret;
	uint64_t cur_time_stamp;

	DP_STATS_INC(soc, rx.err.reo_err_msdu_buf_rcved, 1);

	/* Recover if overall error count exceeds threshold */
	if (soc->stats.rx.err.reo_err_msdu_buf_rcved >
	    DP_MAX_REG_RX_ROUTING_ERRS_THRESHOLD) {
		dp_err("pkt threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
		       soc->stats.rx.err.reo_err_msdu_buf_rcved,
		       soc->rx_route_err_start_pkt_ts);
		qdf_trigger_self_recovery(NULL, QDF_RX_REG_PKT_ROUTE_ERR);
	}

	cur_time_stamp = qdf_get_log_timestamp_usecs();

	if (!soc->rx_route_err_start_pkt_ts)
		soc->rx_route_err_start_pkt_ts = cur_time_stamp;

	/* Recover if threshold number of packets received in threshold time */
	if ((cur_time_stamp - soc->rx_route_err_start_pkt_ts) >
	    DP_RX_ERR_ROUTE_TIMEOUT_US) {
		soc->rx_route_err_start_pkt_ts = cur_time_stamp;

		if (soc->rx_route_err_in_window >
		    DP_MAX_REG_RX_ROUTING_ERRS_IN_TIMEOUT) {
			qdf_trigger_self_recovery(NULL,
						  QDF_RX_REG_PKT_ROUTE_ERR);
			dp_err("rate threshold breached! reo_err_msdu_buf_rcved %u first err pkt time_stamp %llu",
			       soc->stats.rx.err.reo_err_msdu_buf_rcved,
			       soc->rx_route_err_start_pkt_ts);
		} else {
			soc->rx_route_err_in_window = 1;
		}
	} else {
		soc->rx_route_err_in_window++;
	}

	ret = dp_rx_err_handle_msdu_buf(soc, ring_desc);

	return ret;
}
#else /* HANDLE_RX_REROUTE_ERR */
static int dp_rx_err_exception(struct dp_soc *soc, hal_ring_desc_t ring_desc)
{
	qdf_assert_always(0);

	return DP_INVALID_LMAC_ID;
}
#endif /* HANDLE_RX_REROUTE_ERR */

#ifdef WLAN_MLO_MULTI_CHIP
/**
 * dp_idle_link_bm_id_check() - WAR for HW issue
 * @soc: DP SOC handle
 * @rbm: idle link RBM value
 * @ring_desc: reo error link descriptor
 *
 * This is a WAR for a HW issue where a link descriptor of the partner
 * soc is received because packets were wrongly interpreted as
 * fragments.
 *
 * Return: true in case link desc is consumed
 *	   false in other cases
 */
static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
				     void *ring_desc)
{
	struct dp_soc *replenish_soc = NULL;

	/* link desc of the same soc: nothing to consume here */
	if (rbm == soc->idle_link_bm_id)
		return false;

	if (soc->arch_ops.dp_soc_get_by_idle_bm_id)
		replenish_soc =
			soc->arch_ops.dp_soc_get_by_idle_bm_id(soc, rbm);

	qdf_assert_always(replenish_soc);

	/*
	 * For the WIN use case we should only get fragment packets in
	 * this ring; since fragmentation is not supported for MLO, we
	 * should not see link descriptors from the other soc.
	 *
	 * Drop all packets from the partner soc and replenish the
	 * descriptors.
	 */
	dp_handle_wbm_internal_error(replenish_soc, ring_desc,
				     HAL_WBM_RELEASE_RING_2_DESC_TYPE);

	return true;
}
#else
static bool dp_idle_link_bm_id_check(struct dp_soc *soc, uint8_t rbm,
				     void *ring_desc)
{
	return false;
}
#endif

uint32_t
dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t err_status;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	uint16_t num_msdus;
	struct dp_rx_desc *rx_desc = NULL;
	QDF_STATUS status;
	bool ret;
	uint32_t error_code = 0;
	bool sw_pn_check_needed;
	int max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
	int i, rx_bufs_reaped_total;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		dp_rx_err_err("%pK: HAL RING Access Failed -- %pK", soc,
			      hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_peek(hal_soc,
						  hal_ring_hdl)))) {
		DP_STATS_INC(soc, rx.err_ring_pkts, 1);
		err_status = hal_rx_err_status_get(hal_soc, ring_desc);
		buf_type = hal_rx_reo_buf_type_get(hal_soc, ring_desc);

		if (err_status == HAL_REO_ERROR_DETECTED)
			error_code = hal_rx_get_reo_error_code(hal_soc,
							       ring_desc);

		qdf_mem_set(&mpdu_desc_info, sizeof(mpdu_desc_info), 0);
		sw_pn_check_needed = dp_rx_err_is_pn_check_needed(soc,
								  err_status,
								  error_code);
		if (!sw_pn_check_needed) {
			/*
			 * MPDU desc info will be present in the REO desc
			 * only in the below scenarios
			 * 1) pn_in_dest_disabled: always
			 * 2) pn_in_dest enabled: all cases except 2k-jump
			 *    and OOR errors
			 */
			hal_rx_mpdu_desc_info_get(hal_soc, ring_desc,
						  &mpdu_desc_info);
		}

		if (HAL_RX_REO_DESC_MSDU_COUNT_GET(ring_desc) == 0)
			goto next_entry;

		/*
		 * For REO error ring, only MSDU LINK DESC is expected.
		 * Handle HAL_RX_REO_MSDU_BUF_ADDR_TYPE exception case.
		 */
		if (qdf_unlikely(buf_type != HAL_RX_REO_MSDU_LINK_DESC_TYPE)) {
			int lmac_id;

			lmac_id = dp_rx_err_exception(soc, ring_desc);
			if (lmac_id >= 0)
				rx_bufs_reaped[lmac_id] += 1;
			goto next_entry;
		}

		hal_rx_buf_cookie_rbm_get(hal_soc, (uint32_t *)ring_desc,
					  &hbi);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((hbi.sw_cookie >> LINK_DESC_ID_SHIFT) &
				  soc->link_desc_id_start);

		if (dp_idle_link_bm_id_check(soc, hbi.rbm, ring_desc)) {
			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
			goto next_entry;
		}

		status = dp_rx_link_cookie_check(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
			break;
		}

		hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);
		if (!num_msdus ||
		    !dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[0])) {
			dp_rx_err_info_rl("Invalid MSDU info num_msdus %u cookie: 0x%x",
					  num_msdus, msdu_list.sw_cookie[0]);
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
			goto next_entry;
		}

		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
					    msdu_list.sw_cookie[0],
					    msdu_list.rbm[0]);
		/* TODO: BE - check if the RBM is to be checked for all chips */
		if (qdf_unlikely((msdu_list.rbm[0] !=
				  dp_rx_get_rx_bm_id(soc)) &&
				 (msdu_list.rbm[0] !=
				  soc->idle_link_bm_id) &&
				 (msdu_list.rbm[0] !=
				  dp_rx_get_defrag_bm_id(soc)))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				dp_rx_err_err("%pK: Invalid RBM %d",
					      soc, msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			goto next_entry;
		}

		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
						soc,
						msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		if (sw_pn_check_needed) {
			goto process_reo_error_code;
		}

		if (mpdu_desc_info.bar_frame) {
			qdf_assert_always(mpdu_desc_info.msdu_count == 1);

			dp_rx_bar_frame_handle(soc, ring_desc, rx_desc,
					       &mpdu_desc_info, err_status,
					       error_code);

			rx_bufs_reaped[mac_id] += 1;
			goto next_entry;
		}

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for the
			 * fragmented case. We drop the msdus and release
			 * the link desc back if there is more than one
			 * msdu in the link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				goto next_entry;
			}

			/*
			 * This is an unlikely scenario where the host is
			 * reaping a descriptor which it already reaped
			 * just a while ago but is yet to replenish it back
			 * to HW. In this case the host will dump the last
			 * 128 descriptors, including the software
			 * descriptor rx_desc, and assert.
			 */
			if (qdf_unlikely(!rx_desc->in_use)) {
				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
				dp_info_rl("Reaping rx_desc not in use!");
				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
							   ring_desc, rx_desc);
				/* ignore duplicate RX desc and continue */
				/* Pop out the descriptor */
				goto next_entry;
			}

			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
							    msdu_list.paddr[0]);
			if (!ret) {
				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
				rx_desc->in_err_state = 1;
				goto next_entry;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);
			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);

			goto next_entry;
		}

process_reo_error_code:
		/*
		 * Expect REO errors to be handled after this point
		 */
		qdf_assert_always(err_status == HAL_REO_ERROR_DETECTED);
		dp_info_rl("Got pkt with REO ERROR: %d", error_code);

		switch (error_code) {
		case HAL_REO_ERR_PN_CHECK_FAILED:
		case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			break;
		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
		case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
		case HAL_REO_ERR_REGULAR_FRAME_OOR:
		case HAL_REO_ERR_BAR_FRAME_OOR:
		case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_reo_err_entry_process(
					soc,
					ring_desc,
					&mpdu_desc_info,
					link_desc_va,
					error_code);

			rx_bufs_reaped[mac_id] += count;
			break;
		case HAL_REO_ERR_QUEUE_DESC_INVALID:
		case HAL_REO_ERR_AMPDU_IN_NON_BA:
		case HAL_REO_ERR_NON_BA_DUPLICATE:
		case HAL_REO_ERR_BA_DUPLICATE:
		case HAL_REO_ERR_BAR_FRAME_NO_BA_SESSION:
		case HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN:
		case HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET:
			DP_STATS_INC(soc, rx.err.reo_error[error_code], 1);
			count = dp_rx_msdus_drop(soc, ring_desc,
						 &mpdu_desc_info,
						 &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			break;
		default:
			/* Assert if unexpected error type */
			qdf_assert_always(0);
		}

next_entry:
		dp_rx_link_cookie_invalidate(ring_desc);
		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);

		rx_bufs_reaped_total = 0;
		for (i = 0; i < MAX_PDEV_CNT; i++)
			rx_bufs_reaped_total += rx_bufs_reaped[i];

		if (dp_rx_reap_loop_pkt_limit_hit(soc, rx_bufs_reaped_total,
						  max_reap_limit))
			break;
	}

done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail,
						false);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

#ifdef DROP_RXDMA_DECRYPT_ERR
/**
 * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
 *
 * Return: true if rxdma decrypt err frames are handled and false otherwise
 */
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return false;
}
#else
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return true;
}
#endif
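
/*
 * dp_rx_wbm_sg_list_last_msdu_war() - When the wbm_sg_last_msdu_war
 * flag is set for the target, propagate the MSDU length read from the
 * TLVs of the tail nbuf to every segment of the current WBM SG list.
 */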
void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc)
{
	if (soc->wbm_sg_last_msdu_war) {
		uint32_t len;
		qdf_nbuf_t temp = soc->wbm_sg_param.wbm_sg_nbuf_tail;

		len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc,
						     qdf_nbuf_data(temp));
		temp = soc->wbm_sg_param.wbm_sg_nbuf_head;
		while (temp) {
			QDF_NBUF_CB_RX_PKT_LEN(temp) = len;
			temp = temp->next;
		}
	}
}

#ifdef RX_DESC_DEBUG_CHECK
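/**
 * dp_rx_wbm_desc_nbuf_sanity_check() - Check the buffer paddr reported
 * in the WBM release ring descriptor against the paddr held by the SW
 * rx descriptor; dump the ring descriptor on mismatch.
 * @soc: core DP main context
 * @hal_ring_hdl: WBM release ring handle
 * @ring_desc: WBM release ring descriptor
 * @rx_desc: SW rx descriptor
 *
 * Return: QDF_STATUS_SUCCESS on match, QDF_STATUS_E_FAILURE otherwise
 */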
QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
					    hal_ring_handle_t hal_ring_hdl,
					    hal_ring_desc_t ring_desc,
					    struct dp_rx_desc *rx_desc)
{
	struct hal_buf_info hbi;

	hal_rx_wbm_rel_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
	/* Sanity check for possible buffer paddr corruption */
	if (dp_rx_desc_paddr_sanity_check(rx_desc, hbi.paddr))
		return QDF_STATUS_SUCCESS;

	hal_srng_dump_ring_desc(soc->hal_soc, hal_ring_hdl, ring_desc);

	return QDF_STATUS_E_FAILURE;
}
#else
QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
					    hal_ring_handle_t hal_ring_hdl,
					    hal_ring_desc_t ring_desc,
					    struct dp_rx_desc *rx_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif
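
/**
 * dp_rx_is_sg_formation_required() - Check whether SG formation is
 * needed for the given WBM error descriptor info
 * @info: WBM error descriptor info from the release ring
 *
 * Return: true for NULL-queue (REO) and unencrypted (RXDMA) errors,
 *	   the only handlers with SG support; false otherwise
 */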
bool
dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
{
	/*
	 * Currently only the Null Queue and Unencrypted error handlers
	 * have support for SG. The other error handlers do not deal
	 * with SG buffers.
	 */
	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
		return true;

	return false;
}

#ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
			      qdf_nbuf_t nbuf)
{
	/*
	 * In the fast-recycle case the TX driver can avoid invalidating
	 * the buffer when SFE forwards it. We need to invalidate the
	 * TLV headers after writing to this location.
	 */
	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
				      (void *)(nbuf->data +
					       soc->rx_pkt_tlv_size +
					       L3_HEADER_PAD));
}
#else
void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
			      qdf_nbuf_t nbuf)
{
}
#endif

#ifndef CONFIG_NBUF_AP_PLATFORM
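/*
 * On platforms without CONFIG_NBUF_AP_PLATFORM, the peer id and the
 * WBM error info travel in the rx TLV header, so the helpers below
 * parse them out of the TLVs; the variants in the #else branch read
 * the same fields from the nbuf control block instead.
 */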
static inline uint16_t
dp_rx_get_peer_id(struct dp_soc *soc,
		  uint8_t *rx_tlv_hdr,
		  qdf_nbuf_t nbuf)
{
	uint32_t peer_mdata = 0;

	peer_mdata = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
						   rx_tlv_hdr);
	return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
}

static inline void
dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
				 qdf_nbuf_t nbuf,
				 uint8_t *rx_tlv_hdr,
				 union hal_wbm_err_info_u *wbm_err)
{
	hal_rx_priv_info_get_from_tlv(soc->hal_soc, rx_tlv_hdr,
				      (uint8_t *)&wbm_err->info,
				      sizeof(union hal_wbm_err_info_u));
}

void
dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
			       qdf_nbuf_t nbuf,
			       union hal_wbm_err_info_u wbm_err)
{
	hal_rx_priv_info_set_in_tlv(soc->hal_soc,
				    qdf_nbuf_data(nbuf),
				    (uint8_t *)&wbm_err.info,
				    sizeof(union hal_wbm_err_info_u));
}
#else
static inline uint16_t
dp_rx_get_peer_id(struct dp_soc *soc,
		  uint8_t *rx_tlv_hdr,
		  qdf_nbuf_t nbuf)
{
	uint32_t peer_mdata = QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf);

	return dp_rx_peer_metadata_peer_id_get(soc, peer_mdata);
}

static inline void
dp_rx_get_wbm_err_info_from_nbuf(struct dp_soc *soc,
				 qdf_nbuf_t nbuf,
				 uint8_t *rx_tlv_hdr,
				 union hal_wbm_err_info_u *wbm_err)
{
	wbm_err->info = QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf);
}

void
dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
			       qdf_nbuf_t nbuf,
			       union hal_wbm_err_info_u wbm_err)
{
	QDF_NBUF_CB_RX_ERROR_CODE_INFO(nbuf) = wbm_err.info;
}
#endif /* CONFIG_NBUF_AP_PLATFORM */

uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_soc_handle_t hal_soc;
	uint32_t rx_bufs_used = 0;
	struct dp_pdev *dp_pdev;
	uint8_t *rx_tlv_hdr;
	bool is_tkip_mic_err;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf, next;
	union hal_wbm_err_info_u wbm_err = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;
	uint8_t link_id = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	nbuf_head = soc->arch_ops.dp_rx_wbm_err_reap_desc(int_ctx, soc,
							  hal_ring_hdl,
							  quota,
							  &rx_bufs_used);
	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_txrx_peer *txrx_peer;
		struct dp_peer *peer;
		uint16_t peer_id;
		uint8_t err_code;
		uint8_t *tlv_hdr;
		dp_txrx_ref_handle txrx_ref_handle = NULL;

		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * Retrieve the wbm desc info from the nbuf CB/TLV, so we
		 * can handle error cases appropriately.
		 */
		dp_rx_get_wbm_err_info_from_nbuf(soc, nbuf,
						 rx_tlv_hdr,
						 &wbm_err);

		peer_id = dp_rx_get_peer_id(soc,
					    rx_tlv_hdr,
					    nbuf);
		txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
							   &txrx_ref_handle,
							   DP_MOD_ID_RX_ERR);
		if (!txrx_peer)
			dp_info_rl("peer is null peer_id %u err_src %u, "
				   "REO: push_rsn %u err_code %u, "
				   "RXDMA: push_rsn %u err_code %u",
				   peer_id, wbm_err.info_bit.wbm_err_src,
				   wbm_err.info_bit.reo_psh_rsn,
				   wbm_err.info_bit.reo_err_code,
				   wbm_err.info_bit.rxdma_psh_rsn,
				   wbm_err.info_bit.rxdma_err_code);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;
		/*
		 * Form the SG for msdu-continuation buffers;
		 * QCN9000 has this support.
		 */
		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			nbuf = dp_rx_sg_create(soc, nbuf);
			next = nbuf->next;
			/*
			 * SG error handling is not done correctly,
			 * drop SG frames for now.
			 */
			dp_rx_nbuf_free(nbuf);
			dp_info_rl("scattered msdu dropped");
			nbuf = next;
			if (txrx_peer)
				dp_txrx_peer_unref_delete(txrx_ref_handle,
							  DP_MOD_ID_RX_ERR);
			continue;
		}

		dp_rx_nbuf_set_link_id_from_tlv(soc, rx_tlv_hdr, nbuf);

		pool_id = wbm_err.info_bit.pool_id;
		dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);

		if (dp_pdev && dp_pdev->link_peer_stats &&
		    txrx_peer && txrx_peer->is_mld_peer) {
			link_id = dp_rx_get_stats_arr_idx_from_link_id(
								nbuf,
								txrx_peer);
		} else {
			link_id = 0;
		}

		if (wbm_err.info_bit.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err.info_bit.reo_psh_rsn
					== HAL_RX_WBM_REO_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err.info_bit.reo_err_code], 1);
				/* increment @pdev level */
				if (dp_pdev)
					DP_STATS_INC(dp_pdev, err.reo_error,
						     1);

				switch (wbm_err.info_bit.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err.info_bit.pool_id;
					soc->arch_ops.dp_rx_null_q_desc_handle(
								soc, nbuf,
								rx_tlv_hdr,
								pool_id,
								txrx_peer,
								FALSE,
								link_id);
					break;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.jump_2k_err,
									  1,
									  link_id);

					pool_id = wbm_err.info_bit.pool_id;

					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
						hal_rx_msdu_start_msdu_len_get(
							soc->hal_soc, rx_tlv_hdr);
					nbuf->next = NULL;
					dp_2k_jump_handle(soc, nbuf,
							  rx_tlv_hdr,
							  peer_id, tid);
					break;
				case HAL_REO_ERR_REGULAR_FRAME_OOR:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.oor_err,
									  1,
									  link_id);
					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
						hal_rx_msdu_start_msdu_len_get(
							soc->hal_soc, rx_tlv_hdr);
					nbuf->next = NULL;
					dp_rx_oor_handle(soc, nbuf,
							 peer_id,
							 rx_tlv_hdr);
					break;
				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
				case HAL_REO_ERR_BAR_FRAME_OOR:
					peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
					if (peer) {
						dp_rx_err_handle_bar(soc, peer,
								     nbuf);
						dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
					}
					dp_rx_nbuf_free(nbuf);
					break;
				case HAL_REO_ERR_PN_CHECK_FAILED:
				case HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.pn_err,
									  1,
									  link_id);
					dp_rx_nbuf_free(nbuf);
					break;
				default:
					dp_info_rl("Got pkt with REO ERROR: %d",
						   wbm_err.info_bit.reo_err_code);
					dp_rx_nbuf_free(nbuf);
				}
			} else if (wbm_err.info_bit.reo_psh_rsn
					== HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
						    rx_tlv_hdr,
						    HAL_RX_WBM_ERR_SRC_REO,
						    link_id);
			} else {
				/* should not enter here */
				dp_rx_err_alert("invalid reo push reason %u",
						wbm_err.info_bit.reo_psh_rsn);
				dp_rx_nbuf_free(nbuf);
				qdf_assert_always(0);
			}
		} else if (wbm_err.info_bit.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err.info_bit.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err.info_bit.rxdma_err_code], 1);
				/* increment @pdev level */
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err.info_bit.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
				case HAL_RXDMA_ERR_WIFI_PARSE:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.rxdma_wifi_parse_err,
									  1,
									  link_id);

					pool_id = wbm_err.info_bit.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								txrx_peer,
								wbm_err.info_bit.rxdma_err_code,
								pool_id,
								link_id);
					break;
				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								txrx_peer);
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.mic_err,
									  1,
									  link_id);
					break;
				case HAL_RXDMA_ERR_DECRYPT:
					/*
					 * All the TKIP-MIC failures are
					 * treated as Decrypt Errors for
					 * QCN9224 targets.
					 */
					is_tkip_mic_err =
						hal_rx_msdu_end_is_tkip_mic_err(hal_soc, rx_tlv_hdr);

					if (is_tkip_mic_err && txrx_peer) {
						dp_rx_process_mic_error(soc, nbuf,
									rx_tlv_hdr,
									txrx_peer);
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.mic_err,
									  1,
									  link_id);
						break;
					}

					if (txrx_peer) {
						DP_PEER_PER_PKT_STATS_INC(txrx_peer,
									  rx.err.decrypt_err,
									  1,
									  link_id);
						dp_rx_nbuf_free(nbuf);
						break;
					}

					if (!dp_handle_rxdma_decrypt_err()) {
						dp_rx_nbuf_free(nbuf);
						break;
					}

					pool_id = wbm_err.info_bit.pool_id;
					err_code = wbm_err.info_bit.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr, NULL,
								err_code,
								pool_id,
								link_id);
					break;
				case HAL_RXDMA_MULTICAST_ECHO:
					if (txrx_peer)
						DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
									      rx.mec_drop, 1,
									      qdf_nbuf_len(nbuf),
									      link_id);
					dp_rx_nbuf_free(nbuf);
					break;
				case HAL_RXDMA_UNAUTHORIZED_WDS:
					pool_id = wbm_err.info_bit.pool_id;
					err_code = wbm_err.info_bit.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr,
								txrx_peer,
								err_code,
								pool_id,
								link_id);
					break;
				default:
					dp_rx_nbuf_free(nbuf);
					dp_err_rl("RXDMA error %d",
						  wbm_err.info_bit.rxdma_err_code);
				}
			} else if (wbm_err.info_bit.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
				dp_rx_err_route_hdl(soc, nbuf, txrx_peer,
						    rx_tlv_hdr,
						    HAL_RX_WBM_ERR_SRC_RXDMA,
						    link_id);
			} else if (wbm_err.info_bit.rxdma_psh_rsn
					== HAL_RX_WBM_RXDMA_PSH_RSN_FLUSH) {
				dp_rx_err_err("rxdma push reason %u",
					      wbm_err.info_bit.rxdma_psh_rsn);
				DP_STATS_INC(soc, rx.err.rx_flush_count, 1);
				dp_rx_nbuf_free(nbuf);
			} else {
				/* should not enter here */
				dp_rx_err_alert("invalid rxdma push reason %u",
						wbm_err.info_bit.rxdma_psh_rsn);
				dp_rx_nbuf_free(nbuf);
				qdf_assert_always(0);
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (txrx_peer)
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_RX_ERR);
		nbuf = next;
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc found
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
			 hal_rxdma_desc_t rxdma_dst_ring_desc,
			 void *rx_desc)
{
	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
	dp_rx_dump_info_and_assert(
			soc,
			soc->rx_rel_ring.hal_srng,
			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
			rx_desc);
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	hal_rxdma_desc_t ring_desc;
	struct rx_desc_pool *rx_desc_pool;

	if (!pdev) {
		dp_rx_err_debug("%pK: pdev is null for mac_id = %d",
				soc, mac_id);
		return rx_bufs_used;
	}

	msdu = NULL;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
				     &buf_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert_always(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/*
			 * If the msdus belong to an NSS offloaded radio
			 * and the rbm is not SW3_BM, return the msdu_link
			 * descriptor without freeing the msdus (nbufs) and
			 * let these buffers be given to the NSS completion
			 * ring for NSS to free them.
			 * Else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] !=
				HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id) &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						soc->arch_ops.dp_rx_desc_cookie_2_va(
							soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;
					/*
					 * This is an unlikely scenario
					 * where the host is reaping a
					 * descriptor which it already
					 * reaped just a while ago but is
					 * yet to replenish it back to HW.
					 * In this case the host will dump
					 * the last 128 descriptors
					 * including the software descriptor
					 * rx_desc and assert.
					 */
					ring_desc = rxdma_dst_ring_desc;
					if (qdf_unlikely(!rx_desc->in_use)) {
						dup_desc_dbg(soc,
							     ring_desc,
							     rx_desc);
						continue;
					}

					if (rx_desc->unmapped == 0) {
						rx_desc_pool =
							&soc->rx_desc_buf[rx_desc->pool_id];
						dp_ipa_rx_buf_smmu_mapping_lock(soc);
						dp_rx_nbuf_unmap_pool(soc,
								      rx_desc_pool,
								      msdu);
						rx_desc->unmapped = 1;
						dp_ipa_rx_buf_smmu_mapping_unlock(soc);
					}

					dp_rx_err_debug("%pK: msdu_nbuf=%pK",
							soc, msdu);

					dp_rx_buffer_pool_nbuf_free(soc, msdu,
								    rx_desc->pool_id);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail, rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purposes.
		 */
		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
					     buf_info.paddr, buf_info.sw_cookie,
					     buf_info.rbm);

		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
					      &buf_info);
		dp_rx_link_desc_return_by_addr(soc,
					       (hal_buff_addrinfo_t)
					       rx_link_buf_info,
					       bm_action);
	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
	if (pdev)
		DP_STATS_INC(pdev, err.rxdma_error, 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		dp_rx_err_err("%pK: Packet received with Decrypt error", soc);
	}

	return rx_bufs_used;
}
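
/**
 * dp_rxdma_err_process() - RxDMA error-destination ring processing
 * @int_ctx: interrupt context
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: upper limit of descriptors that can be reaped
 *
 * Reaps the rxdma error destination ring, pops and frees the MSDUs
 * hanging off each link descriptor, and replenishes the reaped
 * buffers.
 *
 * Return: number of rx buffers reaped (work done)
 */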
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;

	if (!err_dst_srng) {
		dp_rx_err_err("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
			      soc, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
		dp_rx_err_err("%pK: HAL Monitor Destination Ring access failed -- %pK",
			      soc, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	dp_srng_access_end(int_ctx, soc, err_dst_srng);

	if (rx_bufs_used) {
		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];
		} else {
			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
			rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
		}

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail, false);

		work_done += rx_bufs_used;
	}

	return work_done;
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED
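
/**
 * dp_wbm_int_err_mpdu_pop() - Free the MSDUs attached to a link
 * descriptor reported through a WBM internal error
 * @soc: core DP main context
 * @mac_id: lmac id / pool id
 * @rxdma_dst_ring_desc: link descriptor buf addr info
 * @head: head of the rx desc free list to fill
 * @tail: tail of the rx desc free list to fill
 * @rx_bufs_used: per-pool count of reaped buffers to update
 *
 * Return: None
 */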
static inline void
dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
			hal_rxdma_desc_t rxdma_dst_ring_desc,
			union dp_rx_desc_list_elem_t **head,
			union dp_rx_desc_list_elem_t **tail,
			uint32_t *rx_bufs_used)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t msdu_cnt, i;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	struct rx_desc_pool *rx_desc_pool;
	struct dp_rx_desc *rx_desc;

	msdu = NULL;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(soc->hal_soc, rxdma_dst_ring_desc,
				     &buf_info, &msdu_cnt);

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		if (!rx_msdu_link_desc) {
			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
			break;
		}

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			for (i = 0; i < num_msdus; i++) {
				if (!dp_rx_is_sw_cookie_valid(soc, msdu_list.sw_cookie[i])) {
					dp_rx_err_info_rl("Invalid MSDU info cookie: 0x%x",
							  msdu_list.sw_cookie[i]);
					continue;
				}

				rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
							soc,
							msdu_list.sw_cookie[i]);
				qdf_assert_always(rx_desc);
				rx_desc_pool =
					&soc->rx_desc_buf[rx_desc->pool_id];
				msdu = rx_desc->nbuf;

				/*
				 * This is an unlikely scenario where the
				 * host is reaping a descriptor which it
				 * already reaped just a while ago but is
				 * yet to replenish it back to HW.
				 */
				if (qdf_unlikely(!rx_desc->in_use) ||
				    qdf_unlikely(!msdu)) {
					dp_rx_err_info_rl("Reaping rx_desc not in use!");
					continue;
				}

				dp_ipa_rx_buf_smmu_mapping_lock(soc);
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, msdu);
				rx_desc->unmapped = 1;
				dp_ipa_rx_buf_smmu_mapping_unlock(soc);

				dp_rx_buffer_pool_nbuf_free(soc, msdu,
							    rx_desc->pool_id);
				rx_bufs_used[rx_desc->pool_id]++;
				dp_rx_add_to_free_desc_list(head,
							    tail, rx_desc);
			}
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purposes.
		 */
		hal_rxdma_buff_addr_info_set(soc->hal_soc, rx_link_buf_info,
					     buf_info.paddr, buf_info.sw_cookie,
					     buf_info.rbm);

		hal_rx_mon_next_link_desc_get(soc->hal_soc, rx_msdu_link_desc,
					      &buf_info);
		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
					       rx_link_buf_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	} while (buf_info.paddr);
}
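
/**
 * dp_handle_wbm_internal_error() - Handle a buffer or link descriptor
 * released via a WBM internal error: free the attached nbuf(s), return
 * the rx descriptors to the free list and replenish the refill rings.
 * @soc: core DP main context
 * @hal_desc: WBM release ring descriptor
 * @buf_type: HAL_WBM_RELEASE_RING_2 buffer or descriptor type
 *
 * Return: None
 */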
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type)
{
	struct hal_buf_info buf_info = {0};
	struct dp_rx_desc *rx_desc = NULL;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = {0};
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;
	uint8_t mac_id;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, hal_desc, &buf_info);

	if (!buf_info.paddr) {
		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
		return;
	}

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)hal_desc,
				  &buf_info);

	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
		rx_desc = soc->arch_ops.dp_rx_desc_cookie_2_va(
							soc,
							buf_info.sw_cookie);

		if (rx_desc && rx_desc->nbuf) {
			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
			dp_ipa_rx_buf_smmu_mapping_lock(soc);
			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool,
					      rx_desc->nbuf);
			rx_desc->unmapped = 1;
			dp_ipa_rx_buf_smmu_mapping_unlock(soc);

			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
						    rx_desc->pool_id);
			dp_rx_add_to_free_desc_list(&head,
						    &tail,
						    rx_desc);

			rx_bufs_reaped[rx_desc->pool_id]++;
		}
	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
		pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(buf_info.sw_cookie);

		dp_wbm_int_err_mpdu_pop(soc, pool_id, hal_desc,
					&head, &tail, rx_bufs_reaped);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		struct rx_desc_pool *rx_desc_pool;
		struct dp_srng *dp_rxdma_srng;

		if (!rx_bufs_reaped[mac_id])
			continue;

		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool,
					rx_bufs_reaped[mac_id],
					&head, &tail, false);
	}
}

#endif /* QCA_HOST_MODE_WIFI_DISABLED */