dp_rx_err.c

/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#include "dp_ipa.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"
#include "dp_rx_buffer_pool.h"

/* Max number of buffers in the invalid peer SG list */
#define DP_MAX_INVALID_BUFFERS 10

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is looped
 * back on the same vap or a different vap.
 *
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase = NULL;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);

	/*
	 * if the received pkt's src mac addr matches the vdev
	 * mac address, then drop the pkt as it is looped back
	 */
	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
			  vdev->mac_addr.raw,
			  QDF_MAC_ADDR_SIZE)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/* If the received pkt's src mac addr matches the MAC addr of a
	 * wired PC behind the STA, or of a wireless STA behind the
	 * repeater, then drop the pkt as it is looped back.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what is STA's sa_idx.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In these kinds of scenarios where sa is valid but
			 * ast is not in ast_table, we use the below API to get
			 * AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_hash_find_by_vdevid
				(soc, &data[QDF_MAC_ADDR_SIZE],
				 peer->vdev->vdev_id);
			if (ase) {
				ase->ast_idx = sa_idx;
				soc->ast_table[sa_idx] = ase;
				ase->is_mapped = TRUE;
			}
		}
	} else {
		ase = dp_peer_ast_hash_find_by_pdevid(soc,
						      &data[QDF_MAC_ADDR_SIZE],
						      vdev->pdev->pdev_id);
	}

	if (ase) {
		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP "QDF_MAC_ADDR_FMT", %d %d",
				  QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]),
				  vdev->pdev->pdev_id,
				  ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer_id != peer->peer_id)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac "QDF_MAC_ADDR_FMT,
				  QDF_MAC_ADDR_REF(&data[QDF_MAC_ADDR_SIZE]));
			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}
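
/*
 * Illustrative usage (a sketch mirroring the NULL-queue handler later in
 * this file, not additional driver logic): callers in the Rx error path
 * drop the frame whenever the MEC check reports a loopback:
 *
 *	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
 *		// this is a looped back MCBC pkt, drop it
 *		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
 *		qdf_nbuf_free(nbuf);
 *		return;
 *	}
 */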

void dp_rx_link_desc_refill_duplicate_check(
				struct dp_soc *soc,
				struct hal_buf_info *buf_info,
				hal_buff_addrinfo_t ring_buf_info)
{
	struct hal_buf_info current_link_desc_buf_info = { 0 };

	/* do duplicate link desc address check */
	hal_rx_buffer_addr_info_get_paddr(ring_buf_info,
					  &current_link_desc_buf_info);
	if (qdf_unlikely(current_link_desc_buf_info.paddr ==
			 buf_info->paddr)) {
		dp_info_rl("duplicate link desc addr: %llu, cookie: 0x%x",
			   current_link_desc_buf_info.paddr,
			   current_link_desc_buf_info.sw_cookie);
		DP_STATS_INC(soc, rx.err.dup_refill_link_desc, 1);
	}
	*buf_info = current_link_desc_buf_info;
}

/**
 * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
 * HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to program in the WBM release
 *	       descriptor (e.g. put in idle list)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	/* do duplicate link desc address check */
	dp_rx_link_desc_refill_duplicate_check(
				soc,
				&soc->last_op_info.wbm_rel_link_desc,
				link_desc_addr);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);

		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
			   srng->ring_id,
			   soc->stats.rx.err.hal_ring_access_full_fail);
		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			   *srng->u.src_ring.hp_addr,
			   srng->u.src_ring.reap_hp,
			   *srng->u.src_ring.tp_addr,
			   srng->u.src_ring.cached_tp);
		QDF_BUG(0);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}
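
/*
 * Illustrative usage (a sketch based on the callers later in this file,
 * not new driver logic): once all MSDUs of a link descriptor have been
 * reaped, the descriptor is handed back to the WBM idle list:
 *
 *	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
 *				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 */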

/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 * (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to program in the WBM release
 *	       descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

/**
 * dp_rx_msdus_drop() - Drops all MSDUs in an MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: pointer to mac id, filled with the pool id of the dropped
 *	    buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		 uint8_t *mac_id,
		 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;
	struct rx_desc_pool *rx_desc_pool;
	struct dp_rx_desc *rx_desc;
	/* First field in REO Dst ring Desc is buffer_addr_info */
	void *buf_addr_info = ring_desc;
	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
	struct buffer_addr_info next_link_desc_addr_info = { 0 };

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

more_msdu_link_desc:
	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count); i++) {
		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
						      msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from an MSDU link desc belong to the same pdev */
		*mac_id = rx_desc->pool_id;
		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  "pdev is null for pool_id = %d",
				  rx_desc->pool_id);
			return rx_bufs_used;
		}

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
						  rx_desc_pool->buf_size,
						  false);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
					     QDF_DMA_FROM_DEVICE,
					     rx_desc_pool->buf_size);
		rx_desc->unmapped = 1;

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);

		/* Just free the buffers */
		dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf, *mac_id);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/*
	 * If the MSDUs are spread across multiple link descriptors,
	 * we cannot depend solely on the msdu_count (e.g. if an MSDU is
	 * spread across multiple buffers). Hence, it is
	 * necessary to check the next link descriptor and release
	 * all the MSDUs that are part of it.
	 */
	hal_rx_get_next_msdu_link_desc_buf_addr_info(
			link_desc_va,
			&next_link_desc_addr_info);

	if (hal_rx_is_buf_addr_info_valid(
				&next_link_desc_addr_info)) {
		/* Clear the next link desc info for the current link_desc */
		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);

		dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
		hal_rx_buffer_addr_info_get_paddr(
				&next_link_desc_addr_info,
				&buf_info);
		cur_link_desc_addr_info = next_link_desc_addr_info;
		buf_addr_info = &cur_link_desc_addr_info;

		link_desc_va =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		goto more_msdu_link_desc;
	}
	quota--;
	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: pointer to mac id, filled with the pool id of the dropped
 *	    buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP determines that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK "QDF_MAC_ADDR_FMT,
			  peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));

		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_oor_handle() - Handles an msdu that hit the OOR error
 *
 * @soc: core txrx main context
 * @nbuf: pointer to msdu skb
 * @rx_tlv_hdr: start of rx tlv header
 * @mpdu_desc_info: pointer to mpdu level description info
 * @peer_id: dp peer ID
 * @tid: dp tid
 *
 * This function processes the msdu delivered from the REO2TCL
 * ring with error type OOR
 *
 * Return: None
 */
static void
dp_rx_oor_handle(struct dp_soc *soc,
		 qdf_nbuf_t nbuf,
		 uint8_t *rx_tlv_hdr,
		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		 uint16_t peer_id,
		 uint8_t tid)
{
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
	struct dp_peer *peer = NULL;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
	if (!peer || tid >= DP_MAX_TIDS) {
		dp_info_rl("peer or tid %d not valid", tid);
		goto free_nbuf;
	}

	/*
	 * For REO error 7 (OOR), if it is a retry frame under an active BA
	 * session, then it is likely an SN-duplicated frame; do not deliver
	 * EAPOL to the stack in this case, since the connection might fail
	 * due to a duplicated EAP response.
	 */
	if (mpdu_desc_info->mpdu_flags & HAL_MPDU_F_RETRY_BIT &&
	    peer->rx_tid[tid].ba_status == DP_RX_BA_ACTIVE)
		frame_mask &= ~FRAME_MASK_IPV4_EAPOL;

	if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask,
					rx_tlv_hdr)) {
		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		return;
	}

free_nbuf:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);

	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
	qdf_nbuf_free(nbuf);
}
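
/*
 * Note on frame_mask (illustrative, derived from the handler above): only
 * "special" frames matching the mask survive an OOR error; everything else
 * is dropped. E.g. to let ARP and IPv4 DHCP through while suppressing
 * EAPOL, a caller would pass:
 *
 *	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP;
 *
 *	dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask, rx_tlv_hdr);
 */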

/**
 * dp_rx_reo_err_entry_process() - Handler for REO error entry processing
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: pointer to mpdu level description info
 * @link_desc_va: pointer to msdu_link_desc virtual address
 * @err_code: REO error code fetched from ring entry
 *
 * Function to handle msdus fetched from the msdu link desc; currently
 * it only supports the 2K jump and OOR errors.
 *
 * Return: msdu count processed.
 */
static uint32_t
dp_rx_reo_err_entry_process(struct dp_soc *soc,
			    void *ring_desc,
			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			    void *link_desc_va,
			    enum hal_reo_error_code err_code)
{
	uint32_t rx_bufs_used = 0;
	struct dp_pdev *pdev;
	int i;
	uint8_t *rx_tlv_hdr_first;
	uint8_t *rx_tlv_hdr_last;
	uint32_t tid = DP_MAX_TIDS;
	uint16_t peer_id;
	struct dp_rx_desc *rx_desc;
	struct rx_desc_pool *rx_desc_pool;
	qdf_nbuf_t nbuf;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
	struct buffer_addr_info next_link_desc_addr_info = { 0 };
	/* First field in REO Dst ring Desc is buffer_addr_info */
	void *buf_addr_info = ring_desc;
	qdf_nbuf_t head_nbuf = NULL;
	qdf_nbuf_t tail_nbuf = NULL;
	uint16_t msdu_processed = 0;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
					mpdu_desc_info->peer_meta_data);

more_msdu_link_desc:
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &num_msdus);
	for (i = 0; i < num_msdus; i++) {
		rx_desc = dp_rx_cookie_2_va_rxdma_buf(
					soc,
					msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from an MSDU link belong to the same pdev */
		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);

		nbuf = rx_desc->nbuf;
		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
						  rx_desc_pool->buf_size,
						  false);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_FROM_DEVICE,
					     rx_desc_pool->buf_size);
		rx_desc->unmapped = 1;

		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
		rx_bufs_used++;
		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);

		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);

		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION))
			continue;

		if (dp_rx_buffer_pool_refill(soc, head_nbuf,
					     rx_desc->pool_id)) {
			/* MSDU queued back to the pool */
			goto process_next_msdu;
		}

		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);

		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
			nbuf = dp_rx_sg_create(head_nbuf);
			qdf_nbuf_set_is_frag(nbuf, 1);
			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
		}

		/*
		 * The mpdu_start description TLV is valid only on the
		 * first msdu; reuse it for the msdus that follow.
		 */
		if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
						   rx_tlv_hdr_last))
			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
							rx_tlv_hdr_first);

		switch (err_code) {
		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
					  peer_id, tid);
			break;
		case HAL_REO_ERR_REGULAR_FRAME_OOR:
			dp_rx_oor_handle(soc, nbuf, rx_tlv_hdr_last,
					 mpdu_desc_info, peer_id, tid);
			break;
		default:
			dp_err_rl("Unsupported error code %d", err_code);
			qdf_nbuf_free(nbuf);
		}

process_next_msdu:
		msdu_processed++;
		head_nbuf = NULL;
		tail_nbuf = NULL;
	}

	/*
	 * If the MSDUs are spread across multiple link descriptors,
	 * we cannot depend solely on the msdu_count (e.g. if an MSDU is
	 * spread across multiple buffers). Hence, it is
	 * necessary to check the next link descriptor and release
	 * all the MSDUs that are part of it.
	 */
	hal_rx_get_next_msdu_link_desc_buf_addr_info(
			link_desc_va,
			&next_link_desc_addr_info);

	if (hal_rx_is_buf_addr_info_valid(
				&next_link_desc_addr_info)) {
		/* Clear the next link desc info for the current link_desc */
		hal_rx_clear_next_msdu_link_desc_buf_addr_info(link_desc_va);
		dp_rx_link_desc_return_by_addr(
				soc,
				buf_addr_info,
				HAL_BM_ACTION_PUT_IN_IDLE_LIST);

		hal_rx_buffer_addr_info_get_paddr(
				&next_link_desc_addr_info,
				&buf_info);
		link_desc_va =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
		cur_link_desc_addr_info = next_link_desc_addr_info;
		buf_addr_info = &cur_link_desc_addr_info;

		goto more_msdu_link_desc;
	}

	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	if (qdf_unlikely(msdu_processed != mpdu_desc_info->msdu_count))
		DP_STATS_INC(soc, rx.err.msdu_count_mismatch, 1);

	return rx_bufs_used;
}

#ifdef DP_INVALID_PEER_ASSERT
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
		do {                                \
			qdf_assert_always(!(head)); \
			qdf_assert_always(!(tail)); \
		} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of an mpdu
 * to the pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr, uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

	if (!dp_pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "pdev is null for mac_id = %d", mac_id);
		return mpdu_done;
	}

	/* If the invalid peer SG list has reached its max size, free the
	 * buffers in the list and treat the current buffer as the start of
	 * a new list.
	 *
	 * The current logic to detect the last buffer from the attention
	 * TLV is not reliable in OFDMA UL scenarios, hence add a max-buffers
	 * check to avoid the list piling up.
	 */
	if (!dp_pdev->first_nbuf ||
	    (dp_pdev->invalid_peer_head_msdu &&
	     QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
	     (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = hal_rx_get_ppdu_id(soc->hal_soc,
						      rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is completely
		 * received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
						   &(dp_pdev->ppdu_info.rx_status));
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; keep the check for debugging purposes,
	 * in case of corner-case scenarios.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);

	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}
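
/*
 * Illustrative usage (a sketch mirroring dp_rx_null_q_desc_handle() later
 * in this file, not new logic): the caller chains MSDUs until the helper
 * reports the MPDU is complete, then hands the whole list to the
 * invalid-peer path:
 *
 *	mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
 *	dp_rx_process_invalid_peer_wrapper(soc,
 *					   pdev->invalid_peer_head_msdu,
 *					   mpdu_done, pool_id);
 */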

static
void dp_rx_err_handle_bar(struct dp_soc *soc,
			  struct dp_peer *peer,
			  qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr;
	unsigned char type, subtype;
	uint16_t start_seq_num;
	uint32_t tid;
	struct ieee80211_frame_bar *bar;

	/*
	 * 1. Is this a BAR frame? If not, discard it.
	 * 2. If it is, get the peer id, tid, ssn.
	 * 2a. Do a tid update.
	 */
	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + SIZE_OF_DATA_RX_TLV);

	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (!(type == IEEE80211_FC0_TYPE_CTL &&
	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
		dp_err_rl("Not a BAR frame!");
		return;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	qdf_assert_always(tid < DP_MAX_TIDS);

	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;

	dp_info_rl("tid %u window_size %u start_seq_num %u",
		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);

	dp_rx_tid_update_wifi3(peer, tid,
			       peer->rx_tid[tid].ba_win_size,
			       start_seq_num);
}
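
/*
 * Worked example (802.11 sequence-control layout, stated here as a
 * reading aid, not driver code): i_seq packs a 4-bit fragment number in
 * the low bits and a 12-bit sequence number above it. So for a BAR with
 * i_seq = 0x0ABC on the wire:
 *
 *	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
 *	// with IEEE80211_SEQ_SEQ_SHIFT == 4, start_seq_num == 0x0AB
 */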

static void
dp_rx_bar_frame_handle(struct dp_soc *soc,
		       hal_ring_desc_t ring_desc,
		       struct dp_rx_desc *rx_desc,
		       struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		       uint8_t error)
{
	qdf_nbuf_t nbuf;
	struct dp_pdev *pdev;
	struct dp_peer *peer;
	struct rx_desc_pool *rx_desc_pool;
	uint16_t peer_id;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	nbuf = rx_desc->nbuf;
	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
					  rx_desc_pool->buf_size,
					  false);
	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
				     QDF_DMA_FROM_DEVICE,
				     rx_desc_pool->buf_size);
	rx_desc->unmapped = 1;
	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	peer_id =
		hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
						 rx_tlv_hdr);
	peer = dp_peer_get_ref_by_id(soc, peer_id,
				     DP_MOD_ID_RX_ERR);
	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
					rx_tlv_hdr);
	pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);

	if (!peer)
		goto next;

	dp_info("BAR frame: peer = "QDF_MAC_ADDR_FMT
		" peer_id = %d"
		" tid = %u"
		" SSN = %d"
		" error status = %d",
		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
		peer->peer_id,
		tid,
		mpdu_desc_info->mpdu_seq,
		error);

	switch (error) {
	case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
		/* fallthrough */
	case HAL_REO_ERR_BAR_FRAME_OOR:
		dp_rx_err_handle_bar(soc, peer, nbuf);
		DP_STATS_INC(soc,
			     rx.err.reo_error[error], 1);
		break;
	default:
		DP_STATS_INC(soc, rx.bar_frame, 1);
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
next:
	dp_rx_link_desc_return(soc, ring_desc,
			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	dp_rx_add_to_free_desc_list(&pdev->free_list_head,
				    &pdev->free_list_tail,
				    rx_desc);
	qdf_nbuf_free(nbuf);
}

/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 * on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in a non-BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while the Rx window size is still initialized to a value of 2. Or
 * it may also happen if the negotiated window size is 1 but the peer
 * sends aggregates.
 *
 * Return: None
 */
void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}

	if (tid >= DP_MAX_TIDS) {
		dp_info_rl("invalid tid");
		goto nbuf_deliver;
	}

	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);

	/* only if BA session is active, allow sending a Delba */
	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto nbuf_deliver;
	}

	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		if (soc->cdp_soc.ol_ops->send_delba) {
			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent, 1);
			soc->cdp_soc.ol_ops->send_delba(
					peer->vdev->pdev->soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					tid,
					rx_tid->delba_rcode);
		}
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

nbuf_deliver:
	if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask,
					rx_tlv_hdr)) {
		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		return;
	}

free_nbuf:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
	qdf_nbuf_free(nbuf);
}
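
/*
 * Recovery note (illustrative summary of the handler above, not new
 * logic): on a 2K jump with an active BA session, the host tears the
 * session down so the peer re-negotiates a sane window:
 *
 *	soc->cdp_soc.ol_ops->send_delba(ctrl_psoc, vdev_id, peer_mac, tid,
 *					IEEE80211_REASON_QOS_SETUP_REQUIRED);
 *
 * (ctrl_psoc, vdev_id and peer_mac above are shorthand for the peer/vdev
 * fields passed in the function body.)
 */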

#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750)
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * In certain types of packets if peer_id is not correct then
 * driver may not be able to find the peer. Try finding the peer by
 * addr_2 of the received MPDU. If the peer is found, then most likely
 * sw_peer_id & ast_idx are corrupted.
 *
 * Return: true if the peer is found by addr_2 of the received MPDU,
 * else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "pdev is null for pool_id = %d", pool_id);
		return false;
	}
	/*
	 * WAR- In certain types of packets if peer_id is not correct then
	 * driver may not be able to find the peer. Try finding the peer by
	 * addr_2 of the received MPDU.
	 */
	if (wh)
		peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
					      DP_VDEV_ALL, DP_MOD_ID_RX_ERR);
	if (peer) {
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);
		dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
		return true;
	}
	return false;
}

/**
 * dp_rx_check_pkt_len() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pktlen > RX_DATA_BUFFER_SIZE, else return false
 *
 */
static inline
bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}

#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}

static inline
bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
	return false;
}
#endif
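
/*
 * Illustrative sketch (mirrors the callers below, not new logic): the
 * length fed to dp_rx_check_pkt_len() is the TLV-reported MSDU length
 * plus the L3 header pad and the Rx TLV area:
 *
 *	pkt_len = msdu_len + l3_hdr_pad + RX_PKT_TLVS_LEN;
 *	if (dp_rx_check_pkt_len(soc, pkt_len))
 *		goto drop_nbuf;
 */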

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *                              descriptor violation on either a
 *                              REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is set up. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination ring or a WBM
 * release ring.
 *
 * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully; QDF status
 * code if nbuf could not be handled or was dropped.
 */
static QDF_STATUS
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
			 struct dp_peer *peer)
{
	uint32_t pkt_len;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;
	struct hal_rx_msdu_metadata msdu_metadata;
	uint16_t sa_idx = 0;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							       rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
								 rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + RX_PKT_TLVS_LEN;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_check_pkt_len(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(
			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return QDF_STATUS_E_FAILURE;

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);

		if (!pdev) {
			dp_err_rl("pdev is null for pool_id = %d", pool_id);
			return QDF_STATUS_E_FAILURE;
		}

		dp_err_rl("peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));

		/* QCN9000 has the support enabled */
		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
			mpdu_done = true;
			nbuf->next = NULL;
			/* Trigger invalid peer handler wrapper */
			dp_rx_process_invalid_peer_wrapper(soc,
							   nbuf, mpdu_done,
							   pool_id);
		} else {
			mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr,
						      pool_id);
			/* Trigger invalid peer handler wrapper */
			dp_rx_process_invalid_peer_wrapper(soc,
					pdev->invalid_peer_head_msdu,
					mpdu_done, pool_id);
		}

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}

		return QDF_STATUS_E_FAILURE;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
					  RX_PKT_TLVS_LEN));

	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
			goto drop_nbuf;
		}
	}

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	/*
	 * In qwrap mode if the received packet matches any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets
	 * originated from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr))) {
		dp_err_rl("free buffer for multicast packet");
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		goto drop_nbuf;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
		dp_err_rl("mcast Policy Check Drop pkt");
		goto drop_nbuf;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf,
					msdu_metadata);

	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		qdf_nbuf_set_next(nbuf, NULL);
		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
				 qdf_nbuf_len(nbuf));

		/*
		 * Update the protocol tag in SKB based on
		 * CCE metadata
		 */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID,
					  true, true);

		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf,
				      rx_tlv_hdr, true);

		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
				 soc->hal_soc, rx_tlv_hdr) &&
				 (vdev->rx_decap_type ==
				  htt_cmn_pkt_type_ethernet))) {
			eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
			DP_STATS_INC_PKT(peer, rx.multicast, 1,
					 qdf_nbuf_len(nbuf));
			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
				DP_STATS_INC_PKT(peer, rx.bcast, 1,
						 qdf_nbuf_len(nbuf));
		}

		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	qdf_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}
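
/*
 * Illustrative invocation (a sketch; the actual caller lives in the
 * REO/WBM error-processing loop outside this excerpt): the handler either
 * delivers, queues, or frees the nbuf itself, so the caller must not
 * touch the buffer afterwards:
 *
 *	if (dp_rx_null_q_desc_handle(soc, nbuf, rx_tlv_hdr, pool_id, peer)
 *	    == QDF_STATUS_SUCCESS) {
 *		// nbuf was delivered to the stack; nothing more to do
 *	}
 */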

/**
 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
 *                             or wifi parse error frames to the OS
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 * @err_code: rxdma err code
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * Return: None
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			uint8_t err_code, uint8_t mac_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	if (dp_rx_check_pkt_len(soc, pkt_len)) {
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}
	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	dp_rx_skip_tlvs(nbuf, l2_hdr_offset);

	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
			    htons(QDF_LLC_STP)) {
				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
				goto process_mesh;
			} else {
				goto process_rx;
			}
		}
	}
	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report unencrypted frame error.
	 * To pass WAPI cert case, SW needs to pass unencrypted
	 * rekey frame to stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		goto process_rx;
	}
	/*
	 * In dynamic WEP case rekey frames are not encrypted,
	 * similar to WAPI. Allow EAPOL when 8021+wep is enabled and
	 * key install is already done.
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				      == QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
				  FL("mesh pkt filtered"));
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			qdf_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
	}
process_rx:
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
		if (is_broadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
		DP_STATS_INC(peer, rx.to_stack.num, 1);
		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);
	}

	return;
}
  1289. /**
  1290. * dp_rx_process_mic_error(): Function to pass mic error indication to umac
  1291. * @soc: core DP main context
  1292. * @nbuf: buffer pointer
  1293. * @rx_tlv_hdr: start of rx tlv header
  1294. * @peer: peer handle
  1295. *
  1296. * return: void
  1297. */
  1298. void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
  1299. uint8_t *rx_tlv_hdr, struct dp_peer *peer)
  1300. {
  1301. struct dp_vdev *vdev = NULL;
  1302. struct dp_pdev *pdev = NULL;
  1303. struct ol_if_ops *tops = NULL;
  1304. uint16_t rx_seq, fragno;
  1305. uint8_t is_raw;
  1306. unsigned int tid;
  1307. QDF_STATUS status;
  1308. struct cdp_rx_mic_err_info mic_failure_info;
  1309. if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
  1310. rx_tlv_hdr))
  1311. return;
  1312. if (!peer) {
  1313. dp_info_rl("peer not found");
  1314. goto fail;
  1315. }
  1316. vdev = peer->vdev;
  1317. if (!vdev) {
  1318. dp_info_rl("VDEV not found");
  1319. goto fail;
  1320. }
  1321. pdev = vdev->pdev;
  1322. if (!pdev) {
  1323. dp_info_rl("PDEV not found");
  1324. goto fail;
  1325. }
  1326. is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
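	/*
	 * For raw (802.11) decap, a non-zero fragment number means this
	 * is a trailing fragment of an MPDU already sitting in the
	 * defrag waitlist; hand it to the defrag engine instead of
	 * raising a MIC failure for it on its own.
	 */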
	if (is_raw) {
		fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));
		/* Can get only last fragment */
		if (fragno) {
			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
							qdf_nbuf_data(nbuf));
			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
							qdf_nbuf_data(nbuf));

			status = dp_rx_defrag_add_last_frag(soc, peer,
							    tid, rx_seq, nbuf);
			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
				   "status %d !", rx_seq, fragno, status);
			return;
		}
	}

	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.da_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get da_mac_addr");
		goto fail;
	}

	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.ta_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get ta_mac_addr");
		goto fail;
	}

	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = NULL;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);

fail:
	qdf_nbuf_free(nbuf);
	return;
}

#ifdef DP_RX_DESC_COOKIE_INVALIDATE
/**
 * dp_rx_link_cookie_check() - Validate link desc cookie
 * @ring_desc: ring descriptor
 *
 * Return: qdf status
 */
static inline QDF_STATUS
dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
{
	if (qdf_unlikely(HAL_RX_REO_BUF_LINK_COOKIE_INVALID_GET(ring_desc)))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_link_cookie_invalidate() - Invalidate link desc cookie
 * @ring_desc: ring descriptor
 *
 * Return: None
 */
static inline void
dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
{
	HAL_RX_REO_BUF_LINK_COOKIE_INVALID_SET(ring_desc);
}
#else
static inline QDF_STATUS
dp_rx_link_cookie_check(hal_ring_desc_t ring_desc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_link_cookie_invalidate(hal_ring_desc_t ring_desc)
{
}
#endif
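/*
 * Note: when DP_RX_DESC_COOKIE_INVALIDATE is not defined, the two
 * helpers above compile down to no-ops, so the error-ring fast path
 * pays no extra cost for the stale-cookie defense.
 */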
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_err_ring_record_entry() - Record rx err ring history
 * @soc: Datapath soc structure
 * @paddr: paddr of the buffer in RX err ring
 * @sw_cookie: SW cookie of the buffer in RX err ring
 * @rbm: Return buffer manager of the buffer in RX err ring
 *
 * Return: None
 */
static inline void
dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
			    uint32_t sw_cookie, uint8_t rbm)
{
	struct dp_buf_info_record *record;
	uint32_t idx;

	if (qdf_unlikely(!soc->rx_err_ring_history))
		return;

	idx = dp_history_get_next_index(&soc->rx_err_ring_history->index,
					DP_RX_ERR_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->rx_err_ring_history->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->hbi.paddr = paddr;
	record->hbi.sw_cookie = sw_cookie;
	record->hbi.rbm = rbm;
}
#else
static inline void
dp_rx_err_ring_record_entry(struct dp_soc *soc, uint64_t paddr,
			    uint32_t sw_cookie, uint8_t rbm)
{
}
#endif
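/*
 * A minimal usage sketch for the function below (hedged: the real
 * caller lives in the interrupt service path, e.g. a
 * dp_service_srngs()-style loop, and "budget" here is illustrative):
 *
 *	work_done = dp_rx_err_process(int_ctx, soc,
 *				      soc->reo_exception_ring.hal_srng,
 *				      budget);
 *	budget -= work_done;
 */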
uint32_t
dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	uint16_t num_msdus;
	struct dp_rx_desc *rx_desc = NULL;
	QDF_STATUS status;
	bool ret;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_peek(hal_soc,
						  hal_ring_hdl)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.msdu_count == 0)
			goto next_entry;

		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
				  LINK_DESC_ID_START);

		status = dp_rx_link_cookie_check(ring_desc);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			DP_STATS_INC(soc, rx.err.invalid_link_cookie, 1);
			break;
		}

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);
		dp_rx_err_ring_record_entry(soc, msdu_list.paddr[0],
					    msdu_list.sw_cookie[0],
					    msdu_list.rbm[0]);
		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
				  HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST) &&
				 (msdu_list.rbm[0] != DP_DEFRAG_RBM))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Invalid RBM %d"),
					  msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM)*/
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			goto next_entry;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
						      msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		if (mpdu_desc_info.bar_frame) {
			qdf_assert_always(mpdu_desc_info.msdu_count == 1);

			dp_rx_bar_frame_handle(soc,
					       ring_desc,
					       rx_desc,
					       &mpdu_desc_info,
					       error);

			rx_bufs_reaped[mac_id] += 1;

			goto next_entry;
		}

		dp_info("Got pkt with REO ERROR: %d", error);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for the
			 * fragmented case. We drop the msdus and release
			 * the link desc back if there is more than one
			 * msdu in the link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				goto next_entry;
			}

			/*
			 * this is an unlikely scenario where the host is
			 * reaping a descriptor which it already reaped
			 * just a while ago but is yet to replenish it
			 * back to HW.
			 * In this case host will dump the last 128
			 * descriptors including the software descriptor
			 * rx_desc and assert.
			 */
			if (qdf_unlikely(!rx_desc->in_use)) {
				DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
				dp_info_rl("Reaping rx_desc not in use!");
				dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
							   ring_desc, rx_desc);
				/* ignore duplicate RX desc and continue */
				/* Pop out the descriptor */
				goto next_entry;
			}

			ret = dp_rx_desc_paddr_sanity_check(rx_desc,
							    msdu_list.paddr[0]);
			if (!ret) {
				DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
				rx_desc->in_err_state = 1;
				goto next_entry;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			goto next_entry;
		}

		/*
		 * Expect REO errors to be handled after this point
		 */
		qdf_assert_always(error == HAL_REO_ERROR_DETECTED);

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			goto next_entry;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_reo_err_entry_process(
					soc,
					ring_desc,
					&mpdu_desc_info,
					link_desc_va,
					HAL_REO_ERR_REGULAR_FRAME_2K_JUMP);

			rx_bufs_reaped[mac_id] += count;
			goto next_entry;
		}

		if (hal_rx_reo_is_oor_error(ring_desc)) {
			DP_STATS_INC(
				soc,
				rx.err.
				reo_error[HAL_REO_ERR_REGULAR_FRAME_OOR],
				1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_reo_err_entry_process(
					soc,
					ring_desc,
					&mpdu_desc_info,
					link_desc_va,
					HAL_REO_ERR_REGULAR_FRAME_OOR);

			rx_bufs_reaped[mac_id] += count;
			goto next_entry;
		}

		/* Assert if unexpected error type */
		qdf_assert_always(0);

next_entry:
		dp_rx_link_cookie_invalidate(ring_desc);
		hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
	}

done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

#ifdef DROP_RXDMA_DECRYPT_ERR
/**
 * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
 *
 * Return: true if rxdma decrypt err frames are handled and false otherwise
 */
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return false;
}
#else
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return true;
}
#endif

static inline bool
dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
{
	/*
	 * Currently only the NULL-queue and unencrypted error handlers
	 * have support for SG; the other error handlers do not deal
	 * with SG buffers.
	 */
	if (((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) &&
	     (info->reo_err_code == HAL_REO_ERR_QUEUE_DESC_ADDR_0)) ||
	    ((info->wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) &&
	     (info->rxdma_err_code == HAL_RXDMA_ERR_UNENCRYPTED)))
		return true;

	return false;
}
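/*
 * A minimal usage sketch for the function below (hedged: in practice
 * the interrupt service path drains the WBM release ring with
 * something along these lines, and "budget" is illustrative):
 *
 *	work_done = dp_rx_wbm_err_process(int_ctx, soc,
 *					  soc->rx_rel_ring.hal_srng,
 *					  budget);
 */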
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;
	uint8_t msdu_continuation = 0;
	bool process_sg_buf = false;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota)) {
		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
		if (qdf_unlikely(!ring_desc))
			break;

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert_always(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
		nbuf = rx_desc->nbuf;
		rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
						  rx_desc_pool->buf_size,
						  false);
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_FROM_DEVICE,
					     rx_desc_pool->buf_size);
		rx_desc->unmapped = 1;
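		/*
		 * SG accumulation, in brief: while the continuation bit
		 * is set, buffers are parked in soc->wbm_sg_param; the
		 * first buffer of the chain supplies the total MSDU
		 * length. When a buffer arrives with the continuation
		 * bit clear, the chain is complete and is merged into
		 * the delivery list below.
		 */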
		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support &&
				 dp_rx_is_sg_formation_required(&wbm_err_info))) {
			/* SG is detected from continuation bit */
			msdu_continuation =
				hal_rx_wbm_err_msdu_continuation_get(hal_soc,
								     ring_desc);
			if (msdu_continuation &&
			    !(soc->wbm_sg_param.wbm_is_first_msdu_in_sg)) {
				/* Update length from first buffer in SG */
				soc->wbm_sg_param.wbm_sg_desc_msdu_len =
					hal_rx_msdu_start_msdu_len_get(
						qdf_nbuf_data(nbuf));
				soc->wbm_sg_param.wbm_is_first_msdu_in_sg = true;
			}

			if (msdu_continuation) {
				/* MSDU continued packets */
				qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
			} else {
				/* This is the terminal packet in SG */
				qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
				qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
				QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
					soc->wbm_sg_param.wbm_sg_desc_msdu_len;
				process_sg_buf = true;
			}
		}

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
			DP_RX_LIST_APPEND(soc->wbm_sg_param.wbm_sg_nbuf_head,
					  soc->wbm_sg_param.wbm_sg_nbuf_tail,
					  nbuf);
			if (process_sg_buf) {
				if (!dp_rx_buffer_pool_refill(
					soc,
					soc->wbm_sg_param.wbm_sg_nbuf_head,
					rx_desc->pool_id))
					DP_RX_MERGE_TWO_LIST(
						nbuf_head, nbuf_tail,
						soc->wbm_sg_param.wbm_sg_nbuf_head,
						soc->wbm_sg_param.wbm_sg_nbuf_tail);
				dp_rx_wbm_sg_list_reset(soc);
				process_sg_buf = false;
			}
		} else if (!dp_rx_buffer_pool_refill(soc, nbuf,
						     rx_desc->pool_id)) {
			DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, nbuf);
		}

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);

		/*
		 * if continuation bit is set then we have MSDU spread
		 * across multiple buffers, let us not decrement quota
		 * till we reap all buffers of that MSDU.
		 */
		if (qdf_likely(!msdu_continuation))
			quota -= 1;
	}
done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_peer *peer;
		uint16_t peer_id;
		uint8_t err_code;
		uint8_t *tlv_hdr;

		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
							   rx_tlv_hdr);
		peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX_ERR);

		if (!peer)
			dp_info_rl("peer is null peer_id %u err_src %u err_rsn %u",
				   peer_id, wbm_err_info.wbm_err_src,
				   wbm_err_info.reo_psh_rsn);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;

		/*
		 * Form the SG for msdu continued buffers
		 * QCN9000 has this support
		 */
		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			nbuf = dp_rx_sg_create(nbuf);
			next = nbuf->next;
			/*
			 * SG error handling is not done correctly,
			 * drop SG frames for now.
			 */
			qdf_nbuf_free(nbuf);
			dp_info_rl("scattered msdu dropped");
			nbuf = next;
			if (peer)
				dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
			continue;
		}

		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
			    == HAL_RX_WBM_REO_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err_info.reo_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev, err.reo_error,
						     1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					dp_rx_null_q_desc_handle(soc, nbuf,
								 rx_tlv_hdr,
								 pool_id, peer);
					break;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;

					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						peer_id =
						hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
										 rx_tlv_hdr);
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
						hal_rx_msdu_start_msdu_len_get(
							rx_tlv_hdr);
					nbuf->next = NULL;
					dp_2k_jump_handle(soc, nbuf,
							  rx_tlv_hdr,
							  peer_id, tid);
					break;
				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
				case HAL_REO_ERR_BAR_FRAME_OOR:
					if (peer)
						dp_rx_err_handle_bar(soc,
								     peer,
								     nbuf);
					qdf_nbuf_free(nbuf);
					break;

				default:
					dp_info_rl("Got pkt with REO ERROR: %d",
						   wbm_err_info.reo_err_code);
					qdf_nbuf_free(nbuf);
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
			    == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:

				case HAL_RXDMA_ERR_WIFI_PARSE:
					pool_id = wbm_err_info.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								peer,
								wbm_err_info.
								rxdma_err_code,
								pool_id);
					break;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					if (peer)
						DP_STATS_INC(peer, rx.err.mic_err, 1);
					break;

				case HAL_RXDMA_ERR_DECRYPT:
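					/*
					 * Decrypt-error policy: if the
					 * peer is known, count the error
					 * against it and drop. With
					 * DROP_RXDMA_DECRYPT_ERR set,
					 * peer-less decrypt errors are
					 * dropped too; otherwise they are
					 * handed to dp_rx_process_rxdma_err()
					 * with a NULL peer so the
					 * invalid-peer path runs.
					 */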
					if (peer) {
						DP_STATS_INC(peer, rx.err.
							     decrypt_err, 1);
						qdf_nbuf_free(nbuf);
						break;
					}

					if (!dp_handle_rxdma_decrypt_err()) {
						qdf_nbuf_free(nbuf);
						break;
					}

					pool_id = wbm_err_info.pool_id;
					err_code = wbm_err_info.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr, NULL,
								err_code,
								pool_id);
					break;

				default:
					qdf_nbuf_free(nbuf);
					dp_err_rl("RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (peer)
			dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);

		nbuf = next;
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc found
 *
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
			 hal_rxdma_desc_t rxdma_dst_ring_desc,
			 void *rx_desc)
{
	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
	dp_rx_dump_info_and_assert(
		soc,
		soc->rx_rel_ring.hal_srng,
		hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
		rx_desc);
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdu in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	hal_rxdma_desc_t ring_desc;
	struct rx_desc_pool *rx_desc_pool;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "pdev is null for mac_id = %d", mac_id);
		return rx_bufs_used;
	}

	msdu = 0;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}
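	/*
	 * Walk the MSDU link descriptor chain: each link descriptor
	 * carries a short list of MSDUs, and the loop below follows the
	 * next-link pointer until the buffer address reads back as zero
	 * (end of chain).
	 */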
	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert_always(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* if the msdus belong to an NSS offloaded radio &&
			 * the rbm is not SW1_BM then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). let
			 * these buffers be given to NSS completion ring for
			 * NSS to free them.
			 * else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;
					/*
					 * this is an unlikely scenario
					 * where the host is reaping
					 * a descriptor which
					 * it already reaped just a while ago
					 * but is yet to replenish
					 * it back to HW.
					 * In this case host will dump
					 * the last 128 descriptors
					 * including the software descriptor
					 * rx_desc and assert.
					 */
					ring_desc = rxdma_dst_ring_desc;
					if (qdf_unlikely(!rx_desc->in_use)) {
						dup_desc_dbg(soc,
							     ring_desc,
							     rx_desc);
						continue;
					}

					rx_desc_pool = &soc->
						rx_desc_buf[rx_desc->pool_id];
					dp_ipa_handle_rx_buf_smmu_mapping(
							soc, msdu,
							rx_desc_pool->buf_size,
							false);
					qdf_nbuf_unmap_nbytes_single(
							soc->osdev, msdu,
							QDF_DMA_FROM_DEVICE,
							rx_desc_pool->buf_size);
					rx_desc->unmapped = 1;

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					dp_rx_buffer_pool_nbuf_free(soc, msdu,
							rx_desc->pool_id);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
		dp_rx_link_desc_return_by_addr(soc,
					       (hal_buff_addrinfo_t)
						rx_link_buf_info,
					       bm_action);
	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
	if (pdev)
		DP_STATS_INC(pdev, err.rxdma_error, 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}
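/*
 * A minimal usage sketch for the function below (hedged: the interrupt
 * service path calls this once per lmac whose RXDMA error destination
 * ring raised an interrupt; "budget" is illustrative):
 *
 *	work_done = dp_rxdma_err_process(int_ctx, soc, mac_id, budget);
 */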
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Init \
			  Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Init \
			  Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	dp_srng_access_end(int_ctx, soc, err_dst_srng);

	if (rx_bufs_used) {
		if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
		else
			dp_rxdma_srng = &soc->rx_refill_buf_ring[pdev->lmac_id];
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}

static inline uint32_t
dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
			hal_rxdma_desc_t rxdma_dst_ring_desc,
			union dp_rx_desc_list_elem_t **head,
			union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0, msdu_cnt, i;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];

	msdu = 0;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &msdu_cnt);

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		if (!rx_msdu_link_desc) {
			DP_STATS_INC(soc,
				     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC],
				     1);
			break;
		}

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			for (i = 0; i < num_msdus; i++) {
				struct dp_rx_desc *rx_desc =
					dp_rx_cookie_2_va_rxdma_buf(
							soc,
							msdu_list.sw_cookie[i]);
				qdf_assert_always(rx_desc);
				msdu = rx_desc->nbuf;

				qdf_nbuf_unmap_single(soc->osdev, msdu,
						      QDF_DMA_FROM_DEVICE);

				dp_rx_buffer_pool_nbuf_free(soc, msdu,
							    rx_desc->pool_id);
				rx_bufs_used++;
				dp_rx_add_to_free_desc_list(head,
							    tail, rx_desc);
			}
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
					       rx_link_buf_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	} while (buf_info.paddr);

	return rx_bufs_used;
}

/**
 * dp_handle_wbm_internal_error() - handles wbm_internal_error case
 *
 * @soc: core DP main context
 * @hal_desc: hal descriptor
 * @buf_type: indicates if the buffer is of type link desc or msdu
 *
 * Return: None
 *
 * wbm_internal_error is seen in the following scenarios:
 *
 * 1. Null pointers detected in WBM_RELEASE_RING descriptors
 * 2. Null pointers detected during the delinking process
 *
 * Some null pointer cases:
 *
 * a. MSDU buffer pointer is NULL
 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
 * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
 */
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type)
{
	struct hal_buf_info buf_info = {0};
	struct dp_rx_desc *rx_desc = NULL;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rx_buf_cookie;
	uint32_t rx_bufs_reaped = 0;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;

	hal_rx_reo_buf_paddr_get(hal_desc, &buf_info);

	if (!buf_info.paddr) {
		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER],
			     1);
		return;
	}

	rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(hal_desc);
	pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(rx_buf_cookie);
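	/*
	 * The SW cookie encodes the owning descriptor pool in its upper
	 * bits; recover it up front so the replenish at the end of this
	 * function targets the right pool regardless of which branch
	 * below reaps the buffer.
	 */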
	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF],
			     1);
		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);

		if (rx_desc && rx_desc->nbuf) {
			rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
			dp_ipa_handle_rx_buf_smmu_mapping(
						soc, rx_desc->nbuf,
						rx_desc_pool->buf_size,
						false);
			qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			rx_desc->unmapped = 1;

			dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
						    rx_desc->pool_id);
			dp_rx_add_to_free_desc_list(&head,
						    &tail,
						    rx_desc);

			rx_bufs_reaped++;
		}
	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
		rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id,
							 hal_desc,
							 &head, &tail);
	}

	if (rx_bufs_reaped) {
		struct rx_desc_pool *rx_desc_pool;
		struct dp_srng *dp_rxdma_srng;

		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED],
			     1);
		dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id];
		rx_desc_pool = &soc->rx_desc_buf[pool_id];

		dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng,
					rx_desc_pool,
					rx_bufs_reaped,
					&head, &tail);
	}
}