/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h> /* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"
/* Max number of buffers in the invalid peer SG list */
#define DP_MAX_INVALID_BUFFERS 10
/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is looped
 * back on the same vap or a different vap
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
                                          struct dp_peer *peer,
                                          uint8_t *rx_tlv_hdr,
                                          qdf_nbuf_t nbuf)
{
    struct dp_vdev *vdev = peer->vdev;
    struct dp_ast_entry *ase = NULL;
    uint16_t sa_idx = 0;
    uint8_t *data;

    /*
     * Multicast Echo Check is required only if vdev is STA and
     * received pkt is a multicast/broadcast pkt. Otherwise
     * skip the MEC check.
     */
    if (vdev->opmode != wlan_op_mode_sta)
        return false;

    if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
        return false;

    data = qdf_nbuf_data(nbuf);

    /*
     * If the received pkt's src mac addr matches the vdev
     * mac address, drop the pkt as it is looped back.
     */
    if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
                      vdev->mac_addr.raw,
                      QDF_MAC_ADDR_SIZE)))
        return true;

    /*
     * In qwrap isolation mode, do not drop loopback packets.
     * In isolation mode, all packets from the wired stations need to go
     * to rootap and loop back to reach the wireless stations and
     * vice-versa.
     */
    if (qdf_unlikely(vdev->isolation_vdev))
        return false;

    /*
     * If the received pkt's src mac addr matches the MAC addr of a
     * wired PC behind the STA, or of a wireless STA behind the
     * repeater, drop the pkt as it is looped back.
     */
    qdf_spin_lock_bh(&soc->ast_lock);
    if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
        sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

        /* sa_idx is unsigned; only the upper bound can be out of range */
        if (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
            qdf_spin_unlock_bh(&soc->ast_lock);
            QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                      "invalid sa_idx: %d", sa_idx);
            qdf_assert_always(0);
        }

        ase = soc->ast_table[sa_idx];
        if (!ase) {
            /* We do not get a peer map event for STA and without
             * this event we don't know what the STA's sa_idx is.
             * For this reason the AST is still not associated to
             * any index position in ast_table.
             * In such scenarios where sa is valid but the AST is
             * not in ast_table, use the API below to get the
             * AST entry for the STA's own mac_address.
             */
            ase = dp_peer_ast_list_find(soc, peer,
                                        &data[QDF_MAC_ADDR_SIZE]);
            if (ase) {
                ase->ast_idx = sa_idx;
                soc->ast_table[sa_idx] = ase;
                ase->is_mapped = TRUE;
            }
        }
    } else {
        ase = dp_peer_ast_hash_find_by_pdevid(soc,
                                              &data[QDF_MAC_ADDR_SIZE],
                                              vdev->pdev->pdev_id);
    }

    if (ase) {
        if (ase->pdev_id != vdev->pdev->pdev_id) {
            qdf_spin_unlock_bh(&soc->ast_lock);
            QDF_TRACE(QDF_MODULE_ID_DP,
                      QDF_TRACE_LEVEL_INFO,
                      "Detected DBDC Root AP %pM, %d %d",
                      &data[QDF_MAC_ADDR_SIZE], vdev->pdev->pdev_id,
                      ase->pdev_id);
            return false;
        }

        if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
            (ase->peer != peer)) {
            qdf_spin_unlock_bh(&soc->ast_lock);
            QDF_TRACE(QDF_MODULE_ID_DP,
                      QDF_TRACE_LEVEL_INFO,
                      "received pkt with same src mac %pM",
                      &data[QDF_MAC_ADDR_SIZE]);
            return true;
        }
    }
    qdf_spin_unlock_bh(&soc->ast_lock);
    return false;
}
/**
 * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
 * HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to program in the release descriptor
 * (e.g. HAL_BM_ACTION_PUT_IN_IDLE_LIST)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
                               hal_buff_addrinfo_t link_desc_addr,
                               uint8_t bm_action)
{
    struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
    hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
    hal_soc_handle_t hal_soc = soc->hal_soc;
    QDF_STATUS status = QDF_STATUS_E_FAILURE;
    void *src_srng_desc;

    if (!wbm_rel_srng) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "WBM RELEASE RING not initialized");
        return status;
    }

    if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
        /* TODO */
        /*
         * Need API to convert from hal_ring pointer to
         * Ring Type / Ring Id combo
         */
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
                  wbm_rel_srng);
        DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
        goto done;
    }
    src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
    if (qdf_likely(src_srng_desc)) {
        /* Return link descriptor through WBM ring (SW2WBM) */
        hal_rx_msdu_link_desc_set(hal_soc,
                                  src_srng_desc, link_desc_addr, bm_action);
        status = QDF_STATUS_SUCCESS;
    } else {
        struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
                  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
                  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
    }
done:
    hal_srng_access_end(hal_soc, wbm_rel_srng);
    return status;
}
/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 * (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to program in the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
                       uint8_t bm_action)
{
    void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

    return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}
/**
 * dp_rx_msdus_drop() - Drop all MSDUs of an MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: set to the mac_id (pool id) the dropped buffers belong to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
                 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
                 uint8_t *mac_id,
                 uint32_t quota)
{
    uint32_t rx_bufs_used = 0;
    void *link_desc_va;
    struct hal_buf_info buf_info;
    struct dp_pdev *pdev;
    struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
    int i;
    uint8_t *rx_tlv_hdr;
    uint32_t tid;

    hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

    link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

    /* No UNMAP required -- this is "malloc_consistent" memory */
    hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
                         &mpdu_desc_info->msdu_count);

    for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
        struct dp_rx_desc *rx_desc =
            dp_rx_cookie_2_va_rxdma_buf(soc,
                                        msdu_list.sw_cookie[i]);

        qdf_assert_always(rx_desc);

        /* all buffers from an MSDU link desc belong to the same pdev */
        *mac_id = rx_desc->pool_id;
        pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);

        if (!dp_rx_desc_check_magic(rx_desc)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Invalid rx_desc cookie=%d"),
                      msdu_list.sw_cookie[i]);
            return rx_bufs_used;
        }

        qdf_nbuf_unmap_single(soc->osdev,
                              rx_desc->nbuf, QDF_DMA_FROM_DEVICE);

        rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

        rx_bufs_used++;
        tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
                                        rx_desc->rx_buf_start);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Packet received with PN error for tid :%d", tid);

        rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
        if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
            hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);

        /* Just free the buffers */
        qdf_nbuf_free(rx_desc->nbuf);

        dp_rx_add_to_free_desc_list(&pdev->free_list_head,
                                    &pdev->free_list_tail, rx_desc);
    }

    /* Return link descriptor through WBM ring (SW2WBM) */
    dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

    return rx_bufs_used;
}
/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: set to the mac_id (pool id) the dropped buffers belong to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP determines that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
                      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
                      uint8_t *mac_id,
                      uint32_t quota)
{
    uint16_t peer_id;
    uint32_t rx_bufs_used = 0;
    struct dp_peer *peer;
    bool peer_pn_policy = false;

    peer_id = DP_PEER_METADATA_PEER_ID_GET(
                  mpdu_desc_info->peer_meta_data);

    peer = dp_peer_find_by_id(soc, peer_id);

    if (qdf_likely(peer)) {
        /*
         * TODO: Check for peer specific policies & set peer_pn_policy
         */
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "discard rx due to PN error for peer %pK %pM",
                  peer, peer->mac_addr.raw);

        dp_peer_unref_del_find_by_id(peer);
    }
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "Packet received with PN error");

    /* No peer PN policy -- definitely drop */
    if (!peer_pn_policy)
        rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
                                        mpdu_desc_info,
                                        mac_id, quota);

    return rx_bufs_used;
}
/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: set to the mac_id (pool id) the dropped buffers belong to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame
 * For case B), the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
                     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
                     uint8_t *mac_id, uint32_t quota)
{
    return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
                            mac_id, quota);
}
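/*
 * DP_PDEV_INVALID_PEER_MSDU_CHECK() - assert that the pdev invalid-peer
 * MSDU list head and tail are both empty when DP_INVALID_PEER_ASSERT is
 * defined; a no-op otherwise.
 */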
#ifdef DP_INVALID_PEER_ASSERT
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
    do { \
        qdf_assert_always(!(head)); \
        qdf_assert_always(!(tail)); \
    } while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif
/**
 * dp_rx_chain_msdus() - Function to chain all msdus of an mpdu
 * to the pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf,
                  uint8_t *rx_tlv_hdr, uint8_t mac_id)
{
    bool mpdu_done = false;
    qdf_nbuf_t curr_nbuf = NULL;
    qdf_nbuf_t tmp_nbuf = NULL;

    /* TODO: Currently only single radio is supported, hence
     * pdev hard coded to '0' index
     */
    struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

    /* If the invalid peer SG list is full, free the buffers in the
     * list and treat the current buffer as the start of a new list.
     *
     * The current logic to detect the last buffer from attn_tlv is not
     * reliable in the OFDMA UL scenario, hence add a max-buffers check
     * to avoid the list piling up.
     */
    if (!dp_pdev->first_nbuf ||
        (dp_pdev->invalid_peer_head_msdu &&
         QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
         (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
        qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
        dp_pdev->ppdu_id = hal_rx_get_ppdu_id(soc->hal_soc,
                                              rx_tlv_hdr);
        dp_pdev->first_nbuf = true;

        /* If the new nbuf received is the first msdu of the
         * amsdu and there are msdus in the invalid peer msdu
         * list, then let us free all the msdus of the invalid
         * peer msdu list.
         * This scenario can happen when we start receiving
         * a new a-msdu even before the previous a-msdu is
         * completely received.
         */
        curr_nbuf = dp_pdev->invalid_peer_head_msdu;
        while (curr_nbuf) {
            tmp_nbuf = curr_nbuf->next;
            qdf_nbuf_free(curr_nbuf);
            curr_nbuf = tmp_nbuf;
        }

        dp_pdev->invalid_peer_head_msdu = NULL;
        dp_pdev->invalid_peer_tail_msdu = NULL;
        hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
                                           &(dp_pdev->ppdu_info.rx_status));
    }

    if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
        hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
        qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
        qdf_assert_always(dp_pdev->first_nbuf == true);
        dp_pdev->first_nbuf = false;
        mpdu_done = true;
    }

    /*
     * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
     * should be NULL here; add this check for debugging in case
     * of some corner case.
     */
    DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
                                    dp_pdev->invalid_peer_tail_msdu);

    DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
                      dp_pdev->invalid_peer_tail_msdu,
                      nbuf);

    return mpdu_done;
}
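/**
 * dp_rx_wbm_err_handle_bar() - handle a BAR frame delivered via the WBM
 * error path by updating the rx tid state with the BAR's start sequence
 * number
 * @soc: core DP main context
 * @peer: dp peer handle
 * @nbuf: buffer holding the (expected) BAR frame
 *
 * Return: void
 */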
static
void dp_rx_wbm_err_handle_bar(struct dp_soc *soc,
                              struct dp_peer *peer,
                              qdf_nbuf_t nbuf)
{
    uint8_t *rx_tlv_hdr;
    unsigned char type, subtype;
    uint16_t start_seq_num;
    uint32_t tid;
    struct ieee80211_frame_bar *bar;

    /*
     * 1. Is this a BAR frame? If not, discard it.
     * 2. If it is, get the peer id, tid, and ssn.
     * 2a. Do a tid update.
     */
    rx_tlv_hdr = qdf_nbuf_data(nbuf);
    bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + SIZE_OF_DATA_RX_TLV);

    type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
    subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

    if (!(type == IEEE80211_FC0_TYPE_CTL &&
          subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
        dp_err_rl("Not a BAR frame!");
        return;
    }

    tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
    qdf_assert_always(tid < DP_MAX_TIDS);

    start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;

    dp_info_rl("tid %u window_size %u start_seq_num %u",
               tid, peer->rx_tid[tid].ba_win_size, start_seq_num);

    dp_rx_tid_update_wifi3(peer, tid,
                           peer->rx_tid[tid].ba_win_size,
                           start_seq_num);
}
/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 * on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in the non-BA case. This typically
 * may happen if aggregates are received on a QoS-enabled TID
 * while the Rx window size is still initialized to a value of 2. Or
 * it may also happen if the negotiated window size is 1 but the peer
 * sends aggregates.
 *
 * Return: void
 */
void
dp_2k_jump_handle(struct dp_soc *soc,
                  qdf_nbuf_t nbuf,
                  uint8_t *rx_tlv_hdr,
                  uint16_t peer_id,
                  uint8_t tid)
{
    uint32_t ppdu_id;
    struct dp_peer *peer = NULL;
    struct dp_rx_tid *rx_tid = NULL;

    peer = dp_peer_find_by_id(soc, peer_id);
    if (!peer || peer->delete_in_progress) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "peer not found");
        goto free_nbuf;
    }
    rx_tid = &peer->rx_tid[tid];
    if (qdf_unlikely(!rx_tid)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "rx_tid is NULL!!");
        goto free_nbuf;
    }
    qdf_spin_lock_bh(&rx_tid->tid_lock);
    ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);

    /*
     * If a BA session is created and a non-aggregate packet is
     * landing here then the issue is with sequence number mismatch.
     * Proceed with delba even in that case.
     */
    if (rx_tid->ppdu_id_2k != ppdu_id &&
        rx_tid->ba_status != DP_RX_BA_ACTIVE) {
        rx_tid->ppdu_id_2k = ppdu_id;
        qdf_spin_unlock_bh(&rx_tid->tid_lock);
        goto free_nbuf;
    }
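    /*
     * Keep only one DELBA in flight per tid: if a DELBA tx is not
     * already pending, mark it pending and request the control path
     * to send a DELBA with reason "QoS setup required".
     */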
    if (!rx_tid->delba_tx_status) {
        rx_tid->delba_tx_retry++;
        rx_tid->delba_tx_status = 1;
        rx_tid->delba_rcode =
            IEEE80211_REASON_QOS_SETUP_REQUIRED;
        qdf_spin_unlock_bh(&rx_tid->tid_lock);
        if (soc->cdp_soc.ol_ops->send_delba)
            soc->cdp_soc.ol_ops->send_delba(
                    peer->vdev->pdev->soc->ctrl_psoc,
                    peer->vdev->vdev_id,
                    peer->mac_addr.raw,
                    tid,
                    rx_tid->delba_rcode);
    } else {
        qdf_spin_unlock_bh(&rx_tid->tid_lock);
    }

free_nbuf:
    if (peer)
        dp_peer_unref_del_find_by_id(peer);
    qdf_nbuf_free(nbuf);
    return;
}
#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
    defined(QCA_WIFI_QCA6750)
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * In certain types of packets if peer_id is not correct then
 * the driver may not be able to find the peer. Try finding the peer
 * by addr_2 of the received MPDU. If the peer is found, then most
 * likely sw_peer_id & ast_idx are corrupted.
 *
 * Return: true if the peer is found by addr_2 of the received MPDU,
 * else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
                                              uint8_t pool_id,
                                              uint8_t *rx_tlv_hdr,
                                              qdf_nbuf_t nbuf)
{
    struct dp_peer *peer = NULL;
    uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
    struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
    struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

    /*
     * WAR: In certain types of packets if peer_id is not correct then
     * the driver may not be able to find the peer. Try finding the
     * peer by addr_2 of the received MPDU.
     */
    if (wh)
        peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
                                    wh->i_addr2);
    if (peer) {
        dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
        hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
                             QDF_TRACE_LEVEL_DEBUG);
        DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
                         1, qdf_nbuf_len(nbuf));
        qdf_nbuf_free(nbuf);

        return true;
    }

    return false;
}
/**
 * dp_rx_check_pkt_len() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pktlen > RX_DATA_BUFFER_SIZE, else return false
 */
static inline
bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
    if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
        DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
                         1, pkt_len);
        return true;
    } else {
        return false;
    }
}
#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
                                              uint8_t pool_id,
                                              uint8_t *rx_tlv_hdr,
                                              qdf_nbuf_t nbuf)
{
    return false;
}

static inline
bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
    return false;
}
#endif
/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 * descriptor violations on either a
 * REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QoS-enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is set up. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: QDF_STATUS_SUCCESS if the nbuf was handled successfully,
 * otherwise a QDF error status if the nbuf could not be handled or
 * was dropped.
 */
static QDF_STATUS
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
                         uint8_t *rx_tlv_hdr, uint8_t pool_id,
                         struct dp_peer *peer)
{
    uint32_t pkt_len;
    uint16_t msdu_len;
    struct dp_vdev *vdev;
    uint8_t tid;
    qdf_ether_header_t *eh;
    struct hal_rx_msdu_metadata msdu_metadata;
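    /*
     * Cache the per-MSDU attributes (first/last msdu, DA is mcbc,
     * DA/SA valid) from the RX TLVs into the nbuf control block,
     * since the TLV header is stripped before delivery.
     */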
    qdf_nbuf_set_rx_chfrag_start(nbuf,
                                 hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
                                                                rx_tlv_hdr));
    qdf_nbuf_set_rx_chfrag_end(nbuf,
                               hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
                                                             rx_tlv_hdr));
    qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
                                                              rx_tlv_hdr));
    qdf_nbuf_set_da_valid(nbuf,
                          hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
                                                          rx_tlv_hdr));
    qdf_nbuf_set_sa_valid(nbuf,
                          hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
                                                          rx_tlv_hdr));

    hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
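    /*
     * msdu_len from the TLVs covers only the MSDU payload; the full
     * buffer length also includes the L3 header padding and the RX TLV
     * area at the head of the buffer.
     */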
    msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
    pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + RX_PKT_TLVS_LEN;

    if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
        if (dp_rx_check_pkt_len(soc, pkt_len))
            goto drop_nbuf;

        /* Set length in nbuf */
        qdf_nbuf_set_pktlen(
            nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
        qdf_assert_always(nbuf->data == rx_tlv_hdr);
    }

    /*
     * Check if DMA completed -- msdu_done is the last bit
     * to be written
     */
    if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
        dp_err_rl("MSDU DONE failure");
        hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
                             QDF_TRACE_LEVEL_INFO);
        qdf_assert(0);
    }
    if (!peer &&
        dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
                                                      rx_tlv_hdr, nbuf))
        return QDF_STATUS_E_FAILURE;

    if (!peer) {
        bool mpdu_done = false;
        struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);

        dp_err_rl("peer is NULL");
        DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
                         qdf_nbuf_len(nbuf));

        mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
        /* Trigger invalid peer handler wrapper */
        dp_rx_process_invalid_peer_wrapper(soc,
                                           pdev->invalid_peer_head_msdu,
                                           mpdu_done, pool_id);

        if (mpdu_done) {
            pdev->invalid_peer_head_msdu = NULL;
            pdev->invalid_peer_tail_msdu = NULL;
        }
        return QDF_STATUS_E_FAILURE;
    }

    vdev = peer->vdev;
    if (!vdev) {
        dp_err_rl("Null vdev!");
        DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
        goto drop_nbuf;
    }

    /*
     * Advance the packet start pointer by the total size of the
     * pre-header TLVs
     */
    if (qdf_nbuf_is_frag(nbuf))
        qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
    else
        qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
                                  RX_PKT_TLVS_LEN));

    dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

    if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
        /* this is a looped back MCBC pkt, drop it */
        DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
        goto drop_nbuf;
    }

    /*
     * In qwrap mode, if the received packet matches any of the vdev
     * mac addresses, drop it. Do not receive multicast packets
     * originated from any proxysta.
     */
    if (check_qwrap_multicast_loopback(vdev, nbuf)) {
        DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
        goto drop_nbuf;
    }

    if (qdf_unlikely((peer->nawds_enabled == true) &&
                     hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
                                                    rx_tlv_hdr))) {
        dp_err_rl("free buffer for multicast packet");
        DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
        goto drop_nbuf;
    }

    if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
        dp_err_rl("mcast Policy Check Drop pkt");
        goto drop_nbuf;
    }
    /* WDS Source Port Learning */
    if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
                   vdev->wds_enabled))
        dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf,
                                msdu_metadata);

    if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
        tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
        if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
            dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
            /* IEEE80211_SEQ_MAX indicates invalid start_seq */
    }

    if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
        qdf_nbuf_set_next(nbuf, NULL);
        dp_rx_deliver_raw(vdev, nbuf, peer);
    } else {
        if (vdev->osif_rx) {
            qdf_nbuf_set_next(nbuf, NULL);
            DP_STATS_INC_PKT(peer, rx.to_stack, 1,
                             qdf_nbuf_len(nbuf));

            /*
             * Update the protocol tag in SKB based on
             * CCE metadata
             */
            dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
                                      EXCEPTION_DEST_RING_ID,
                                      true, true);

            /* Update the flow tag in SKB based on FSE metadata */
            dp_rx_update_flow_tag(soc, vdev, nbuf,
                                  rx_tlv_hdr, true);

            if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
                                     soc->hal_soc, rx_tlv_hdr) &&
                             (vdev->rx_decap_type ==
                              htt_cmn_pkt_type_ethernet))) {
                eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

                DP_STATS_INC_PKT(peer, rx.multicast, 1,
                                 qdf_nbuf_len(nbuf));
                if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
                    DP_STATS_INC_PKT(peer, rx.bcast, 1,
                                     qdf_nbuf_len(nbuf));
                }
            }

            vdev->osif_rx(vdev->osif_vdev, nbuf);
        } else {
            dp_err_rl("INVALID osif_rx. vdev %pK", vdev);
            DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
            goto drop_nbuf;
        }
    }
    return QDF_STATUS_SUCCESS;

drop_nbuf:
    qdf_nbuf_free(nbuf);
    return QDF_STATUS_E_FAILURE;
}
/**
 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
 * or wifi parse error frames to the OS
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 * @err_code: rxdma err code
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 * pool_id have the same mapping)
 *
 * Return: None
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
                        uint8_t *rx_tlv_hdr, struct dp_peer *peer,
                        uint8_t err_code, uint8_t mac_id)
{
    uint32_t pkt_len, l2_hdr_offset;
    uint16_t msdu_len;
    struct dp_vdev *vdev;
    qdf_ether_header_t *eh;
    bool is_broadcast;

    /*
     * Check if DMA completed -- msdu_done is the last bit
     * to be written
     */
    if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("MSDU DONE failure"));

        hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
                             QDF_TRACE_LEVEL_INFO);
        qdf_assert(0);
    }

    l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
                                                       rx_tlv_hdr);
    msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
    pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

    if (dp_rx_check_pkt_len(soc, pkt_len)) {
        /* Drop & free packet */
        qdf_nbuf_free(nbuf);
        return;
    }
    /* Set length in nbuf */
    qdf_nbuf_set_pktlen(nbuf, pkt_len);

    qdf_nbuf_set_next(nbuf, NULL);

    qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
    qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

    if (!peer) {
        QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
        DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
                         qdf_nbuf_len(nbuf));
        /* Trigger invalid peer handler wrapper */
        dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
        return;
    }

    vdev = peer->vdev;
    if (!vdev) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("INVALID vdev %pK OR osif_rx"), vdev);
        /* Drop & free packet */
        qdf_nbuf_free(nbuf);
        DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
        return;
    }

    /*
     * Advance the packet start pointer by the total size of the
     * pre-header TLVs
     */
    dp_rx_skip_tlvs(nbuf, l2_hdr_offset);
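    /*
     * For wifi-parse errors, peek at the Ethertype: a VLAN-tagged STP
     * frame is counted separately and routed through the mesh path,
     * while any other VLAN-tagged frame is delivered to the stack as-is.
     */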
    if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
        uint8_t *pkt_type;

        pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
        if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
            if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
                htons(QDF_LLC_STP)) {
                DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
                goto process_mesh;
            } else {
                goto process_rx;
            }
        }
    }
    if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
        goto process_mesh;

    /*
     * A WAPI cert AP sends rekey frames as unencrypted.
     * Thus RXDMA will report an unencrypted frame error.
     * To pass the WAPI cert case, SW needs to pass the unencrypted
     * rekey frame to the stack.
     */
    if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
        goto process_rx;
    }
    /*
     * Similar to WAPI, in the dynamic WEP case rekey frames are not
     * encrypted. Allow EAPOL when 802.1x + WEP is enabled and the
     * key install is already done.
     */
    if ((vdev->sec_type == cdp_sec_type_wep104) &&
        (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
        goto process_rx;

process_mesh:

    if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
        qdf_nbuf_free(nbuf);
        DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
        return;
    }

    if (vdev->mesh_vdev) {
        if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
                == QDF_STATUS_SUCCESS) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
                      FL("mesh pkt filtered"));
            DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

            qdf_nbuf_free(nbuf);
            return;
        }
        dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
    }
process_rx:
    if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
                                                    rx_tlv_hdr) &&
                     (vdev->rx_decap_type ==
                      htt_cmn_pkt_type_ethernet))) {
        eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
        is_broadcast = (QDF_IS_ADDR_BROADCAST
                        (eh->ether_dhost)) ? 1 : 0;
        DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
        if (is_broadcast) {
            DP_STATS_INC_PKT(peer, rx.bcast, 1,
                             qdf_nbuf_len(nbuf));
        }
    }

    if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
        dp_rx_deliver_raw(vdev, nbuf, peer);
    } else {
        /* Update the protocol tag in SKB based on CCE metadata */
        dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
                                  EXCEPTION_DEST_RING_ID, true, true);
        /* Update the flow tag in SKB based on FSE metadata */
        dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
        DP_STATS_INC(peer, rx.to_stack.num, 1);
        dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);
    }

    return;
}
/**
 * dp_rx_process_mic_error() - Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
                             uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
    struct dp_vdev *vdev = NULL;
    struct dp_pdev *pdev = NULL;
    struct ol_if_ops *tops = NULL;
    uint16_t rx_seq, fragno;
    uint8_t is_raw;
    unsigned int tid;
    QDF_STATUS status;
    struct cdp_rx_mic_err_info mic_failure_info;

    if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
                                        rx_tlv_hdr))
        return;

    if (!peer) {
        dp_info_rl("peer not found");
        goto fail;
    }

    vdev = peer->vdev;
    if (!vdev) {
        dp_info_rl("VDEV not found");
        goto fail;
    }

    pdev = vdev->pdev;
    if (!pdev) {
        dp_info_rl("PDEV not found");
        goto fail;
    }

    is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
    if (is_raw) {
        fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));
        /* Can get only last fragment */
        if (fragno) {
            tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
                                            qdf_nbuf_data(nbuf));
            rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
                                            qdf_nbuf_data(nbuf));

            status = dp_rx_defrag_add_last_frag(soc, peer,
                                                tid, rx_seq, nbuf);
            dp_info_rl("Frag pkt seq# %d frag# %d consumed "
                       "status %d !", rx_seq, fragno, status);
            return;
        }
    }

    if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
                              &mic_failure_info.da_mac_addr.bytes[0])) {
        dp_err_rl("Failed to get da_mac_addr");
        goto fail;
    }

    if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
                              &mic_failure_info.ta_mac_addr.bytes[0])) {
        dp_err_rl("Failed to get ta_mac_addr");
        goto fail;
    }

    mic_failure_info.key_id = 0;
    mic_failure_info.multicast =
        IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
    qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
    mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
    mic_failure_info.data = NULL;
    mic_failure_info.vdev_id = vdev->vdev_id;

    tops = pdev->soc->cdp_soc.ol_ops;
    if (tops->rx_mic_error)
        tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
                           &mic_failure_info);

fail:
    qdf_nbuf_free(nbuf);
    return;
}
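/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 * serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexing
 * for all the frames routed to the REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */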
uint32_t
dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
                  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
    hal_ring_desc_t ring_desc;
    hal_soc_handle_t hal_soc;
    uint32_t count = 0;
    uint32_t rx_bufs_used = 0;
    uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
    uint8_t mac_id = 0;
    uint8_t buf_type;
    uint8_t error, rbm;
    struct hal_rx_mpdu_desc_info mpdu_desc_info;
    struct hal_buf_info hbi;
    struct dp_pdev *dp_pdev;
    struct dp_srng *dp_rxdma_srng;
    struct rx_desc_pool *rx_desc_pool;
    uint32_t cookie = 0;
    void *link_desc_va;
    struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
    uint16_t num_msdus;
    struct dp_rx_desc *rx_desc = NULL;

    /* Debug -- Remove later */
    qdf_assert(soc && hal_ring_hdl);

    hal_soc = soc->hal_soc;

    /* Debug -- Remove later */
    qdf_assert(hal_soc);

    if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
        /* TODO */
        /*
         * Need API to convert from hal_ring pointer to
         * Ring Type / Ring Id combo
         */
        DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
        goto done;
    }

    while (qdf_likely(quota-- && (ring_desc =
                                  hal_srng_dst_get_next(hal_soc,
                                                        hal_ring_hdl)))) {
        DP_STATS_INC(soc, rx.err_ring_pkts, 1);

        error = HAL_RX_ERROR_STATUS_GET(ring_desc);

        qdf_assert(error == HAL_REO_ERROR_DETECTED);

        buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
        /*
         * For REO error ring, expect only MSDU LINK DESC
         */
        qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

        cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
        /*
         * check for the magic number in the sw cookie
         */
        qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
                          LINK_DESC_ID_START);

        /*
         * Check if the buffer is to be processed on this processor
         */
        rbm = hal_rx_ret_buf_manager_get(ring_desc);

        hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
        link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
        hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
                             &num_msdus);

        if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
                         (msdu_list.rbm[0] !=
                          HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST) &&
                         (msdu_list.rbm[0] != DP_DEFRAG_RBM))) {
            /* TODO */
            /* Call appropriate handler */
            if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
                DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
                QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_ERROR,
                          FL("Invalid RBM %d"),
                          msdu_list.rbm[0]);
            }

            /* Return link descriptor through WBM ring (SW2WBM) */
            dp_rx_link_desc_return(soc, ring_desc,
                                   HAL_BM_ACTION_RELEASE_MSDU_LIST);
            continue;
        }

        rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
                                              msdu_list.sw_cookie[0]);
        qdf_assert_always(rx_desc);

        mac_id = rx_desc->pool_id;

        /* Get the MPDU DESC info */
        hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

        if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
            /*
             * We only handle one msdu per link desc for fragmented
             * case. We drop the msdus and release the link desc
             * back if there are more than one msdu in link desc.
             */
            if (qdf_unlikely(num_msdus > 1)) {
                count = dp_rx_msdus_drop(soc, ring_desc,
                                         &mpdu_desc_info,
                                         &mac_id, quota);
                rx_bufs_reaped[mac_id] += count;
                continue;
            }

            count = dp_rx_frag_handle(soc,
                                      ring_desc, &mpdu_desc_info,
                                      rx_desc, &mac_id, quota);
            rx_bufs_reaped[mac_id] += count;
            DP_STATS_INC(soc, rx.rx_frags, 1);
            continue;
        }
        if (hal_rx_reo_is_pn_error(ring_desc)) {
            /* TODO */
            DP_STATS_INC(soc,
                         rx.err.
                         reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
                         1);
            /* increment @pdev level */
            dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
            if (dp_pdev)
                DP_STATS_INC(dp_pdev, err.reo_error, 1);

            count = dp_rx_pn_error_handle(soc,
                                          ring_desc,
                                          &mpdu_desc_info, &mac_id,
                                          quota);

            rx_bufs_reaped[mac_id] += count;
            continue;
        }

        if (hal_rx_reo_is_2k_jump(ring_desc)) {
            /* TODO */
            DP_STATS_INC(soc,
                         rx.err.
                         reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
                         1);
            /* increment @pdev level */
            dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
            if (dp_pdev)
                DP_STATS_INC(dp_pdev, err.reo_error, 1);

            count = dp_rx_2k_jump_handle(soc,
                                         ring_desc, &mpdu_desc_info,
                                         &mac_id, quota);

            rx_bufs_reaped[mac_id] += count;
            continue;
        }
    }

done:
    dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
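    /* Flush any fragments whose defrag wait time has expired */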
    if (soc->rx.flags.defrag_timeout_check) {
        uint32_t now_ms =
            qdf_system_ticks_to_msecs(qdf_system_ticks());

        if (now_ms >= soc->rx.defrag.next_flush_ms)
            dp_rx_defrag_waitlist_flush(soc);
    }

    for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
        if (rx_bufs_reaped[mac_id]) {
            dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
            dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
            rx_desc_pool = &soc->rx_desc_buf[mac_id];

            dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
                                    rx_desc_pool,
                                    rx_bufs_reaped[mac_id],
                                    &dp_pdev->free_list_head,
                                    &dp_pdev->free_list_tail);
            rx_bufs_used += rx_bufs_reaped[mac_id];
        }
    }

    return rx_bufs_used; /* Assume no scale factor for now */
}
#ifdef DROP_RXDMA_DECRYPT_ERR
/**
 * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
 *
 * Return: true if rxdma decrypt err frames are handled and false otherwise
 */
static inline bool dp_handle_rxdma_decrypt_err(void)
{
    return false;
}
#else
static inline bool dp_handle_rxdma_decrypt_err(void)
{
    return true;
}
#endif
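/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release
 * ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 * serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexing
 * for all the frames routed to the WBM to SW release ring.
 *
 * Return: uint32_t: No. of elements processed
 */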
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
                      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
    hal_ring_desc_t ring_desc;
    hal_soc_handle_t hal_soc;
    struct dp_rx_desc *rx_desc;
    union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
    union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
    uint32_t rx_bufs_used = 0;
    uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
    uint8_t buf_type, rbm;
    uint32_t rx_buf_cookie;
    uint8_t mac_id;
    struct dp_pdev *dp_pdev;
    struct dp_srng *dp_rxdma_srng;
    struct rx_desc_pool *rx_desc_pool;
    uint8_t *rx_tlv_hdr;
    qdf_nbuf_t nbuf_head = NULL;
    qdf_nbuf_t nbuf_tail = NULL;
    qdf_nbuf_t nbuf, next;
    struct hal_wbm_err_desc_info wbm_err_info = { 0 };
    uint8_t pool_id;
    uint8_t tid = 0;

    /* Debug -- Remove later */
    qdf_assert(soc && hal_ring_hdl);

    hal_soc = soc->hal_soc;

    /* Debug -- Remove later */
    qdf_assert(hal_soc);

    if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
        /* TODO */
        /*
         * Need API to convert from hal_ring pointer to
         * Ring Type / Ring Id combo
         */
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
        goto done;
    }

    while (qdf_likely(quota-- && (ring_desc =
                                  hal_srng_dst_get_next(hal_soc,
                                                        hal_ring_hdl)))) {
        /* XXX */
        buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

        /*
         * For WBM ring, expect only MSDU buffers
         */
        qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

        qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
                    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
                   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
                    == HAL_RX_WBM_ERR_SRC_REO));

        /*
         * Check if the buffer is to be processed on this processor
         */
        rbm = hal_rx_ret_buf_manager_get(ring_desc);

        if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
            /* TODO */
            /* Call appropriate handler */
            DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Invalid RBM %d"), rbm);
            continue;
        }

        rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

        rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
        qdf_assert_always(rx_desc);

        if (!dp_rx_desc_check_magic(rx_desc)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Invalid rx_desc cookie=%d"),
                      rx_buf_cookie);
            continue;
        }
        /*
         * This is an unlikely scenario where the host is reaping
         * a descriptor which it already reaped just a while ago
         * but is yet to replenish it back to HW.
         * In this case host will dump the last 128 descriptors
         * including the software descriptor rx_desc and assert.
         */
        if (qdf_unlikely(!rx_desc->in_use)) {
            DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
            dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
                                       ring_desc, rx_desc);
        }
        nbuf = rx_desc->nbuf;
        qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE);

        /*
         * save the wbm desc info in nbuf TLV. We will need this
         * info when we do the actual nbuf processing
         */
        hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
        wbm_err_info.pool_id = rx_desc->pool_id;
        hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
                                       &wbm_err_info);

        rx_bufs_reaped[rx_desc->pool_id]++;

        DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
        dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
                                    &tail[rx_desc->pool_id],
                                    rx_desc);
    }

done:
    dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

    for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
        if (rx_bufs_reaped[mac_id]) {
            dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
            dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
            rx_desc_pool = &soc->rx_desc_buf[mac_id];

            dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
                                    rx_desc_pool, rx_bufs_reaped[mac_id],
                                    &head[mac_id], &tail[mac_id]);
            rx_bufs_used += rx_bufs_reaped[mac_id];
        }
    }
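    /*
     * Second pass: walk the reaped nbuf list and dispatch each frame
     * to the appropriate REO/RXDMA error handler based on the WBM
     * error info that was cached in its TLV area above.
     */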
	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_peer *peer;
		uint16_t peer_id;
		uint8_t err_code;
		uint8_t *tlv_hdr;

		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * Retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately.
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
							   rx_tlv_hdr);
		peer = dp_peer_find_by_id(soc, peer_id);
		if (!peer)
			dp_info_rl("peer is null peer_id%u err_src%u err_rsn%u",
				   peer_id, wbm_err_info.wbm_err_src,
				   wbm_err_info.reo_psh_rsn);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;
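
		/*
		 * Dispatch on the error source recorded by WBM: frames
		 * pushed by REO take the REO error path, frames pushed
		 * by RXDMA take the RXDMA error path.
		 */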
		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
			    == HAL_RX_WBM_REO_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err_info.reo_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev, err.reo_error,
						     1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					dp_rx_null_q_desc_handle(soc, nbuf,
								 rx_tlv_hdr,
								 pool_id, peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;

					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						peer_id =
						hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
										 rx_tlv_hdr);
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc,
									  rx_tlv_hdr);
					}
					dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr,
							  peer_id, tid);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;
				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
				case HAL_REO_ERR_BAR_FRAME_OOR:
					if (peer)
						dp_rx_wbm_err_handle_bar(soc,
									 peer,
									 nbuf);
					break;
				default:
					dp_info_rl("Got pkt with REO ERROR: %d",
						   wbm_err_info.reo_err_code);
					break;
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
			    == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
				case HAL_RXDMA_ERR_WIFI_PARSE:
					pool_id = wbm_err_info.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								peer,
								wbm_err_info.rxdma_err_code,
								pool_id);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer, rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (!dp_handle_rxdma_decrypt_err()) {
						if (peer)
							DP_STATS_INC(peer,
								     rx.err.decrypt_err, 1);
						break;
					}

					pool_id = wbm_err_info.pool_id;
					err_code = wbm_err_info.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr, peer,
								err_code,
								pool_id);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer,
							     rx.err.decrypt_err, 1);
						dp_peer_unref_del_find_by_id(peer);
					}
					continue;

				default:
					dp_err_rl("RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc found
 *
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
			 hal_rxdma_desc_t rxdma_dst_ring_desc,
			 void *rx_desc)
{
	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
	dp_rx_dump_info_and_assert(
			soc,
			soc->rx_rel_ring.hal_srng,
			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
			rx_desc);
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	hal_rxdma_desc_t ring_desc;

	msdu = 0;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}
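
	/*
	 * Walk the chain of MSDU link descriptors: free every MSDU buffer
	 * hanging off each link descriptor (unless the buffers are owned
	 * by an NSS offloaded radio) and return each link descriptor to
	 * the WBM idle list.
	 */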
	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);
		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* if the msdus belong to an NSS offloaded radio &&
			 * the rbm is not SW3_BM then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). let
			 * these buffers be given to NSS completion ring for
			 * NSS to free them.
			 * else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;
					/*
					 * This is an unlikely scenario where
					 * the host is reaping a descriptor
					 * which it already reaped just a
					 * while ago but is yet to replenish
					 * it back to HW. In this case host
					 * will dump the last 128 descriptors
					 * including the software descriptor
					 * rx_desc and assert.
					 */
					ring_desc = rxdma_dst_ring_desc;
					if (qdf_unlikely(!rx_desc->in_use)) {
						dup_desc_dbg(soc,
							     ring_desc,
							     rx_desc);
						continue;
					}

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							      QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
		dp_rx_link_desc_return_by_addr(soc,
					       (hal_buff_addrinfo_t)
						rx_link_buf_info,
					       bm_action);
	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
	if (pdev)
		DP_STATS_INC(pdev, err.rxdma_error, 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

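/**
 * dp_rxdma_err_process() - process entries on the RXDMA error destination
 *	ring for a given mac/lmac
 *
 * @int_ctx: interrupt context
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: upper bound on the number of ring entries to reap
 *
 * Return: number of rx buffers processed (work done)
 */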
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Init Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Access Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}
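
	/*
	 * Reap link descriptor entries from the RXDMA error destination
	 * ring until the quota is exhausted or the ring runs dry; each
	 * entry yields one MPDU worth of MSDUs to free and replenish.
	 */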
	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	dp_srng_access_end(int_ctx, soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}

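/**
 * dp_wbm_int_err_mpdu_pop() - pop and free the MSDUs hanging off a link
 *	descriptor reported through a wbm_internal_error
 *
 * @soc: core DP main context
 * @mac_id: mac id / pool id the buffers belong to
 * @rxdma_dst_ring_desc: void pointer to link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of rx buffers freed
 */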
static inline uint32_t
dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
			hal_rxdma_desc_t rxdma_dst_ring_desc,
			union dp_rx_desc_list_elem_t **head,
			union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0, msdu_cnt, i;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];

	msdu = 0;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &msdu_cnt);
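
	/*
	 * Same link-descriptor walk as dp_rx_err_mpdu_pop(), minus the
	 * NSS special casing: unmap and free every MSDU, then return each
	 * link descriptor to the WBM idle list.
	 */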
	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		if (!rx_msdu_link_desc) {
			DP_STATS_INC(soc,
				     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC],
				     1);
			break;
		}

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			for (i = 0; i < num_msdus; i++) {
				struct dp_rx_desc *rx_desc =
					dp_rx_cookie_2_va_rxdma_buf(
							soc,
							msdu_list.sw_cookie[i]);
				qdf_assert_always(rx_desc);
				msdu = rx_desc->nbuf;

				qdf_nbuf_unmap_single(soc->osdev, msdu,
						      QDF_DMA_FROM_DEVICE);

				qdf_nbuf_free(msdu);
				rx_bufs_used++;
				dp_rx_add_to_free_desc_list(head,
							    tail, rx_desc);
			}
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
						rx_link_buf_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	} while (buf_info.paddr);

	return rx_bufs_used;
}

/**
 * dp_handle_wbm_internal_error() - handles wbm_internal_error case
 *
 * @soc: core DP main context
 * @hal_desc: hal descriptor
 * @buf_type: indicates if the buffer is of type link desc or msdu
 *
 * wbm_internal_error is seen in the following scenarios:
 *
 * 1. Null pointers detected in WBM_RELEASE_RING descriptors
 * 2. Null pointers detected during delinking process
 *
 * Some null pointer cases:
 *
 * a. MSDU buffer pointer is NULL
 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
 * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
 *
 * Return: None
 */
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type)
{
	struct hal_buf_info buf_info = {0};
	struct dp_pdev *dp_pdev;
	struct dp_rx_desc *rx_desc = NULL;
	uint32_t rx_buf_cookie;
	uint32_t rx_bufs_reaped = 0;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;

	hal_rx_reo_buf_paddr_get(hal_desc, &buf_info);

	if (!buf_info.paddr) {
		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER],
			     1);
		return;
	}

	rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(hal_desc);
	pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(rx_buf_cookie);
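
	/*
	 * A buffer-type release points at a single orphaned MSDU; a
	 * desc-type release points at a link descriptor whose whole
	 * MSDU list must be popped and freed.
	 */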
	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF],
			     1);
		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);

		if (rx_desc && rx_desc->nbuf) {
			qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
					      QDF_DMA_FROM_DEVICE);
			rx_desc->unmapped = 1;
			qdf_nbuf_free(rx_desc->nbuf);
			dp_rx_add_to_free_desc_list(&head,
						    &tail,
						    rx_desc);
			rx_bufs_reaped++;
		}
	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
		rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id,
							 hal_desc,
							 &head, &tail);
	}

	if (rx_bufs_reaped) {
		struct rx_desc_pool *rx_desc_pool;
		struct dp_srng *dp_rxdma_srng;

		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED],
			     1);
		dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
		dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id];
		rx_desc_pool = &soc->rx_desc_buf[pool_id];

		dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng,
					rx_desc_pool,
					rx_bufs_reaped,
					&head, &tail);
	}
}