/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#endif
#include "dp_rx_defrag.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is looped
 * back on the same vap or a different vap.
 *
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped-back pkt, else false)
 *
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase = NULL;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);

	/*
	 * If the received pkt's src mac addr matches the vdev
	 * mac address, drop the pkt as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
			  vdev->mac_addr.raw,
			  QDF_MAC_ADDR_SIZE)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/* If the received pkt's src mac addr matches the MAC addr of a
	 * wired PC behind the STA, or of a wireless STA behind the
	 * repeater, drop the pkt as it is looped back.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what is STA's sa_idx.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In these kinds of scenarios where sa is valid but
			 * ast is not in ast_table, we use the below API to get
			 * AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_list_find(soc, peer,
						    &data[QDF_MAC_ADDR_SIZE]);
			if (ase) {
				ase->ast_idx = sa_idx;
				soc->ast_table[sa_idx] = ase;
				ase->is_mapped = TRUE;
			}
		}
	} else {
		ase = dp_peer_ast_hash_find_by_pdevid(soc,
						      &data[QDF_MAC_ADDR_SIZE],
						      vdev->pdev->pdev_id);
	}

	if (ase) {
		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[QDF_MAC_ADDR_SIZE],
				  vdev->pdev->pdev_id, ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[QDF_MAC_ADDR_SIZE]);
			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}

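/*
 * Editor's illustrative sketch (not part of the upstream driver): a
 * typical error-path caller drops looped-back multicast as soon as
 * dp_rx_mcast_echo_check() reports an echo. The helper name
 * example_drop_if_mec is hypothetical; dp_rx_null_q_desc_handle()
 * later in this file does the same thing inline.
 */
#if 0
static bool example_drop_if_mec(struct dp_soc *soc, struct dp_peer *peer,
				uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
{
	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* looped-back MCBC frame: count it and free the buffer */
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);
		return true;
	}
	return false;
}
#endif
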
/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 * HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: WBM action on release (put in idle list, or release the
 * MSDU list as well)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	void *hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 * (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: WBM action on release
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

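/*
 * Editor's note with an illustrative sketch: bm_action selects what WBM
 * does with the returned descriptor. Both values appear in this file:
 * HAL_BM_ACTION_PUT_IN_IDLE_LIST after the MSDUs were freed locally
 * (dp_rx_msdus_drop()), and HAL_BM_ACTION_RELEASE_MSDU_LIST when the
 * MSDU buffers must be handed back along with the link descriptor
 * (invalid-RBM / NSS offload paths). The helper below is hypothetical.
 */
#if 0
static void example_return_link_desc(struct dp_soc *soc, void *ring_desc,
				     bool release_msdus_too)
{
	uint8_t bm_action = release_msdus_too ?
				HAL_BM_ACTION_RELEASE_MSDU_LIST :
				HAL_BM_ACTION_PUT_IN_IDLE_LIST;

	dp_rx_link_desc_return(soc, ring_desc, bm_action);
}
#endif
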
/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: [out] mac id of the pdev owning the dropped buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
				 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
				 uint8_t *mac_id,
				 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from a MSDU link desc belong to same pdev */
		*mac_id = rx_desc->pool_id;
		pdev = soc->pdev_list[rx_desc->pool_id];

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		qdf_nbuf_unmap_single(soc->osdev,
				      rx_desc->nbuf, QDF_DMA_FROM_DEVICE);

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid: %d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(rx_tlv_hdr))
			hal_rx_print_pn(rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: [out] mac id of the pdev owning the dropped buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO, e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK "
			  "(%02x:%02x:%02x:%02x:%02x:%02x)",
			  peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

		dp_peer_unref_del_find_by_id(peer);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: [out] mac id of the pdev owning the dropped buffers
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame
 * For case B), the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     uint8_t *mac_id, uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				mac_id, quota);
}

#ifdef CONFIG_MCL
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
	do {                                \
		qdf_assert_always(!(head)); \
		qdf_assert_always(!(tail)); \
	} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif

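/*
 * Editor's note: under CONFIG_MCL the macro above expands to two hard
 * asserts, so
 *
 *	DP_PDEV_INVALID_PEER_MSDU_CHECK(pdev->invalid_peer_head_msdu,
 *					pdev->invalid_peer_tail_msdu);
 *
 * becomes qdf_assert_always(!(pdev->invalid_peer_head_msdu)) followed by
 * qdf_assert_always(!(pdev->invalid_peer_tail_msdu)). On non-MCL builds
 * it compiles away to nothing.
 */
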
/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 * to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	if (!dp_pdev->first_nbuf) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is
		 * completely received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
						   &(dp_pdev->ppdu_info.rx_status));
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; keep the check for debugging purposes,
	 * to catch corner cases.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);

	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);
	return mpdu_done;
}

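/*
 * Editor's illustrative sketch (hypothetical helper): how the chained
 * invalid-peer list is consumed. dp_rx_null_q_desc_handle() below
 * follows exactly this pattern once dp_rx_chain_msdus() reports the
 * MPDU as complete.
 */
#if 0
static void example_handle_missing_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
					uint8_t *rx_tlv_hdr, uint8_t pool_id)
{
	struct dp_pdev *pdev = soc->pdev_list[pool_id];
	bool mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);

	/* hand the (possibly still partial) chain to the invalid-peer path */
	dp_rx_process_invalid_peer_wrapper(soc,
					   pdev->invalid_peer_head_msdu,
					   mpdu_done);
	if (mpdu_done) {
		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}
}
#endif
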
/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 * on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in non BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while Rx window size is still initialized to value of 2. Or
 * it may also happen if negotiated window size is 1 but peer
 * sends aggregates.
 *
 * Return: void
 */
void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	uint32_t ppdu_id;
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}
	rx_tid = &peer->rx_tid[tid];
	if (qdf_unlikely(!rx_tid)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "rx_tid is NULL!!");
		goto free_nbuf;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);

	/*
	 * If BA session is created and a non-aggregate packet is
	 * landing here then the issue is with sequence number mismatch.
	 * Proceed with delba even in that case
	 */
	if (rx_tid->ppdu_id_2k != ppdu_id &&
	    rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		rx_tid->ppdu_id_2k = ppdu_id;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto free_nbuf;
	}

	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		soc->cdp_soc.ol_ops->send_delba(peer->vdev->pdev->ctrl_pdev,
						peer->ctrl_peer,
						peer->mac_addr.raw,
						tid,
						peer->vdev->ctrl_vdev,
						rx_tid->delba_rcode);
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

free_nbuf:
	if (peer)
		dp_peer_unref_del_find_by_id(peer);
	qdf_nbuf_free(nbuf);
	return;
}

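/*
 * Editor's note with an illustrative fragment: the WBM error loop at
 * the bottom of this file invokes dp_2k_jump_handle() like this,
 * refreshing peer_id and tid from the TLVs of the first MSDU of the
 * offending MPDU before the call.
 */
#if 0
	if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) {
		peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	}
	dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr, peer_id, tid);
#endif
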
#ifdef QCA_WIFI_QCA6390
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * In certain types of packets if peer_id is not correct then
 * driver may not be able to find the peer. Try finding the peer by
 * addr_2 of the received MPDU. If the peer is found, then most likely
 * sw_peer_id & ast_idx are corrupted.
 *
 * Return: True if the peer is found by addr_2 of the received MPDU,
 * else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	uint8_t local_id;
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
	struct dp_pdev *pdev = soc->pdev_list[pool_id];
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	/*
	 * WAR: In certain types of packets if peer_id is not correct then
	 * driver may not be able to find the peer. Try finding the peer
	 * by addr_2 of the received MPDU.
	 */
	if (wh)
		peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
					    wh->i_addr2, &local_id);
	if (peer) {
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);

		return true;
	}
	return false;
}

/**
 * dp_rx_null_q_check_pkt_len_exception() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pktlen > RX_BUFFER_SIZE, else return false
 *
 */
static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_BUFFER_SIZE)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}
#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}

static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
	return false;
}
#endif

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *                              descriptor violation on either a
 *                              REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID has set up the TID queue. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: QDF_STATUS_SUCCESS if nbuf was handled successfully, else a
 * QDF error code if nbuf could not be handled and was dropped.
 */
static QDF_STATUS
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
			 struct dp_peer *peer)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				     hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_null_q_check_pkt_len_exception(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(nbuf,
				    qdf_min(pkt_len, (uint32_t)RX_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return QDF_STATUS_E_FAILURE;

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		dp_err_rl("peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc,
						   pdev->invalid_peer_head_msdu,
						   mpdu_done);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return QDF_STATUS_E_FAILURE;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	/*
	 * In qwrap mode if the received packet matches any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets
	 * originated from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		dp_err_rl("free buffer for multicast packet");
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		goto drop_nbuf;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
		dp_err_rl("mcast Policy Check Drop pkt");
		goto drop_nbuf;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (peer && !peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (vdev->osif_rx) {
			qdf_nbuf_set_next(nbuf, NULL);
			DP_STATS_INC_PKT(peer, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));

			/*
			 * Update the protocol tag in SKB based on
			 * CCE metadata
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID,
						  true, true);

			if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
							rx_tlv_hdr) &&
					 (vdev->rx_decap_type ==
					  htt_cmn_pkt_type_ethernet))) {
				eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

				DP_STATS_INC_PKT(peer, rx.multicast, 1,
						 qdf_nbuf_len(nbuf));
				if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
					DP_STATS_INC_PKT(peer, rx.bcast, 1,
							 qdf_nbuf_len(nbuf));
				}
			}

			vdev->osif_rx(vdev->osif_vdev, nbuf);
		} else {
			dp_err_rl("INVALID osif_rx. vdev %pK", vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			goto drop_nbuf;
		}
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	qdf_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}

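/*
 * Editor's worked example for the length math used above (numbers are
 * illustrative; RX_PKT_TLVS_LEN is target-specific):
 *
 *	msdu_len (from the msdu_start TLV)   = 1500
 *	l2_hdr_offset (L3 header padding)    =    2
 *	RX_PKT_TLVS_LEN (assume 384 here)    =  384
 *	pkt_len = 1500 + 2 + 384             = 1886
 *
 * dp_rx_null_q_check_pkt_len_exception() drops the frame if pkt_len
 * exceeds RX_BUFFER_SIZE; otherwise the nbuf length is clamped to
 * RX_BUFFER_SIZE and the TLV area is pulled off before delivery.
 */
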
/**
 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
 *                             or wifi-parse-error frames to the OS
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 * @err_code: rxdma err code
 *
 * Return: None
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			uint8_t err_code)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, l2_hdr_offset + RX_PKT_TLVS_LEN);

	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q) &&
		    *(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
							htons(QDF_LLC_STP)) {
			DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
			goto process_mesh;
		} else {
			DP_STATS_INC(vdev->pdev, dropped.wifi_parse, 1);
			qdf_nbuf_free(nbuf);
			return;
		}
	}

	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report unencrypted frame error.
	 * To pass WAPI cert case, SW needs to pass unencrypted
	 * rekey frame to stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		qdf_nbuf_cb_update_peer_local_id(nbuf, peer->local_id);
		goto process_rx;
	}
	/*
	 * In dynamic WEP case rekey frames are not encrypted,
	 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled
	 * and key install is already done.
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				== QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
				  FL("mesh pkt filtered"));
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			qdf_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
	}
process_rx:
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
		if (is_broadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		DP_STATS_INC(peer, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}

	return;
}

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_pkt_hdr;
	uint16_t rx_seq, fragno;
	unsigned int tid;
	QDF_STATUS status;

	if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
		return;

	rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!peer) {
		dp_err_rl("peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		dp_err_rl("PDEV not found");
		goto fail;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, qdf_nbuf_data(nbuf));
	rx_seq = (((*(uint16_t *)wh->i_seq) &
		   IEEE80211_SEQ_SEQ_MASK) >>
		  IEEE80211_SEQ_SEQ_SHIFT);
	fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));

	/* Can get only last fragment */
	if (fragno) {
		status = dp_rx_defrag_add_last_frag(soc, peer,
						    tid, rx_seq, nbuf);
		dp_info_rl("Frag pkt seq# %d frag# %d consumed status %d !",
			   rx_seq, fragno, status);
		return;
	}

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh);

fail:
	qdf_nbuf_free(nbuf);
	return;
}

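/*
 * Editor's worked example for the sequence-control decode above. With
 * the standard 802.11 values IEEE80211_SEQ_SEQ_MASK = 0xfff0 and
 * IEEE80211_SEQ_SEQ_SHIFT = 4, a raw 16-bit sequence-control field of
 * 0x1234 yields:
 *
 *	rx_seq = (0x1234 & 0xfff0) >> 4 = 0x123  (sequence number 291)
 *	fragno =  0x1234 & 0x000f       = 0x4    (fragment number 4)
 *
 * Only frames with a non-zero fragno take the
 * dp_rx_defrag_add_last_frag() path; fragment 0 falls through to the
 * umac MIC-error indication.
 */
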
/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	uint16_t num_msdus;
	struct dp_rx_desc *rx_desc = NULL;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
				  LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
				  HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Invalid RBM %d"),
					  msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
						      msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for fragmented
			 * case. We drop the msdus and release the link desc
			 * back if there are more than one msdu in link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				continue;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);
			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				     1);

			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				     1);

			count = dp_rx_2k_jump_handle(soc,
						     ring_desc, &mpdu_desc_info,
						     &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

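/*
 * Editor's illustrative sketch (hypothetical wiring): dp_rx_err_process()
 * is quota-driven, so a service loop typically charges its return value
 * against the remaining budget before moving on to other rings. The
 * variable names below are illustrative only.
 */
#if 0
	uint32_t budget = 64;
	uint32_t done;

	done = dp_rx_err_process(soc, soc->reo_exception_ring.hal_srng,
				 budget);
	budget -= done;		/* remaining quota for the other rings */
#endif
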
/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert_always(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring,
						   ring_desc, rx_desc);
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}
done:
	hal_srng_access_end(hal_soc, hal_ring);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_peer *peer;
		uint16_t peer_id;

		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
		peer = dp_peer_find_by_id(soc, peer_id);

		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;

		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
			    == HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err_info.reo_err_code], 1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					dp_rx_null_q_desc_handle(soc, nbuf,
								 rx_tlv_hdr,
								 pool_id, peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;

					if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) {
						peer_id =
						hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr,
							  peer_id, tid);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;
				default:
					dp_err_rl("Got pkt with REO ERROR: %d",
						  wbm_err_info.reo_err_code);
					break;
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
			    == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
				case HAL_RXDMA_ERR_WIFI_PARSE:
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								peer,
								wbm_err_info.rxdma_err_code);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer,
							     rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(
									peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer)
						DP_STATS_INC(peer,
							     rx.err.decrypt_err,
							     1);
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "Packet received with Decrypt error");
					break;

				default:
					dp_err_rl("RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc found
 *
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
			 void *rxdma_dst_ring_desc,
			 void *rx_desc)
{
	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
	dp_rx_dump_info_and_assert(soc,
				   soc->rx_rel_ring.hal_srng,
				   rxdma_dst_ring_desc,
				   rx_desc);
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   void *rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	void *ring_desc;

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* If the msdus belong to an NSS offloaded radio and
			 * the rbm is not SW3_BM, then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). Let
			 * these buffers be given to the NSS completion ring
			 * for NSS to free them.
			 * Else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;
					/*
					 * this is an unlikely scenario where
					 * the host is reaping a descriptor
					 * which it already reaped just a
					 * while ago but is yet to replenish
					 * it back to HW.
					 * In this case host will dump the
					 * last 128 descriptors including the
					 * software descriptor rx_desc and
					 * assert.
					 */
					ring_desc = rxdma_dst_ring_desc;
					if (qdf_unlikely(!rx_desc->in_use)) {
						dup_desc_dbg(soc,
							     ring_desc,
							     rx_desc);
						continue;
					}

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							      QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
					      &p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 *
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	void *hal_soc;
	void *rxdma_dst_ring_desc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Init Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring access Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	hal_srng_access_end(hal_soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}