dp_rx_err.c

/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"

/* Max buffers in invalid peer SG list */
#define DP_MAX_INVALID_BUFFERS 10

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
 *			      back on same vap or a different vap.
 *
 * @soc: core DP main context
 * @peer: dp peer handler
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 *
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase = NULL;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);

	/*
	 * If the received pkt's src mac addr matches the vdev
	 * mac address, then drop the pkt as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
			  vdev->mac_addr.raw,
			  QDF_MAC_ADDR_SIZE)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * If the received pkt's src mac addr matches the MAC addr of a
	 * wired PC behind the STA, or the MAC addr of a wireless STA
	 * behind the Repeater, then drop the pkt as it is looped back.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what is STA's sa_idx.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In these kinds of scenarios where sa is valid but
			 * ast is not in ast_table, we use the below API to get
			 * AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_list_find(soc, peer,
						    &data[QDF_MAC_ADDR_SIZE]);
			if (ase) {
				ase->ast_idx = sa_idx;
				soc->ast_table[sa_idx] = ase;
				ase->is_mapped = TRUE;
			}
		}
	} else {
		ase = dp_peer_ast_hash_find_by_pdevid(soc,
						      &data[QDF_MAC_ADDR_SIZE],
						      vdev->pdev->pdev_id);
	}

	if (ase) {
		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[QDF_MAC_ADDR_SIZE],
				  vdev->pdev->pdev_id, ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[QDF_MAC_ADDR_SIZE]);
			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}
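
/*
 * Illustrative sketch (not part of the driver): the MEC check above keys
 * off the source MAC that sits right after the destination MAC in the
 * decapped 802.3 frame, i.e. data[QDF_MAC_ADDR_SIZE]. A hypothetical
 * stand-alone equivalent of the first (own-address) test:
 *
 *	uint8_t *eth_hdr = qdf_nbuf_data(nbuf);
 *	uint8_t *src_mac = eth_hdr + QDF_MAC_ADDR_SIZE;
 *
 *	if (!qdf_mem_cmp(src_mac, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
 *		return true;	// looped back: SA equals our own vdev MAC
 */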

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to program into the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_link_desc_t link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to program into the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}
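
/*
 * Illustrative sketch (not part of the driver): callers in this file hand
 * an error-ring descriptor back to the WBM idle list once all of its MSDUs
 * have been consumed, e.g.:
 *
 *	QDF_STATUS status;
 *
 *	status = dp_rx_link_desc_return(soc, ring_desc,
 *					HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		;	// WBM release ring was full or not initialized
 */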

/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id of the pool the dropped buffers belong to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		 uint8_t *mac_id,
		 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from an MSDU link belong to same pdev */
		*mac_id = rx_desc->pool_id;
		pdev = soc->pdev_list[rx_desc->pool_id];

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		qdf_nbuf_unmap_single(soc->osdev,
				      rx_desc->nbuf, QDF_DMA_FROM_DEVICE);

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id of the pool the dropped buffers belong to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
			  peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

		dp_peer_unref_del_find_by_id(peer);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id of the pool the dropped buffers belong to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame
 * For case B), the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     uint8_t *mac_id, uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				mac_id, quota);
}

#ifdef DP_INVALID_PEER_ASSERT
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
		do {                                \
			qdf_assert_always(!(head)); \
			qdf_assert_always(!(tail)); \
		} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			 to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	/* If the invalid peer SG list has reached its max size, free the
	 * buffers in the list and treat the current buffer as the start of
	 * a new list.
	 *
	 * The current logic to detect the last buffer from the attn_tlv is
	 * not reliable in OFDMA UL scenarios, hence add a max-buffers check
	 * to avoid the list piling up.
	 */
	if (!dp_pdev->first_nbuf ||
	    QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(dp_pdev->invalid_peer_head_msdu)
	    >= DP_MAX_INVALID_BUFFERS) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = hal_rx_hw_desc_get_ppduid_get(soc->hal_soc,
								 rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is
		 * completely received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
						   &(dp_pdev->ppdu_info.rx_status));
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; keep the check for debugging purposes,
	 * to catch corner cases.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);

	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}
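
/*
 * Illustrative sketch (not part of the driver): dp_rx_chain_msdus() grows a
 * per-pdev singly linked nbuf list, capped at DP_MAX_INVALID_BUFFERS, and
 * reports when the MPDU is complete. The caller-side pattern (mirroring
 * dp_rx_null_q_desc_handle() below) is:
 *
 *	bool mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
 *
 *	dp_rx_process_invalid_peer_wrapper(soc, pdev->invalid_peer_head_msdu,
 *					   mpdu_done, pool_id);
 *	if (mpdu_done) {
 *		pdev->invalid_peer_head_msdu = NULL;
 *		pdev->invalid_peer_tail_msdu = NULL;
 *	}
 */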

/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *			 on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in non BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while Rx window size is still initialized to value of 2. Or
 * it may also happen if negotiated window size is 1 but peer
 * sends aggregates.
 *
 * Return: void
 */
void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	uint32_t ppdu_id;
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}
	rx_tid = &peer->rx_tid[tid];
	if (qdf_unlikely(!rx_tid)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "rx_tid is NULL!!");
		goto free_nbuf;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);

	/*
	 * If BA session is created and a non-aggregate packet is
	 * landing here then the issue is with sequence number mismatch.
	 * Proceed with delba even in that case
	 */
	if (rx_tid->ppdu_id_2k != ppdu_id &&
	    rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		rx_tid->ppdu_id_2k = ppdu_id;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto free_nbuf;
	}
	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		if (soc->cdp_soc.ol_ops->send_delba)
			soc->cdp_soc.ol_ops->send_delba(
					peer->vdev->pdev->ctrl_pdev,
					peer->ctrl_peer,
					peer->mac_addr.raw,
					tid,
					peer->vdev->ctrl_vdev,
					rx_tid->delba_rcode);
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

free_nbuf:
	if (peer)
		dp_peer_unref_del_find_by_id(peer);
	qdf_nbuf_free(nbuf);
}
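
/*
 * Illustrative sketch (not part of the driver): dp_2k_jump_handle() rate
 * limits itself via rx_tid->delba_tx_status, so repeated 2k-jump errors on
 * a TID trigger at most one outstanding DELBA request. A hypothetical
 * DELBA-completion handler would clear the flag under the same lock before
 * another request can be sent:
 *
 *	qdf_spin_lock_bh(&rx_tid->tid_lock);
 *	rx_tid->delba_tx_status = 0;	// assumed completion-side reset
 *	qdf_spin_unlock_bh(&rx_tid->tid_lock);
 */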

#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490)
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * In certain types of packets if peer_id is not correct then
 * driver may not be able to find the peer. Try finding the peer by
 * addr_2 of the received MPDU. If the peer is found, then most likely
 * sw_peer_id & ast_idx are corrupted.
 *
 * Return: True if the peer is found by addr_2 of the received MPDU,
 *	   else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	uint8_t local_id;
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
	struct dp_pdev *pdev = soc->pdev_list[pool_id];
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	/*
	 * WAR- In certain types of packets if peer_id is not correct then
	 * driver may not be able to find the peer. Try finding the peer by
	 * addr_2 of the received MPDU.
	 */
	if (wh)
		peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
					    wh->i_addr2, &local_id);
	if (peer) {
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);

		return true;
	}
	return false;
}

/**
 * dp_rx_null_q_check_pkt_len_exception() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pktlen > RX_BUFFER_SIZE, else return false
 *
 */
static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_BUFFER_SIZE)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}
#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}

static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
	return false;
}
#endif
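
/*
 * Illustrative sketch (not part of the driver): pkt_len, as passed to
 * dp_rx_null_q_check_pkt_len_exception(), is computed by the caller from
 * the RX TLVs exactly as done in dp_rx_null_q_desc_handle() below:
 *
 *	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
 *	if (dp_rx_null_q_check_pkt_len_exception(soc, pkt_len))
 *		goto drop_nbuf;	// cannot fit one RX_BUFFER_SIZE buffer
 */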

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is set up. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
 *	   if nbuf could not be handled or dropped.
 */
static QDF_STATUS
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
			 struct dp_peer *peer)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				     hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
								    rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
								 rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_null_q_check_pkt_len_exception(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(nbuf,
				    qdf_min(pkt_len, (uint32_t)RX_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return QDF_STATUS_E_FAILURE;

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		dp_err_rl("peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc,
						   pdev->invalid_peer_head_msdu,
						   mpdu_done, pool_id);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return QDF_STATUS_E_FAILURE;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	/*
	 * In qwrap mode if the received packet matches with any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets originated
	 * from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr))) {
		dp_err_rl("free buffer for multicast packet");
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		goto drop_nbuf;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
		dp_err_rl("mcast Policy Check Drop pkt");
		goto drop_nbuf;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (vdev->osif_rx) {
			qdf_nbuf_set_next(nbuf, NULL);
			DP_STATS_INC_PKT(peer, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));

			/*
			 * Update the protocol tag in SKB based on
			 * CCE metadata
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID,
						  true, true);

			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf,
					      rx_tlv_hdr, true);

			if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
					 soc->hal_soc, rx_tlv_hdr) &&
					 (vdev->rx_decap_type ==
					  htt_cmn_pkt_type_ethernet))) {
				eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

				DP_STATS_INC_PKT(peer, rx.multicast, 1,
						 qdf_nbuf_len(nbuf));
				if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
					DP_STATS_INC_PKT(peer, rx.bcast, 1,
							 qdf_nbuf_len(nbuf));
				}
			}

			vdev->osif_rx(vdev->osif_vdev, nbuf);
		} else {
			dp_err_rl("INVALID osif_rx. vdev %pK", vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			goto drop_nbuf;
		}
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	qdf_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}
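
/*
 * Illustrative sketch (not part of the driver): buffer layout assumed by
 * the pull-head logic in dp_rx_null_q_desc_handle(). Before the pull,
 * qdf_nbuf_data() points at the RX TLV area; afterwards it points at the
 * MSDU payload:
 *
 *	|<- RX_PKT_TLVS_LEN ->|<- l2_hdr_offset ->|<-- msdu_len -->|
 *	^ rx_tlv_hdr                              ^ data after pull-head
 */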

/**
 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
 *			       frames to OS or wifi parse errors.
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 * @err_code: rxdma err code
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * Return: None
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			uint8_t err_code, uint8_t mac_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, l2_hdr_offset + RX_PKT_TLVS_LEN);

	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
			    htons(QDF_LLC_STP)) {
				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
				goto process_mesh;
			} else {
				goto process_rx;
			}
		}
	}
	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report unencrypted frame error.
	 * To pass WAPI cert case, SW needs to pass unencrypted
	 * rekey frame to stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		goto process_rx;
	}
	/*
	 * In dynamic WEP case rekey frames are not encrypted,
	 * similar to WAPI. Allow EAPOL when 8021x+wep is enabled and
	 * key install is already done
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				      == QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
				  FL("mesh pkt filtered"));
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			qdf_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
	}
process_rx:
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
		if (is_broadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
		DP_STATS_INC(peer, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}
}
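
/*
 * Illustrative sketch (not part of the driver): the HAL_RXDMA_ERR_WIFI_PARSE
 * branch above peeks at the two-byte field following the two MAC addresses
 * of the decapped Ethernet frame. A hypothetical stand-alone form:
 *
 *	uint8_t *ethertype = qdf_nbuf_data(nbuf) + 2 * QDF_MAC_ADDR_SIZE;
 *
 *	if (*(uint16_t *)ethertype == htons(QDF_ETH_TYPE_8021Q)) {
 *		// VLAN tagged: inner type sits DP_SKIP_VLAN bytes further
 *		uint16_t inner = *(uint16_t *)(ethertype + DP_SKIP_VLAN);
 *		bool is_stp = (inner == htons(QDF_LLC_STP));
 *	}
 */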

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	uint16_t rx_seq, fragno;
	uint8_t is_raw;
	unsigned int tid;
	QDF_STATUS status;
	struct cdp_rx_mic_err_info mic_failure_info;

	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
					    rx_tlv_hdr))
		return;

	if (!peer) {
		dp_err_rl("peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		dp_err_rl("PDEV not found");
		goto fail;
	}

	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
	if (is_raw) {
		fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));
		/* Can get only last fragment */
		if (fragno) {
			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
							qdf_nbuf_data(nbuf));
			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
							qdf_nbuf_data(nbuf));

			status = dp_rx_defrag_add_last_frag(soc, peer,
							    tid, rx_seq, nbuf);
			dp_info_rl("Frag pkt seq# %d frag# %d consumed status %d !",
				   rx_seq, fragno, status);
			return;
		}
	}

	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.da_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get da_mac_addr");
		goto fail;
	}

	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.ta_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get ta_mac_addr");
		goto fail;
	}

	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = NULL;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->ctrl_pdev, &mic_failure_info);

fail:
	qdf_nbuf_free(nbuf);
}

uint32_t
dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	uint16_t num_msdus;
	struct dp_rx_desc *rx_desc = NULL;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc,
						      hal_ring_hdl)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
				  LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
				  HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Invalid RBM %d"),
					  msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
						      msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for fragmented
			 * case. We drop the msdus and release the link desc
			 * back if there are more than one msdu in link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);
				rx_bufs_reaped[mac_id] += count;
				continue;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);
			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);
			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);
			rx_bufs_reaped[mac_id] += count;
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_2k_jump_handle(soc,
						     ring_desc,
						     &mpdu_desc_info,
						     &mac_id, quota);
			rx_bufs_reaped[mac_id] += count;
			continue;
		}
	}

done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
  1159. uint32_t
  1160. dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
  1161. hal_ring_handle_t hal_ring_hdl, uint32_t quota)
  1162. {
  1163. hal_ring_desc_t ring_desc;
  1164. hal_soc_handle_t hal_soc;
  1165. struct dp_rx_desc *rx_desc;
  1166. union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
  1167. union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
  1168. uint32_t rx_bufs_used = 0;
  1169. uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
  1170. uint8_t buf_type, rbm;
  1171. uint32_t rx_buf_cookie;
  1172. uint8_t mac_id;
  1173. struct dp_pdev *dp_pdev;
  1174. struct dp_srng *dp_rxdma_srng;
  1175. struct rx_desc_pool *rx_desc_pool;
  1176. uint8_t *rx_tlv_hdr;
  1177. qdf_nbuf_t nbuf_head = NULL;
  1178. qdf_nbuf_t nbuf_tail = NULL;
  1179. qdf_nbuf_t nbuf, next;
  1180. struct hal_wbm_err_desc_info wbm_err_info = { 0 };
  1181. uint8_t pool_id;
  1182. uint8_t tid = 0;
  1183. /* Debug -- Remove later */
  1184. qdf_assert(soc && hal_ring_hdl);
  1185. hal_soc = soc->hal_soc;
  1186. /* Debug -- Remove later */
  1187. qdf_assert(hal_soc);
  1188. if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
  1189. /* TODO */
  1190. /*
  1191. * Need API to convert from hal_ring pointer to
  1192. * Ring Type / Ring Id combo
  1193. */
  1194. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1195. FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
  1196. goto done;
  1197. }
  1198. while (qdf_likely(quota-- && (ring_desc =
  1199. hal_srng_dst_get_next(hal_soc,
  1200. hal_ring_hdl)))) {
  1201. /* XXX */
  1202. buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);
  1203. /*
  1204. * For WBM ring, expect only MSDU buffers
  1205. */
  1206. qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);
  1207. qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
  1208. == HAL_RX_WBM_ERR_SRC_RXDMA) ||
  1209. (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
  1210. == HAL_RX_WBM_ERR_SRC_REO));
  1211. /*
  1212. * Check if the buffer is to be processed on this processor
  1213. */
  1214. rbm = hal_rx_ret_buf_manager_get(ring_desc);
  1215. if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
  1216. /* TODO */
  1217. /* Call appropriate handler */
  1218. DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
  1219. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1220. FL("Invalid RBM %d"), rbm);
  1221. continue;
  1222. }
  1223. rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);
  1224. rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
  1225. qdf_assert_always(rx_desc);
  1226. if (!dp_rx_desc_check_magic(rx_desc)) {
  1227. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1228. FL("Invalid rx_desc cookie=%d"),
  1229. rx_buf_cookie);
  1230. continue;
  1231. }
  1232. /*
  1233. * this is a unlikely scenario where the host is reaping
  1234. * a descriptor which it already reaped just a while ago
  1235. * but is yet to replenish it back to HW.
  1236. * In this case host will dump the last 128 descriptors
  1237. * including the software descriptor rx_desc and assert.
  1238. */
  1239. if (qdf_unlikely(!rx_desc->in_use)) {
  1240. DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
  1241. dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
  1242. ring_desc, rx_desc);
  1243. }
  1244. nbuf = rx_desc->nbuf;
  1245. qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE);
  1246. /*
  1247. * save the wbm desc info in nbuf TLV. We will need this
  1248. * info when we do the actual nbuf processing
  1249. */
  1250. hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
  1251. wbm_err_info.pool_id = rx_desc->pool_id;
  1252. hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
  1253. &wbm_err_info);
  1254. rx_bufs_reaped[rx_desc->pool_id]++;
  1255. DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
  1256. dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
  1257. &tail[rx_desc->pool_id],
  1258. rx_desc);
  1259. }
  1260. done:
  1261. dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
  1262. for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
  1263. if (rx_bufs_reaped[mac_id]) {
  1264. dp_pdev = soc->pdev_list[mac_id];
  1265. dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
  1266. rx_desc_pool = &soc->rx_desc_buf[mac_id];
  1267. dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
  1268. rx_desc_pool, rx_bufs_reaped[mac_id],
  1269. &head[mac_id], &tail[mac_id]);
  1270. rx_bufs_used += rx_bufs_reaped[mac_id];
  1271. }
  1272. }
  1273. nbuf = nbuf_head;
  1274. while (nbuf) {
  1275. struct dp_peer *peer;
  1276. uint16_t peer_id;
  1277. uint8_t e_code;
  1278. uint8_t *tlv_hdr;
  1279. rx_tlv_hdr = qdf_nbuf_data(nbuf);
  1280. peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
  1281. rx_tlv_hdr);
  1282. peer = dp_peer_find_by_id(soc, peer_id);
  1283. /*
  1284. * retrieve the wbm desc info from nbuf TLV, so we can
  1285. * handle error cases appropriately
  1286. */
  1287. hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);
  1288. /* Set queue_mapping in nbuf to 0 */
  1289. dp_set_rx_queue(nbuf, 0);
  1290. next = nbuf->next;
  1291. if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
  1292. if (wbm_err_info.reo_psh_rsn
  1293. == HAL_RX_WBM_REO_PSH_RSN_ERROR) {
  1294. DP_STATS_INC(soc,
  1295. rx.err.reo_error
  1296. [wbm_err_info.reo_err_code], 1);
  1297. /* increment @pdev level */
  1298. pool_id = wbm_err_info.pool_id;
  1299. dp_pdev = dp_get_pdev_for_mac_id(soc, pool_id);
  1300. if (dp_pdev)
  1301. DP_STATS_INC(dp_pdev, err.reo_error,
  1302. 1);
  1303. switch (wbm_err_info.reo_err_code) {
  1304. /*
  1305. * Handling for packets which have NULL REO
  1306. * queue descriptor
  1307. */
  1308. case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
  1309. pool_id = wbm_err_info.pool_id;
  1310. dp_rx_null_q_desc_handle(soc, nbuf,
  1311. rx_tlv_hdr,
  1312. pool_id, peer);
  1313. nbuf = next;
  1314. if (peer)
  1315. dp_peer_unref_del_find_by_id(
  1316. peer);
  1317. continue;
  1318. /* TODO */
  1319. /* Add per error code accounting */
  1320. case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
  1321. pool_id = wbm_err_info.pool_id;
  1322. if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
  1323. rx_tlv_hdr)) {
  1324. peer_id =
  1325. hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
  1326. rx_tlv_hdr);
  1327. tid =
  1328. hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
  1329. }
  1330. dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr,
  1331. peer_id, tid);
  1332. nbuf = next;
  1333. if (peer)
  1334. dp_peer_unref_del_find_by_id(
  1335. peer);
  1336. continue;
  1337. default:
  1338. dp_err_rl("Got pkt with REO ERROR: %d",
  1339. wbm_err_info.reo_err_code);
  1340. break;
  1341. }
  1342. }
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
			    == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);

				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_mac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
				case HAL_RXDMA_ERR_WIFI_PARSE:
					pool_id = wbm_err_info.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								peer,
								wbm_err_info.rxdma_err_code,
								pool_id);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer,
							     rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(
									peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					pool_id = wbm_err_info.pool_id;
					e_code = wbm_err_info.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;

					if (peer) {
						DP_STATS_INC(peer,
							     rx.err.decrypt_err,
							     1);
					} else {
						dp_rx_process_rxdma_err(soc,
									nbuf,
									tlv_hdr,
									NULL,
									e_code,
									pool_id);
						nbuf = next;
						continue;
					}

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "Packet received with Decrypt error");
					break;

				default:
					dp_err_rl("RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}
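
		/*
		 * Frames not consumed by a handler above fall through here:
		 * dump their TLVs for debug and free the nbuf.
		 */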
		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc found
 *
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: opaque pointer to the RXDMA destination ring descriptor
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
			 hal_rxdma_desc_t rxdma_dst_ring_desc,
			 void *rx_desc)
{
	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
	dp_rx_dump_info_and_assert(
			soc,
			soc->rx_rel_ring.hal_srng,
			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
			rx_desc);
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to dst ring descriptor holding the
 *			 link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus popped from the MPDU
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	hal_rxdma_desc_t ring_desc;

	msdu = NULL;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}
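
	/*
	 * Walk the chain of MSDU link descriptors: each link descriptor
	 * points to a set of buffers and to the next link descriptor; a
	 * next-pointer paddr of 0 terminates the chain.
	 */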
	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* if the msdus belong to an NSS offloaded radio &&
			 * the rbm is not SW3_BM, then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). let
			 * these buffers be given to NSS completion ring for
			 * NSS to free them.
			 * else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;
					/*
					 * this is an unlikely scenario where
					 * the host is reaping a descriptor
					 * which it already reaped just a
					 * while ago but is yet to replenish
					 * it back to HW. In this case host
					 * will dump the last 128 descriptors
					 * including the software descriptor
					 * rx_desc and assert.
					 */
					ring_desc = rxdma_dst_ring_desc;
					if (qdf_unlikely(!rx_desc->in_use)) {
						dup_desc_dbg(soc,
							     ring_desc,
							     rx_desc);
						continue;
					}

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							      QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
					      &p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);
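
	/*
	 * Account the error once per destination ring entry: the same
	 * rxdma_error_code applies to all MSDUs popped from it.
	 */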
	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

	if (pdev)
		DP_STATS_INC(pdev, err.rxdma_error, 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

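/**
 * dp_rxdma_err_process() - drain the RXDMA error destination ring for a mac
 *			    and recycle the reaped rx buffers
 *
 * @int_ctx: pointer to DP interrupt context
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: upper bound on the number of ring entries serviced in this call
 *
 * Return: amount of work done (number of rx buffers reaped and replenished)
 *
 * Note: this summary is added for readability; typically this is invoked
 * from the DP interrupt service path, but the exact call site depends on
 * the integration.
 */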
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;
	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : RXDMA error destination ring not initialized -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : RXDMA error destination ring access failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- &&
			  (rxdma_dst_ring_desc =
			   hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	dp_srng_access_end(int_ctx, soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}

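/**
 * dp_wbm_int_err_mpdu_pop() - pop and free the MSDUs of an MPDU whose link
 *			       descriptor was released due to a WBM internal
 *			       error
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to dst ring descriptor holding the
 *			 link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus popped and freed
 *
 * Note: this summary is added for readability; the function mirrors
 * dp_rx_err_mpdu_pop() without the NSS-offload and duplicate-descriptor
 * handling.
 */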
static inline uint32_t
dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
			hal_rxdma_desc_t rxdma_dst_ring_desc,
			union dp_rx_desc_list_elem_t **head,
			union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;

	msdu = NULL;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &p_last_buf_addr_info, &msdu_cnt);

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		if (!rx_msdu_link_desc) {
			DP_STATS_INC(soc,
				     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC],
				     1);
			break;
		}

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			for (i = 0; i < num_msdus; i++) {
				struct dp_rx_desc *rx_desc =
					dp_rx_cookie_2_va_rxdma_buf(
							soc,
							msdu_list.sw_cookie[i]);
				qdf_assert_always(rx_desc);
				msdu = rx_desc->nbuf;

				qdf_nbuf_unmap_single(soc->osdev, msdu,
						      QDF_DMA_FROM_DEVICE);

				qdf_nbuf_free(msdu);
				rx_bufs_used++;
				dp_rx_add_to_free_desc_list(head,
							    tail, rx_desc);
			}
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
					      &p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info,
				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);

		p_last_buf_addr_info = p_buf_addr_info;
	} while (buf_info.paddr);

	return rx_bufs_used;
}

/**
 * dp_handle_wbm_internal_error() - handles wbm_internal_error case
 *
 * @soc: core DP main context
 * @hal_desc: hal descriptor
 * @buf_type: indicates if the buffer is of type link desc or msdu
 *
 * Return: None
 *
 * wbm_internal_error is seen in the following scenarios:
 *
 * 1. Null pointers detected in WBM_RELEASE_RING descriptors
 * 2. Null pointers detected during delinking process
 *
 * Some null pointer cases:
 *
 * a. MSDU buffer pointer is NULL
 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
 * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
 */
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type)
{
	struct hal_buf_info buf_info = {0};
	struct dp_pdev *dp_pdev;
	struct dp_rx_desc *rx_desc = NULL;
	uint32_t rx_buf_cookie;
	uint32_t rx_bufs_reaped = 0;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;

	hal_rx_reo_buf_paddr_get(hal_desc, &buf_info);

	if (!buf_info.paddr) {
		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER],
			     1);
		return;
	}
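
	/* The SW cookie encodes the owning descriptor pool; recover the
	 * pool id here so the buffer can be returned to the right pool
	 * and refill ring below.
	 */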
	rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(hal_desc);
	pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(rx_buf_cookie);

	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF],
			     1);
		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);

		if (rx_desc && rx_desc->nbuf) {
			qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
					      QDF_DMA_FROM_DEVICE);
			rx_desc->unmapped = 1;

			qdf_nbuf_free(rx_desc->nbuf);
			dp_rx_add_to_free_desc_list(&head,
						    &tail,
						    rx_desc);
			rx_bufs_reaped++;
		}
	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
		rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id,
							 hal_desc,
							 &head, &tail);
	}

	if (rx_bufs_reaped) {
		struct rx_desc_pool *rx_desc_pool;
		struct dp_srng *dp_rxdma_srng;

		DP_STATS_INC(soc,
			     tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED],
			     1);
		dp_pdev = soc->pdev_list[pool_id];
		dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[pool_id];

		dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng,
					rx_desc_pool,
					rx_bufs_reaped,
					&head, &tail);
	}
}