dp_rx_err.c

/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"

/* Max buffers in invalid peer SG list */
#define DP_MAX_INVALID_BUFFERS 10
/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loop
 *			      back on same vap or a different vap.
 *
 * @soc: core DP main context
 * @peer: dp peer handler
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 *
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase = NULL;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);

	/*
	 * If the received pkt's src mac addr matches the vdev
	 * mac address, then drop the pkt as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
			  vdev->mac_addr.raw,
			  QDF_MAC_ADDR_SIZE)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * If the received pkt's src mac addr matches the wired PC's MAC
	 * addr behind the STA, or a wireless STA's MAC addr behind the
	 * repeater, then drop the pkt as it is looped back.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what is STA's sa_idx.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In these kinds of scenarios where sa is valid but
			 * ast is not in ast_table, we use the below API to get
			 * AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_list_find(soc, peer,
						    &data[QDF_MAC_ADDR_SIZE]);
			if (ase) {
				ase->ast_idx = sa_idx;
				soc->ast_table[sa_idx] = ase;
				ase->is_mapped = TRUE;
			}
		}
	} else {
		ase = dp_peer_ast_hash_find_by_pdevid(soc,
						      &data[QDF_MAC_ADDR_SIZE],
						      vdev->pdev->pdev_id);
	}

	if (ase) {
		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[QDF_MAC_ADDR_SIZE], vdev->pdev->pdev_id,
				  ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[QDF_MAC_ADDR_SIZE]);
			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}
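
/*
 * Example of the loop-back case handled above (derived from the checks in
 * dp_rx_mcast_echo_check()): a STA transmits a multicast frame, the AP
 * re-broadcasts it, and the STA receives its own frame back. The source
 * MAC then matches either the vdev MAC itself or an AST entry of type MEC
 * (or an entry owned by a different peer), and the frame is dropped.
 */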
/**
 * dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to
 *				    HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action (e.g. put in WBM idle list or
 *	       release to MSDU list)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
			       hal_buff_addrinfo_t link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM)*/
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		DP_STATS_INC(soc, rx.err.hal_ring_access_full_fail, 1);

		dp_info_rl("WBM Release Ring (Id %d) Full(Fail CNT %u)",
			   srng->ring_id,
			   soc->stats.rx.err.hal_ring_access_full_fail);
		dp_info_rl("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			   *srng->u.src_ring.hp_addr,
			   srng->u.src_ring.reap_hp,
			   *srng->u.src_ring.tp_addr,
			   srng->u.src_ring.cached_tp);
		QDF_BUG(0);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}
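
/*
 * Illustrative call (see dp_rx_reo_err_entry_process() below for a real
 * call site): once all MSDUs referenced by a link descriptor have been
 * consumed, the descriptor is handed back to the WBM idle list:
 *
 *	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
 *				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 */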
/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action (put in idle list or release to
 *	       MSDU list)
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		       uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}
/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id (filled with the pool id of the dropped buffers)
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		 uint8_t *mac_id,
		 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from a MSDU link desc belong to same pdev */
		*mac_id = rx_desc->pool_id;
		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);
		if (!pdev) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  "pdev is null for pool_id = %d",
				  rx_desc->pool_id);
			return rx_bufs_used;
		}

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		qdf_nbuf_unmap_single(soc->osdev,
				      rx_desc->nbuf, QDF_DMA_FROM_DEVICE);

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
			hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM)*/
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}
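
/*
 * Note on the drop loop above: the `&& quota--` condition bounds how many
 * MSDUs are dropped in one invocation, so a single oversized MPDU cannot
 * exceed the caller's per-poll budget; the caller re-accounts the freed
 * buffers via the returned rx_bufs_used.
 */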
/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id (filled with the pool id of the dropped buffers)
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP feels that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK %pM",
			  peer, peer->mac_addr.raw);

		dp_peer_unref_del_find_by_id(peer);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}
/**
 * dp_rx_oor_handle() - Handles an msdu with OOR error
 *
 * @soc: core txrx main context
 * @nbuf: pointer to msdu skb
 * @peer_id: dp peer ID
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This function processes the msdu delivered from the REO2TCL
 * ring with error type OOR
 *
 * Return: None
 */
static void
dp_rx_oor_handle(struct dp_soc *soc,
		 qdf_nbuf_t nbuf,
		 uint16_t peer_id,
		 uint8_t *rx_tlv_hdr)
{
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
	struct dp_peer *peer = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		dp_info_rl("peer not found");
		goto free_nbuf;
	}

	if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask,
					rx_tlv_hdr)) {
		DP_STATS_INC(soc, rx.err.reo_err_oor_to_stack, 1);
		dp_peer_unref_del_find_by_id(peer);
		return;
	}

free_nbuf:
	if (peer)
		dp_peer_unref_del_find_by_id(peer);

	DP_STATS_INC(soc, rx.err.reo_err_oor_drop, 1);
	qdf_nbuf_free(nbuf);
}
/**
 * dp_rx_reo_err_entry_process() - Handler for REO error entry processing
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: pointer to mpdu level description info
 * @link_desc_va: pointer to msdu_link_desc virtual address
 * @err_code: reo error code fetched from ring entry
 *
 * Function to handle msdus fetched from msdu link desc; currently
 * only 2K jump and OOR errors are supported.
 *
 * Return: msdu count processed.
 */
static uint32_t
dp_rx_reo_err_entry_process(struct dp_soc *soc,
			    void *ring_desc,
			    struct hal_rx_mpdu_desc_info *mpdu_desc_info,
			    void *link_desc_va,
			    enum hal_reo_error_code err_code)
{
	uint32_t rx_bufs_used = 0;
	struct dp_pdev *pdev;
	int i;
	uint8_t *rx_tlv_hdr_first;
	uint8_t *rx_tlv_hdr_last;
	uint32_t tid = DP_MAX_TIDS;
	uint16_t peer_id;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct buffer_addr_info cur_link_desc_addr_info = { 0 };
	struct buffer_addr_info next_link_desc_addr_info = { 0 };
	/* First field in REO Dst ring Desc is buffer_addr_info */
	void *buf_addr_info = ring_desc;
	qdf_nbuf_t head_nbuf = NULL;
	qdf_nbuf_t tail_nbuf = NULL;
	uint16_t msdu_processed = 0;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
					mpdu_desc_info->peer_meta_data);

more_msdu_link_desc:
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &num_msdus);
	for (i = 0; i < num_msdus; i++) {
		rx_desc = dp_rx_cookie_2_va_rxdma_buf(
					soc,
					msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from a MSDU link belong to same pdev */
		pdev = dp_get_pdev_for_lmac_id(soc, rx_desc->pool_id);

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev,
				      nbuf, QDF_DMA_FROM_DEVICE);

		QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_list.msdu_info[i].msdu_len;
		rx_bufs_used++;
		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);

		DP_RX_LIST_APPEND(head_nbuf, tail_nbuf, nbuf);

		if (qdf_unlikely(msdu_list.msdu_info[i].msdu_flags &
				 HAL_MSDU_F_MSDU_CONTINUATION))
			continue;

		rx_tlv_hdr_first = qdf_nbuf_data(head_nbuf);
		rx_tlv_hdr_last = qdf_nbuf_data(tail_nbuf);

		if (qdf_unlikely(head_nbuf != tail_nbuf)) {
			nbuf = dp_rx_sg_create(head_nbuf);
			qdf_nbuf_set_is_frag(nbuf, 1);
			DP_STATS_INC(soc, rx.err.reo_err_oor_sg_count, 1);
		}

		switch (err_code) {
		case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
			/*
			 * Only the first msdu has a valid mpdu start
			 * description tlv; use it for the following msdus.
			 */
			if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
							   rx_tlv_hdr_last))
				tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
								rx_tlv_hdr_first);

			dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr_last,
					  peer_id, tid);
			break;
		case HAL_REO_ERR_REGULAR_FRAME_OOR:
			dp_rx_oor_handle(soc, nbuf, peer_id, rx_tlv_hdr_last);
			break;
		default:
			dp_err_rl("Non-supported error code %d", err_code);
			qdf_nbuf_free(nbuf);
		}

		msdu_processed++;
		head_nbuf = NULL;
		tail_nbuf = NULL;
	}

	if (msdu_processed < mpdu_desc_info->msdu_count) {
		hal_rx_get_next_msdu_link_desc_buf_addr_info(
						link_desc_va,
						&next_link_desc_addr_info);

		if (hal_rx_is_buf_addr_info_valid(
				&next_link_desc_addr_info)) {
			dp_rx_link_desc_return_by_addr(
					soc,
					buf_addr_info,
					HAL_BM_ACTION_PUT_IN_IDLE_LIST);

			hal_rx_buffer_addr_info_get_paddr(
					&next_link_desc_addr_info,
					&buf_info);
			link_desc_va =
				dp_rx_cookie_2_link_desc_va(soc, &buf_info);
			cur_link_desc_addr_info = next_link_desc_addr_info;
			buf_addr_info = &cur_link_desc_addr_info;

			goto more_msdu_link_desc;
		}
	}

	dp_rx_link_desc_return_by_addr(soc, buf_addr_info,
				       HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	QDF_BUG(msdu_processed == mpdu_desc_info->msdu_count);

	return rx_bufs_used;
}
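
/*
 * Note on the continuation handling above: an MSDU spanning multiple RX
 * buffers carries HAL_MSDU_F_MSDU_CONTINUATION on every buffer except the
 * last, so the loop keeps appending buffers to the head/tail list and only
 * forms a scatter-gather nbuf via dp_rx_sg_create() once the final buffer
 * arrives. If an MPDU spills over one MSDU link descriptor, the
 * more_msdu_link_desc loop walks the next link descriptor in the chain
 * after returning the consumed one to the WBM idle list.
 */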
#ifdef DP_INVALID_PEER_ASSERT
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
		do {                                \
			qdf_assert_always(!(head)); \
			qdf_assert_always(!(tail)); \
		} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif
/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			 to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr, uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

	if (!dp_pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "pdev is null for mac_id = %d", mac_id);
		return mpdu_done;
	}

	/* If the invalid peer SG list has reached its max size, free the
	 * buffers in the list and treat the current buffer as the start of
	 * a new list.
	 *
	 * The current logic to detect the last buffer from attn_tlv is not
	 * reliable in OFDMA UL scenarios, hence add a max buffers check to
	 * avoid list pile-up.
	 */
	if (!dp_pdev->first_nbuf ||
	    (dp_pdev->invalid_peer_head_msdu &&
	     QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
	     (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = hal_rx_get_ppdu_id(soc->hal_soc,
						      rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is
		 * completely received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
						   &(dp_pdev->ppdu_info.rx_status));
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; the check is added for debugging purposes
	 * in case of some corner case.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);

	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}
static
void dp_rx_wbm_err_handle_bar(struct dp_soc *soc,
			      struct dp_peer *peer,
			      qdf_nbuf_t nbuf)
{
	uint8_t *rx_tlv_hdr;
	unsigned char type, subtype;
	uint16_t start_seq_num;
	uint32_t tid;
	struct ieee80211_frame_bar *bar;

	/*
	 * 1. Is this a BAR frame? If not, discard it.
	 * 2. If it is, get the peer id, tid, ssn
	 * 2a. Do a tid update
	 */

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	bar = (struct ieee80211_frame_bar *)(rx_tlv_hdr + SIZE_OF_DATA_RX_TLV);

	type = bar->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = bar->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (!(type == IEEE80211_FC0_TYPE_CTL &&
	      subtype == QDF_IEEE80211_FC0_SUBTYPE_BAR)) {
		dp_err_rl("Not a BAR frame!");
		return;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
	qdf_assert_always(tid < DP_MAX_TIDS);

	start_seq_num = le16toh(bar->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;

	dp_info_rl("tid %u window_size %u start_seq_num %u",
		   tid, peer->rx_tid[tid].ba_win_size, start_seq_num);

	dp_rx_tid_update_wifi3(peer, tid,
			       peer->rx_tid[tid].ba_win_size,
			       start_seq_num);
}
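
/*
 * Background for the SSN extraction above: the 802.11 Sequence Control
 * field packs a 4-bit fragment number in the low bits and a 12-bit
 * sequence number in the high bits, so shifting the little-endian value
 * right by IEEE80211_SEQ_SEQ_SHIFT (4) yields the starting sequence
 * number advertised by the BAR frame.
 */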
/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *			 on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in the non-BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while the Rx window size is still initialized to a value of 2.
 * Or it may also happen if the negotiated window size is 1 but
 * the peer sends aggregates.
 *
 */
void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		dp_info_rl("peer not found");
		goto free_nbuf;
	}

	if (tid >= DP_MAX_TIDS) {
		dp_info_rl("invalid tid");
		goto nbuf_deliver;
	}

	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);

	/* only if BA session is active, allow send Delba */
	if (rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto nbuf_deliver;
	}

	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		if (soc->cdp_soc.ol_ops->send_delba) {
			DP_STATS_INC(soc, rx.err.rx_2k_jump_delba_sent, 1);
			soc->cdp_soc.ol_ops->send_delba(
					peer->vdev->pdev->soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					tid,
					rx_tid->delba_rcode);
		}
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

nbuf_deliver:
	if (dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask,
					rx_tlv_hdr)) {
		DP_STATS_INC(soc, rx.err.rx_2k_jump_to_stack, 1);
		dp_peer_unref_del_find_by_id(peer);
		return;
	}

free_nbuf:
	if (peer)
		dp_peer_unref_del_find_by_id(peer);

	DP_STATS_INC(soc, rx.err.rx_2k_jump_drop, 1);
	qdf_nbuf_free(nbuf);
}
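
/*
 * Note on the DELBA gating above: delba_tx_status acts as an "in flight"
 * flag so that only one DELBA is outstanding per TID while further 2k
 * jump exceptions keep arriving, and delba_tx_retry counts how many times
 * the teardown has been attempted.
 */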
#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750)
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - to find exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * In certain types of packets if peer_id is not correct then
 * the driver may not be able to find the peer. Try finding the peer
 * by addr_2 of the received MPDU. If the peer is found, then most
 * likely sw_peer_id & ast_idx are corrupted.
 *
 * Return: True if the peer is found by addr_2 of the received MPDU,
 *	   else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "pdev is null for pool_id = %d", pool_id);
		return false;
	}
	/*
	 * WAR - In certain types of packets if peer_id is not correct
	 * then the driver may not be able to find the peer. Try finding
	 * the peer by addr_2 of the received MPDU.
	 */
	if (wh)
		peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
					    wh->i_addr2);
	if (peer) {
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);

		return true;
	}

	return false;
}
/**
 * dp_rx_check_pkt_len() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pktlen > RX_DATA_BUFFER_SIZE, else return false
 *
 */
static inline
bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_DATA_BUFFER_SIZE)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}

#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}

static inline
bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len)
{
	return false;
}
#endif
/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID has set up the TID queue. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
 *	   if nbuf could not be handled or dropped.
 */
static QDF_STATUS
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
			 struct dp_peer *peer)
{
	uint32_t pkt_len;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;
	struct hal_rx_msdu_metadata msdu_metadata;
	uint16_t sa_idx = 0;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				     hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
								    rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
								 rx_tlv_hdr));
	qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
								  rx_tlv_hdr));
	qdf_nbuf_set_da_valid(nbuf,
			      hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));
	qdf_nbuf_set_sa_valid(nbuf,
			      hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
							      rx_tlv_hdr));

	hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + RX_PKT_TLVS_LEN;

	if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
		if (dp_rx_check_pkt_len(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(
			nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return QDF_STATUS_E_FAILURE;

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, pool_id);

		if (!pdev) {
			dp_err_rl("pdev is null for pool_id = %d", pool_id);
			return QDF_STATUS_E_FAILURE;
		}

		dp_err_rl("peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));

		/* QCN9000 has the support enabled */
		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
			mpdu_done = true;
			nbuf->next = NULL;
			/* Trigger invalid peer handler wrapper */
			dp_rx_process_invalid_peer_wrapper(soc,
							   nbuf, mpdu_done,
							   pool_id);
		} else {
			mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr,
						      pool_id);
			/* Trigger invalid peer handler wrapper */
			dp_rx_process_invalid_peer_wrapper(soc,
							   pdev->invalid_peer_head_msdu,
							   mpdu_done, pool_id);
		}

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}

		return QDF_STATUS_E_FAILURE;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	if (qdf_nbuf_is_frag(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
					  RX_PKT_TLVS_LEN));

	dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);

	if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
			goto drop_nbuf;
		}
	}

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	/*
	 * In qwrap mode if the received packet matches any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets
	 * originated from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr))) {
		dp_err_rl("free buffer for multicast packet");
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		goto drop_nbuf;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
		dp_err_rl("mcast Policy Check Drop pkt");
		goto drop_nbuf;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf,
					msdu_metadata);

	if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
		tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (vdev->osif_rx) {
			qdf_nbuf_set_next(nbuf, NULL);
			DP_STATS_INC_PKT(peer, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));

			/*
			 * Update the protocol tag in SKB based on
			 * CCE metadata
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID,
						  true, true);

			/* Update the flow tag in SKB based on FSE metadata */
			dp_rx_update_flow_tag(soc, vdev, nbuf,
					      rx_tlv_hdr, true);

			if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
					 soc->hal_soc, rx_tlv_hdr) &&
					 (vdev->rx_decap_type ==
					  htt_cmn_pkt_type_ethernet))) {
				eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

				DP_STATS_INC_PKT(peer, rx.multicast, 1,
						 qdf_nbuf_len(nbuf));
				if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
					DP_STATS_INC_PKT(peer, rx.bcast, 1,
							 qdf_nbuf_len(nbuf));
				}
			}

			vdev->osif_rx(vdev->osif_vdev, nbuf);
		} else {
			dp_err_rl("INVALID osif_rx. vdev %pK", vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			goto drop_nbuf;
		}
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	qdf_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}
/**
 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
 *			       or wifi parse error frames to OS
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 * @err_code: rxdma err code
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * Return: None
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			uint8_t err_code, uint8_t mac_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
							   rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	if (dp_rx_check_pkt_len(soc, pkt_len)) {
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}
	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	dp_rx_skip_tlvs(nbuf, l2_hdr_offset);

	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
			if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
			    htons(QDF_LLC_STP)) {
				DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
				goto process_mesh;
			} else {
				goto process_rx;
			}
		}
	}
	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report unencrypted frame error.
	 * To pass WAPI cert case, SW needs to pass unencrypted
	 * rekey frame to stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		goto process_rx;
	}
	/*
	 * In dynamic WEP case rekey frames are not encrypted,
	 * similar to WAPI. Allow EAPOL when 8021+wep is enabled and
	 * key install is already done.
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				      == QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
				  FL("mesh pkt filtered"));
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			qdf_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
	}
process_rx:
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
							rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;
		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
		if (is_broadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		/* Update the flow tag in SKB based on FSE metadata */
		dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
		DP_STATS_INC(peer, rx.to_stack.num, 1);
		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);
	}

	return;
}
/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	uint16_t rx_seq, fragno;
	uint8_t is_raw;
	unsigned int tid;
	QDF_STATUS status;
	struct cdp_rx_mic_err_info mic_failure_info;

	if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
					    rx_tlv_hdr))
		return;

	if (!peer) {
		dp_info_rl("peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_info_rl("VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		dp_info_rl("PDEV not found");
		goto fail;
	}

	is_raw = HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, qdf_nbuf_data(nbuf));
	if (is_raw) {
		fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));
		/* Can get only last fragment */
		if (fragno) {
			tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
							qdf_nbuf_data(nbuf));
			rx_seq = hal_rx_get_rx_sequence(soc->hal_soc,
							qdf_nbuf_data(nbuf));

			status = dp_rx_defrag_add_last_frag(soc, peer,
							    tid, rx_seq, nbuf);
			dp_info_rl("Frag pkt seq# %d frag# %d consumed "
				   "status %d !", rx_seq, fragno, status);
			return;
		}
	}

	if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.da_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get da_mac_addr");
		goto fail;
	}

	if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
				  &mic_failure_info.ta_mac_addr.bytes[0])) {
		dp_err_rl("Failed to get ta_mac_addr");
		goto fail;
	}

	mic_failure_info.key_id = 0;
	mic_failure_info.multicast =
		IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
	qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
	mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
	mic_failure_info.data = NULL;
	mic_failure_info.vdev_id = vdev->vdev_id;

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(soc->ctrl_psoc, pdev->pdev_id,
				   &mic_failure_info);

fail:
	qdf_nbuf_free(nbuf);
	return;
}
  1207. uint32_t
  1208. dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
  1209. hal_ring_handle_t hal_ring_hdl, uint32_t quota)
  1210. {
  1211. hal_ring_desc_t ring_desc;
  1212. hal_soc_handle_t hal_soc;
  1213. uint32_t count = 0;
  1214. uint32_t rx_bufs_used = 0;
  1215. uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
  1216. uint8_t mac_id = 0;
  1217. uint8_t buf_type;
  1218. uint8_t error, rbm;
  1219. struct hal_rx_mpdu_desc_info mpdu_desc_info;
  1220. struct hal_buf_info hbi;
  1221. struct dp_pdev *dp_pdev;
  1222. struct dp_srng *dp_rxdma_srng;
  1223. struct rx_desc_pool *rx_desc_pool;
  1224. uint32_t cookie = 0;
  1225. void *link_desc_va;
  1226. struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
  1227. uint16_t num_msdus;
  1228. struct dp_rx_desc *rx_desc = NULL;
  1229. /* Debug -- Remove later */
  1230. qdf_assert(soc && hal_ring_hdl);
  1231. hal_soc = soc->hal_soc;
  1232. /* Debug -- Remove later */
  1233. qdf_assert(hal_soc);
  1234. if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
  1235. /* TODO */
  1236. /*
  1237. * Need API to convert from hal_ring pointer to
  1238. * Ring Type / Ring Id combo
  1239. */
  1240. DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
  1241. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1242. FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
  1243. goto done;
  1244. }
	while (qdf_likely(quota-- && (ring_desc =
				      hal_srng_dst_get_next(hal_soc,
							    hal_ring_hdl)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
				  LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
				  HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST) &&
				 (msdu_list.rbm[0] != DP_DEFRAG_RBM))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Invalid RBM %d"),
					  msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
						      msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for the
			 * fragmented case. Drop the msdus and release the
			 * link desc back if there is more than one msdu in
			 * the link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);

				rx_bufs_reaped[mac_id] += count;
				continue;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_reo_err_entry_process(
					soc,
					ring_desc,
					&mpdu_desc_info,
					link_desc_va,
					HAL_REO_ERR_REGULAR_FRAME_2K_JUMP);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}

		if (hal_rx_reo_is_oor_error(ring_desc)) {
			DP_STATS_INC(soc,
				     rx.err.reo_error[HAL_REO_ERR_REGULAR_FRAME_OOR],
				     1);
			/* increment @pdev level */
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			if (dp_pdev)
				DP_STATS_INC(dp_pdev, err.reo_error, 1);

			count = dp_rx_reo_err_entry_process(
					soc,
					ring_desc,
					&mpdu_desc_info,
					link_desc_va,
					HAL_REO_ERR_REGULAR_FRAME_OOR);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}
	}
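	/*
	 * Ring access is complete: flush any defrag waitlist entries whose
	 * flush deadline has passed, then replenish the RXDMA refill rings
	 * with as many buffers as were reaped above, per mac.
	 */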
done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
#ifdef DROP_RXDMA_DECRYPT_ERR
/**
 * dp_handle_rxdma_decrypt_err() - Check if decrypt err frames can be handled
 *
 * Return: true if rxdma decrypt err frames are handled and false otherwise
 */
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return false;
}
#else
static inline bool dp_handle_rxdma_decrypt_err(void)
{
	return true;
}
#endif
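/**
 * dp_rx_wbm_err_process() - process error frames routed to the WBM release
 *	ring
 *
 * @int_ctx: pointer to DP interrupt context
 * @soc: core DP main context
 * @hal_ring_hdl: opaque pointer to the HAL WBM error release ring
 * @quota: number of units (packets) that can be serviced in one shot
 *
 * Top-level demultiplexer for frames routed to the WBM2HOST sw release
 * ring: buffers are reaped in a first pass, then each nbuf is dispatched
 * to the appropriate REO or RXDMA error handler in a second pass.
 *
 * Illustrative call site (a sketch, not the actual service loop — the
 * "budget"/"work_done" names are hypothetical): the function is designed
 * to be driven with a work budget, e.g.
 *
 *	work_done = dp_rx_wbm_err_process(int_ctx, soc,
 *					  soc->rx_rel_ring.hal_srng, budget);
 *	budget -= work_done;
 *
 * where soc->rx_rel_ring is the WBM release ring referenced elsewhere in
 * this file.
 *
 * Return: number of rx buffers reaped
 */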
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
	hal_ring_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;
	uint8_t msdu_continuation = 0;
	bool first_msdu_in_sg = false;
	uint32_t msdu_len = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring_hdl);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
		goto done;
	}
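	/*
	 * First pass: reap descriptors from the WBM error release ring,
	 * unmap each nbuf, stash the WBM error info in the nbuf TLV area
	 * and chain the nbufs for the second-pass error handling below.
	 */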
	while (qdf_likely(quota)) {
		ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl);

		if (qdf_unlikely(!ring_desc)) {
			/* Check hw hp in case of SG support */
			if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
				/*
				 * Update the cached hp from hw hp
				 * This is required for partially created
				 * SG packets while quota is still left
				 */
				hal_srng_sync_cachedhp(hal_soc, hal_ring_hdl);
				ring_desc = hal_srng_dst_get_next(hal_soc,
								  hal_ring_hdl);
				if (!ring_desc) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  FL("No Rx Hw Desc for intermediate sg -- %pK"),
						  hal_ring_hdl);
					break;
				}
			} else {
				/* Come out of the loop in Non SG support cases */
				break;
			}
		}

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert_always(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		/*
		 * This is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but is yet to replenish it back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
						   ring_desc, rx_desc);
		}

		if (qdf_unlikely(soc->wbm_release_desc_rx_sg_support)) {
			/* SG is detected from continuation bit */
			msdu_continuation =
				hal_rx_wbm_err_msdu_continuation_get(hal_soc,
								     ring_desc);
			if (msdu_continuation && !first_msdu_in_sg) {
				/* Update length from first buffer in SG */
				msdu_len = hal_rx_msdu_start_msdu_len_get(
						qdf_nbuf_data(rx_desc->nbuf));
				first_msdu_in_sg = true;
				QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_len;
			}

			if (msdu_continuation) {
				/* MSDU continued packets */
				qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
				QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_len;
			} else {
				/* This is the terminal packet in SG */
				qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
				qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
				QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_len;
				first_msdu_in_sg = false;
			}
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);

		/*
		 * If the continuation bit is set then we have an MSDU spread
		 * across multiple buffers; do not decrement quota till we
		 * reap all buffers of that MSDU.
		 */
		if (qdf_likely(!msdu_continuation))
			quota -= 1;
	}
done:
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}
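	/*
	 * Second pass: walk the accumulated nbuf list. For each nbuf,
	 * recover the WBM error info stashed in its TLV area and dispatch
	 * to the REO or RXDMA error handler; anything not consumed by a
	 * handler is dumped and freed at the bottom of the loop.
	 */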
	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_peer *peer;
		uint16_t peer_id;
		uint8_t err_code;
		uint8_t *tlv_hdr;

		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
							   rx_tlv_hdr);
		peer = dp_peer_find_by_id(soc, peer_id);

		if (!peer)
			dp_info_rl("peer is null peer_id%u err_src%u err_rsn%u",
				   peer_id, wbm_err_info.wbm_err_src,
				   wbm_err_info.reo_psh_rsn);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;

		/*
		 * Form the SG for msdu continued buffers
		 * QCN9000 has this support
		 */
		if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			nbuf = dp_rx_sg_create(nbuf);
			next = nbuf->next;
			/*
			 * SG error handling is not done correctly,
			 * drop SG frames for now.
			 */
			qdf_nbuf_free(nbuf);
			dp_info_rl("scattered msdu dropped");
			nbuf = next;
			continue;
		}

		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
			    == HAL_RX_WBM_REO_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err_info.reo_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev, err.reo_error, 1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					dp_rx_null_q_desc_handle(soc, nbuf,
								 rx_tlv_hdr,
								 pool_id, peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;

					if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
									   rx_tlv_hdr)) {
						peer_id =
						hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
										 rx_tlv_hdr);
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc,
									  rx_tlv_hdr);
					}
					QDF_NBUF_CB_RX_PKT_LEN(nbuf) =
						hal_rx_msdu_start_msdu_len_get(
								rx_tlv_hdr);
					nbuf->next = NULL;
					dp_2k_jump_handle(soc, nbuf,
							  rx_tlv_hdr,
							  peer_id, tid);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;
				case HAL_REO_ERR_BAR_FRAME_2K_JUMP:
				case HAL_REO_ERR_BAR_FRAME_OOR:
					if (peer)
						dp_rx_wbm_err_handle_bar(soc,
									 peer,
									 nbuf);
					break;

				default:
					dp_info_rl("Got pkt with REO ERROR: %d",
						   wbm_err_info.reo_err_code);
					break;
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
			    == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_lmac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
				case HAL_RXDMA_ERR_WIFI_PARSE:
					pool_id = wbm_err_info.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								peer,
								wbm_err_info.rxdma_err_code,
								pool_id);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer, rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer) {
						DP_STATS_INC(peer,
							     rx.err.decrypt_err, 1);
						break;
					}

					if (!dp_handle_rxdma_decrypt_err())
						break;

					pool_id = wbm_err_info.pool_id;
					err_code = wbm_err_info.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					dp_rx_process_rxdma_err(soc, nbuf,
								tlv_hdr, NULL,
								err_code,
								pool_id);
					nbuf = next;
					continue;

				default:
					dp_err_rl("RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc found
 *
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: rxdma destination ring descriptor
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
			 hal_rxdma_desc_t rxdma_dst_ring_desc,
			 void *rx_desc)
{
	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
	dp_rx_dump_info_and_assert(
			soc,
			soc->rx_rel_ring.hal_srng,
			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
			rx_desc);
}
/**
 * dp_rx_err_mpdu_pop() - extract the MSDU's from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: rxdma destination ring descriptor
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU that were popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	hal_rxdma_desc_t ring_desc;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "pdev is null for mac_id = %d", mac_id);
		return rx_bufs_used;
	}

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert_always(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* If the msdus belong to an NSS offloaded radio and
			 * the rbm is not SW1_BM then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). Let
			 * these buffers be given to the NSS completion ring
			 * for NSS to free them.
			 * Else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;
					/*
					 * This is an unlikely scenario where
					 * the host is reaping a descriptor
					 * which it already reaped just a
					 * while ago but is yet to replenish
					 * it back to HW.
					 * In this case host will dump the
					 * last 128 descriptors including the
					 * software descriptor rx_desc and
					 * assert.
					 */
					ring_desc = rxdma_dst_ring_desc;
					if (qdf_unlikely(!rx_desc->in_use)) {
						dup_desc_dbg(soc,
							     ring_desc,
							     rx_desc);
						continue;
					}

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							      QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
		dp_rx_link_desc_return_by_addr(soc,
					       (hal_buff_addrinfo_t)
					       rx_link_buf_info,
					       bm_action);
	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
	if (pdev)
		DP_STATS_INC(pdev, err.rxdma_error, 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}
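/**
 * dp_rxdma_err_process() - RXDMA error destination ring processing
 *
 * @int_ctx: pointer to DP interrupt context
 * @soc: core DP main context
 * @mac_id: mac id which identifies the RXDMA error destination ring
 * @quota: upper bound on the number of ring descriptors to reap
 *
 * Reaps entries from the per-mac RXDMA error destination ring, pops and
 * frees the MSDUs via dp_rx_err_mpdu_pop(), and replenishes the refill
 * ring with the reaped buffers.
 *
 * Return: number of rx buffers reaped and replenished
 */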
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = soc->rxdma_err_dst_ring[mac_id].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RXDMA error destination ring not initialized -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RXDMA error destination ring access failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	dp_srng_access_end(int_ctx, soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}
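/**
 * dp_wbm_int_err_mpdu_pop() - pop and free the MSDUs referenced by a
 *	link descriptor chain released with a wbm_internal_error
 *
 * @soc: core DP main context
 * @mac_id: mac id
 * @rxdma_dst_ring_desc: rxdma destination ring descriptor
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of rx buffers reaped
 */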
static inline uint32_t
dp_wbm_int_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
			hal_rxdma_desc_t rxdma_dst_ring_desc,
			union dp_rx_desc_list_elem_t **head,
			union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	uint32_t rx_bufs_used = 0, msdu_cnt, i;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];

	msdu = 0;

	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &msdu_cnt);

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		if (!rx_msdu_link_desc) {
			DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_LINK_DESC], 1);
			break;
		}

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			for (i = 0; i < num_msdus; i++) {
				struct dp_rx_desc *rx_desc =
					dp_rx_cookie_2_va_rxdma_buf(
							soc,
							msdu_list.sw_cookie[i]);
				qdf_assert_always(rx_desc);
				msdu = rx_desc->nbuf;

				qdf_nbuf_unmap_single(soc->osdev, msdu,
						      QDF_DMA_FROM_DEVICE);

				qdf_nbuf_free(msdu);
				rx_bufs_used++;
				dp_rx_add_to_free_desc_list(head,
							    tail, rx_desc);
			}
		}

		/*
		 * Store the current link buffer into the local structure
		 * to be used for release purpose.
		 */
		hal_rxdma_buff_addr_info_set(rx_link_buf_info, buf_info.paddr,
					     buf_info.sw_cookie, buf_info.rbm);

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info);
		dp_rx_link_desc_return_by_addr(soc, (hal_buff_addrinfo_t)
					       rx_link_buf_info,
					       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
	} while (buf_info.paddr);

	return rx_bufs_used;
}
/**
 * dp_handle_wbm_internal_error() - handles wbm_internal_error case
 *
 * @soc: core DP main context
 * @hal_desc: hal descriptor
 * @buf_type: indicates if the buffer is of type link desc or msdu
 *
 * Return: None
 *
 * wbm_internal_error is seen in following scenarios :
 *
 * 1. Null pointers detected in WBM_RELEASE_RING descriptors
 * 2. Null pointers detected during delinking process
 *
 * Some null pointer cases:
 *
 * a. MSDU buffer pointer is NULL
 * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
 * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
 */
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type)
{
	struct hal_buf_info buf_info = {0};
	struct dp_rx_desc *rx_desc = NULL;
	uint32_t rx_buf_cookie;
	uint32_t rx_bufs_reaped = 0;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint8_t pool_id;

	hal_rx_reo_buf_paddr_get(hal_desc, &buf_info);

	if (!buf_info.paddr) {
		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_BUFFER], 1);
		return;
	}

	rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(hal_desc);
	pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(rx_buf_cookie);

	if (buf_type == HAL_WBM_RELEASE_RING_2_BUFFER_TYPE) {
		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_NULL_MSDU_BUFF], 1);
		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);

		if (rx_desc && rx_desc->nbuf) {
			qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
					      QDF_DMA_FROM_DEVICE);
			rx_desc->unmapped = 1;

			qdf_nbuf_free(rx_desc->nbuf);
			dp_rx_add_to_free_desc_list(&head,
						    &tail,
						    rx_desc);
			rx_bufs_reaped++;
		}
	} else if (buf_type == HAL_WBM_RELEASE_RING_2_DESC_TYPE) {
		rx_bufs_reaped = dp_wbm_int_err_mpdu_pop(soc, pool_id,
							 hal_desc,
							 &head, &tail);
	}

	if (rx_bufs_reaped) {
		struct rx_desc_pool *rx_desc_pool;
		struct dp_srng *dp_rxdma_srng;

		DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_REO_BUFF_REAPED], 1);
		dp_rxdma_srng = &soc->rx_refill_buf_ring[pool_id];
		rx_desc_pool = &soc->rx_desc_buf[pool_id];

		dp_rx_buffers_replenish(soc, pool_id, dp_rxdma_srng,
					rx_desc_pool,
					rx_bufs_reaped,
					&head, &tail);
	}
}