dp_rx_err.c

/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <linux/ieee80211.h>
#endif
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#ifdef RX_DESC_DEBUG_CHECK
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}
#else
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}
#endif
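
/*
 * Usage sketch (illustrative only): the callers below validate a
 * descriptor looked up from a ring cookie before touching its nbuf:
 *
 *	struct dp_rx_desc *rx_desc =
 *		dp_rx_cookie_2_va_rxdma_buf(soc, cookie);
 *	if (!dp_rx_desc_check_magic(rx_desc))
 *		return rx_bufs_used;	(stale or corrupt cookie, skip entry)
 *
 * With RX_DESC_DEBUG_CHECK defined, the magic word is cleared on first
 * use, so reaping the same cookie twice fails the second check.
 */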
/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is a loopback
 *			      on the same vap or a different vap
 *
 * @soc: core DP main context
 * @peer: dp peer handler
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);

	/*
	 * If the received pkt's src mac addr matches the vdev
	 * mac address, drop the pkt as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN],
			  vdev->mac_addr.raw,
			  DP_MAC_ADDR_LEN)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * If the received pkt's src mac addr matches the MAC addr of a
	 * wired PC behind the STA, or of a wireless STA behind the
	 * repeater, drop the pkt as it is looped back.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

		if ((sa_idx < 0) ||
		    (sa_idx >= (WLAN_UMAC_PSOC_MAX_PEERS * 2))) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what is STA's sa_idx.
			 * For this reason the AST is still not associated to
			 * any index position in ast_table.
			 * In these kinds of scenarios where sa is valid but
			 * ast is not in ast_table, we use the below API to get
			 * AST entry for STA's own mac_address.
			 */
			ase = dp_peer_ast_hash_find(soc,
						    &data[DP_MAC_ADDR_LEN]);
		}
	} else {
		ase = dp_peer_ast_hash_find(soc, &data[DP_MAC_ADDR_LEN]);
	}

	if (ase) {
		ase->ast_idx = sa_idx;
		soc->ast_table[sa_idx] = ase;

		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[DP_MAC_ADDR_LEN], vdev->pdev->pdev_id,
				  ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[DP_MAC_ADDR_LEN]);
			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}
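
/*
 * Usage sketch (mirrors the call made from dp_rx_null_q_desc_handle()
 * below): the verdict simply decides whether the frame is delivered or
 * freed as a loopback:
 *
 *	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
 *		qdf_nbuf_free(nbuf);
 *		return;
 *	}
 */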
/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action for the returned descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	void *hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}

	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action for the returned descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}
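
/*
 * Usage sketch: once all MSDUs of an errored MPDU have been reaped, the
 * link descriptor itself is handed back so HW can reuse it, e.g. (as in
 * dp_rx_msdus_drop() below):
 *
 *	dp_rx_link_desc_return(soc, ring_desc,
 *			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 */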
/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
				 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
				 union dp_rx_desc_list_elem_t **head,
				 union dp_rx_desc_list_elem_t **tail,
				 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(rx_tlv_hdr))
			hal_rx_print_pn(rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}
/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      union dp_rx_desc_list_elem_t **head,
		      union dp_rx_desc_list_elem_t **tail,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK "
			  "(%02x:%02x:%02x:%02x:%02x:%02x)",
			  peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						head, tail, quota);

	return rx_bufs_used;
}
/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame
 * For case B), the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     union dp_rx_desc_list_elem_t **head,
		     union dp_rx_desc_list_elem_t **tail,
		     uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				head, tail, quota);
}
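
/*
 * Note: of the two cases (A and B) described above, this REO-path
 * handler currently implements only the drop; DELBA generation for the
 * non-BA case is done by dp_2k_jump_handle() on the WBM path below.
 */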
/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 *			 to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	if (!dp_pdev->first_nbuf) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is
		 * completely received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
						   &(dp_pdev->ppdu_info.rx_status));
	}

	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}
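
/*
 * Usage sketch (as in dp_rx_null_q_desc_handle() below): MSDUs are
 * chained per pdev until the MPDU completes, then the whole chain is
 * handed to the invalid-peer machinery:
 *
 *	mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
 *	dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);
 */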
/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *			 on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: TID for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in non BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while the Rx window size is still initialized to a value of 2. Or
 * it may also happen if the negotiated window size is 1 but the peer
 * sends aggregates.
 *
 * Return: None
 */
static void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	uint32_t ppdu_id;
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}
	rx_tid = &peer->rx_tid[tid];
	if (qdf_unlikely(rx_tid == NULL)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "rx_tid is NULL!!");
		goto free_nbuf;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);
	if (rx_tid->ppdu_id_2k != ppdu_id) {
		rx_tid->ppdu_id_2k = ppdu_id;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto free_nbuf;
	}
	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		soc->cdp_soc.ol_ops->send_delba(peer->vdev->pdev->ctrl_pdev,
						peer->ctrl_peer,
						peer->mac_addr.raw,
						tid,
						peer->vdev->ctrl_vdev,
						rx_tid->delba_rcode);
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

free_nbuf:
	qdf_nbuf_free(nbuf);
	return;
}
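
/*
 * Note on the ppdu_id_2k bookkeeping above: the first 2k-jump frame of a
 * PPDU only records the ppdu_id and is freed; a later frame of the same
 * PPDU triggers send_delba(), and delba_tx_status then suppresses
 * further DELBA attempts for the TID.
 */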
/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is set up. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: None
 */
static void
dp_rx_null_q_desc_handle(struct dp_soc *soc,
			 qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr,
			 uint8_t pool_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	uint8_t tid;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				     hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  FL("Len %d Extn list %pK "),
		  (uint32_t)qdf_nbuf_len(nbuf),
		  qdf_nbuf_get_ext_list(nbuf));

	/* Set length in nbuf */
	if (!qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_set_pktlen(nbuf, pkt_len);

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, "peer is NULL");

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		qdf_nbuf_free(nbuf);
		return;
	}
	/*
	 * In qwrap mode if the received packet matches with any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets originated
	 * from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		qdf_nbuf_free(nbuf);
		return;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_DEBUG,
			  "%s free buffer for multicast packet",
			  __func__);
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		qdf_nbuf_free(nbuf);
		return;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				    hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_ERROR,
			  FL("mcast Policy Check Drop pkt"));
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (peer &&
		    peer->rx_tid[tid].hw_qdesc_vaddr_unaligned == NULL) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: p_id %d msdu_len %d hdr_off %d",
		  __func__, peer_id, msdu_len, l2_hdr_offset);

	print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
		       qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (qdf_unlikely(peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  FL("received pkt with same src MAC"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			return;
		}

		if (vdev->osif_rx) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  FL("vdev %pK osif_rx %pK"), vdev,
				  vdev->osif_rx);
			qdf_nbuf_set_next(nbuf, NULL);
			vdev->osif_rx(vdev->osif_vdev, nbuf);
			DP_STATS_INCC_PKT(vdev->pdev, rx.multicast, 1,
					  qdf_nbuf_len(nbuf),
					  hal_rx_msdu_end_da_is_mcbc_get(
						  rx_tlv_hdr));
			DP_STATS_INC_PKT(vdev->pdev, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("INVALID vdev %pK OR osif_rx"), vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		}
	}

	return;
}
/**
 * dp_rx_err_deliver() - Function to deliver error frames to OS
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Return: None
 */
static void
dp_rx_err_deliver(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	struct ether_header *eh;
	bool isBroadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/* Drop & free packet if mesh mode not enabled */
	if (!vdev->mesh_vdev) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
			== QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
			  FL("mesh pkt filtered"));
		DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

		qdf_nbuf_free(nbuf);
		return;
	}
	dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		isBroadcast = (IEEE80211_IS_BROADCAST
			       (eh->ether_dhost)) ? 1 : 0;
		if (isBroadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}

	return;
}
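
/*
 * Note: dp_rx_err_deliver() forwards frames to the stack only for mesh
 * vdevs; for all other vdevs the frame is dropped above. It is invoked
 * from dp_rx_wbm_err_process() below for the HAL_RXDMA_ERR_UNENCRYPTED
 * case.
 */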
/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: DP SOC handle
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Return: void
 */
void
dp_rx_process_mic_error(struct dp_soc *soc,
			qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_pkt_hdr;
	struct dp_peer *peer;
	uint16_t peer_id, rx_seq, fragno;
	unsigned int tid;
	QDF_STATUS status;

	if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
		return;

	rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "PDEV not found");
		goto fail;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, qdf_nbuf_data(nbuf));
	rx_seq = (((*(uint16_t *)wh->i_seq) &
		   IEEE80211_SEQ_SEQ_MASK) >>
		  IEEE80211_SEQ_SEQ_SHIFT);
	fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));

	/* Can get only last fragment */
	if (fragno) {
		status = dp_rx_defrag_add_last_frag(soc, peer,
						    tid, rx_seq, nbuf);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Frag pkt seq# %d frag# %d consumed status %d !",
			  __func__, rx_seq, fragno, status);
		return;
	}

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh);

fail:
	qdf_nbuf_free(nbuf);
	return;
}
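
/*
 * Note: only the first MSDU of an MPDU carries a usable 802.11 header
 * here, hence the early return on !first_msdu above. A non-zero fragment
 * number marks the trailing fragment of a defrag sequence, which is
 * absorbed via dp_rx_defrag_add_last_frag() instead of being reported to
 * umac.
 */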
/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t rx_bufs_used = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	uint16_t num_msdus;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
				  LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
				  HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), msdu_list.rbm[0]);

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/* TODO */
			rx_bufs_used += dp_rx_frag_handle(soc,
							  ring_desc,
							  &mpdu_desc_info,
							  &head, &tail, quota);
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				     1);
			rx_bufs_used += dp_rx_pn_error_handle(soc,
							      ring_desc,
							      &mpdu_desc_info,
							      &head, &tail,
							      quota);
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				     1);
			rx_bufs_used += dp_rx_2k_jump_handle(soc,
							     ring_desc,
							     &mpdu_desc_info,
							     &head, &tail,
							     quota);
			continue;
		}
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (soc->rx.flags.defrag_timeout_check)
		dp_rx_defrag_waitlist_flush(soc);

	/* Assume MAC id = 0, owner = 0 */
	if (rx_bufs_used) {
		dp_pdev = soc->pdev_list[0];
		dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[0];

		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
					rx_bufs_used, &head, &tail);
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
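
/*
 * Usage sketch (illustrative; the service-loop and ring names are
 * assumptions, not shown in this file): the REO exception ring is
 * drained from the soc interrupt service path with the remaining budget:
 *
 *	work_done = dp_rx_err_process(soc,
 *				      soc->reo_exception_ring.hal_srng,
 *				      remaining_quota);
 */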
/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint16_t peer_id = 0xFFFF;
	uint8_t tid = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	nbuf = nbuf_head;
	while (nbuf) {
		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;

		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
				== HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err_info.reo_err_code], 1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
							   "Got pkt with REO ERROR: %d",
							   wbm_err_info.reo_err_code);
					dp_rx_null_q_desc_handle(soc,
								 nbuf,
								 rx_tlv_hdr,
								 pool_id);
					nbuf = next;
					continue;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "Got pkt with REO ERROR: %d",
						  wbm_err_info.reo_err_code);
					if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) {
						peer_id =
						hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr,
							  peer_id, tid);
					nbuf = next;
					continue;
				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "REO error %d detected",
						  wbm_err_info.reo_err_code);
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
					HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
				== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				struct dp_peer *peer = NULL;
				uint16_t peer_id = 0xFFFF;

				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);
				peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
				peer = dp_peer_find_by_id(soc, peer_id);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
					dp_rx_err_deliver(soc,
							  nbuf,
							  rx_tlv_hdr);
					nbuf = next;
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc,
								nbuf,
								rx_tlv_hdr);
					nbuf = next;
					if (peer)
						DP_STATS_INC(peer,
							     rx.err.mic_err, 1);
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer)
						DP_STATS_INC(peer,
							     rx.err.decrypt_err, 1);
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "Packet received with Decrypt error");
					break;

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}
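
/*
 * Usage sketch (illustrative; the ring name is an assumption): the
 * WBM2SW release ring is serviced the same way as the REO exception
 * ring:
 *
 *	work_done = dp_rx_wbm_err_process(soc,
 *					  soc->rx_rel_ring.hal_srng,
 *					  remaining_quota);
 */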
/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdu in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   void *rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);

	msdu = 0;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* If the msdus belong to an NSS offloaded radio
			 * and the rbm is not SW1_BM, return the msdu_link
			 * descriptor without freeing the msdus (nbufs). Let
			 * these buffers be given to the NSS completion ring
			 * for NSS to free them.
			 * Else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert(rx_desc);
					msdu = rx_desc->nbuf;

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							      QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
					      &p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}
/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 *
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	void *hal_soc;
	void *rxdma_dst_ring_desc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Init Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring access Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
			hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	hal_srng_access_end(hal_soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);
		work_done += rx_bufs_used;
	}

	return work_done;
}
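
/*
 * Usage sketch (illustrative; the loop bounds are an assumption): each
 * mac's RXDMA error destination ring is drained independently:
 *
 *	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++)
 *		work_done += dp_rxdma_err_process(soc, mac_id, quota);
 */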