dp_rx_err.c

/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <linux/ieee80211.h>
#endif
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */

#ifdef RX_DESC_DEBUG_CHECK
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC)) {
		return false;
	}
	rx_desc->magic = 0;
	return true;
}
#else
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}
#endif

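/*
 * Note: in RX_DESC_DEBUG_CHECK builds the magic value is consumed (reset
 * to 0) on the first successful check, so a descriptor that is handed to
 * an error path twice fails the second check and is skipped by the
 * callers below instead of having its nbuf freed twice.
 */
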
/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is looped back
 *			      on the same vap or a different vap
 *
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					struct dp_peer *peer,
					uint8_t *rx_tlv_hdr,
					qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase;
	uint16_t sa_idx;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);
	/*
	 * if the received pkt's src mac addr matches with the vdev
	 * mac address then drop the pkt as it is looped back
	 */
	if (!(qdf_mem_cmp(&data[DP_MAC_ADDR_LEN],
			vdev->mac_addr.raw,
			DP_MAC_ADDR_LEN)))
		return true;

	/* if the received pkt's src mac addr matches with the
	 * wired PC's MAC addr which is behind the STA or with the
	 * wireless STA's MAC addr which is behind the Repeater,
	 * then drop the pkt as it is looped back
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

		if ((sa_idx < 0) || (sa_idx > (WLAN_UMAC_PSOC_MAX_PEERS * 2))) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
					"invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
	} else
		ase = dp_peer_ast_hash_find(soc, &data[DP_MAC_ADDR_LEN], 0);

	if (ase) {
		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
				(ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_INFO,
				"received pkt with same src mac %pM",
				&data[DP_MAC_ADDR_LEN]);

			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}

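/*
 * Summary of the MEC decision above: the check applies only to STA vdevs
 * receiving a multicast/broadcast frame. The frame is treated as an echo
 * (and dropped by the caller) if its source MAC matches the vdev's own
 * MAC, or if the source MAC resolves -- via the SA index from the RX TLVs
 * or an AST hash lookup -- to an AST entry that is of MEC type or that
 * belongs to a peer other than the one the frame arrived from.
 */
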
/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 *				      HW (WBM), by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: action to program in the WBM release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
					uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	void *hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM)*/
		hal_rx_msdu_link_desc_set(hal_soc,
				src_srng_desc, link_desc_addr, bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			*srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			*srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: action to program in the WBM release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

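/*
 * Typical usage (illustrative only): the error handlers in this file reap
 * all MSDUs referenced by a REO error ring entry and then hand the MSDU
 * link descriptor back to WBM, e.g.:
 *
 *	dp_rx_link_desc_return(soc, ring_desc,
 *			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 *
 * HAL_BM_ACTION_RELEASE_MSDU_LIST is used instead when the MSDU buffers
 * themselves should also be released by WBM rather than freed by the
 * host (e.g. on an invalid RBM, or for NSS-offloaded radios below).
 */
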
/**
 * dp_rx_msdus_drop() - Drops all MSDUs of an MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		union dp_rx_desc_list_elem_t **head,
		union dp_rx_desc_list_elem_t **tail,
		uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(link_desc_va, &msdu_list,
		&mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
			msdu_list.sw_cookie[i]);

		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("Invalid rx_desc cookie=%d"),
					msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(rx_tlv_hdr))
			hal_rx_print_pn(rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM)*/
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors
 * or if DP feels that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      union dp_rx_desc_list_elem_t **head,
		      union dp_rx_desc_list_elem_t **tail,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"discard rx due to PN error for peer %pK "
			"(%02x:%02x:%02x:%02x:%02x:%02x)\n",
			peer,
			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						head, tail, quota);

	return rx_bufs_used;
}

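/*
 * Note: peer_pn_policy is currently always false (the peer-specific
 * policy check above is still a TODO), so every MPDU that reaches
 * dp_rx_pn_error_handle() is dropped through dp_rx_msdus_drop().
 */
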
/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame
 * For case B), the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     union dp_rx_desc_list_elem_t **head,
		     union dp_rx_desc_list_elem_t **tail,
		     uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				head, tail, quota);
}

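/*
 * Note: dp_rx_2k_jump_handle() currently treats both 2K-jump cases the
 * same way and simply drops the MPDU via dp_rx_msdus_drop(); the
 * DELBA/DEAUTH path described in the header comment is not implemented
 * here yet.
 */
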
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
								uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf, next_nbuf;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);

		curr_nbuf = dp_pdev->invalid_peer_head_msdu;

		while (curr_nbuf) {
			next_nbuf = qdf_nbuf_next(curr_nbuf);
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = next_nbuf;
		}
		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;

		hal_rx_mon_hw_desc_get_mpdu_status(rx_tlv_hdr,
				&(dp_pdev->ppdu_info.rx_status));
	}

	if (hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		mpdu_done = true;
	}

	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
				dp_pdev->invalid_peer_tail_msdu,
				nbuf);

	return mpdu_done;
}

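/*
 * dp_rx_chain_msdus() accumulates the MSDUs of an MPDU that arrived
 * without a known peer on the per-pdev invalid_peer list: the first MSDU
 * of a new MPDU flushes any stale chain and snapshots the MPDU status
 * from the RX TLVs into the pdev's ppdu_info, every MSDU is appended to
 * the list, and the last MSDU marks the MPDU as complete so the caller
 * can hand the full chain to the invalid-peer handler.
 */
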
/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: packet buffer
 * @rx_tlv_hdr: start of the rx TLV header
 * @pool_id: id of the rx descriptor pool the buffer came from
 *
 * This function handles NULL queue descriptor violations arising out of
 * a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is set up. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: void
 */
static void
dp_rx_null_q_desc_handle(struct dp_soc *soc,
			qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr,
			uint8_t pool_id)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	uint8_t tid;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
			hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
			hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		bool mpdu_done = false;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("peer is NULL"));

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, mpdu_done);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		qdf_nbuf_free(nbuf);
		return;
	}

	/*
	 * In qwrap mode if the received packet matches with any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets originated
	 * from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		qdf_nbuf_free(nbuf);
		return;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			QDF_TRACE_LEVEL_DEBUG,
			"%s free buffer for multicast packet",
			__func__);
		DP_STATS_INC_PKT(peer, rx.nawds_mcast_drop,
					1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);
		return;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_ERROR,
				FL("mcast Policy Check Drop pkt"));
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		tid = hal_rx_mpdu_start_tid_get(rx_tlv_hdr);
		if (peer &&
			peer->rx_tid[tid].hw_qdesc_vaddr_unaligned == NULL) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"%s: p_id %d msdu_len %d hdr_off %d",
		__func__, peer_id, msdu_len, l2_hdr_offset);

	print_hex_dump(KERN_ERR, "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
					qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (qdf_unlikely(peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_INFO,
					FL("received pkt with same src MAC"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			return;
		}

		if (vdev->osif_rx) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				FL("vdev %pK osif_rx %pK"), vdev,
				vdev->osif_rx);
			qdf_nbuf_set_next(nbuf, NULL);
			vdev->osif_rx(vdev->osif_vdev, nbuf);
			DP_STATS_INCC_PKT(vdev->pdev, rx.multicast, 1,
				qdf_nbuf_len(nbuf),
				hal_rx_msdu_end_da_is_mcbc_get(
					rx_tlv_hdr));
			DP_STATS_INC_PKT(vdev->pdev, rx.to_stack, 1,
							qdf_nbuf_len(nbuf));
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("INVALID vdev %pK OR osif_rx"), vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		}
	}

	return;
}

/**
 * dp_rx_err_deliver() - Function to deliver error frames to OS
 *
 * @soc: core DP main context
 * @nbuf: packet buffer
 * @rx_tlv_hdr: start of the rx TLV header
 *
 * Return: void
 */
static void
dp_rx_err_deliver(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	struct ether_header *eh;
	bool isBroadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("peer is NULL"));
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				qdf_nbuf_len(nbuf));
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/* Drop & free packet if mesh mode not enabled */
	if (!vdev->mesh_vdev) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLV's
	 */
	qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
			== QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
			FL("mesh pkt filtered"));
		DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

		qdf_nbuf_free(nbuf);
		return;
	}
	dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);

	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
				(vdev->rx_decap_type ==
				htt_cmn_pkt_type_ethernet))) {
		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
		isBroadcast = (IEEE80211_IS_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;

		if (isBroadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}

	return;
}

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: DP SOC handle
 * @nbuf: packet buffer
 * @rx_tlv_hdr: start of the rx TLV header
 *
 * Return: void
 */
void
dp_rx_process_mic_error(struct dp_soc *soc,
			qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_pkt_hdr;
	struct dp_peer *peer;
	uint16_t peer_id;

	if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
		return;

	rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"PDEV not found");
		goto fail;
	}

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->osif_pdev, vdev->vdev_id, wh);

fail:
	qdf_nbuf_free(nbuf);
	return;
}

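/*
 * Note: dp_rx_process_mic_error() only raises an indication for the first
 * MSDU of an MPDU. For that MSDU the 802.11 header is recovered from the
 * RX TLVs, the TKIP MIC failure is reported to the upper layer through
 * the ol_ops->rx_mic_error callback (which typically triggers the MIC
 * countermeasures), and the nbuf is freed before returning.
 */
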
/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t rx_bufs_used = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	uint16_t num_msdus;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
							LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(link_desc_va, &msdu_list, &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] !=
						HAL_RX_BUF_RBM_SW3_BM) &&
				(msdu_list.rbm[0] !=
					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Invalid RBM %d"), rbm);

			/* Return link descriptor through WBM ring (SW2WBM)*/
			dp_rx_link_desc_return(soc, ring_desc,
					HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/* TODO */
			rx_bufs_used += dp_rx_frag_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.
				reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				1);

			rx_bufs_used += dp_rx_pn_error_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.
				reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				1);

			rx_bufs_used += dp_rx_2k_jump_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (soc->rx.flags.defrag_timeout_check)
		dp_rx_defrag_waitlist_flush(soc);

	/* Assume MAC id = 0, owner = 0 */
	if (rx_bufs_used) {
		dp_pdev = soc->pdev_list[0];
		dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[0];

		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
			rx_bufs_used, &head, &tail, HAL_RX_BUF_RBM_SW3_BM);
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

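/*
 * Processing order in dp_rx_err_process(): every REO error ring entry is
 * expected to reference an MSDU link descriptor; fragmented MPDUs are
 * handed to the defrag path (dp_rx_frag_handle), PN-check failures to
 * dp_rx_pn_error_handle() and 2K sequence jumps to dp_rx_2k_jump_handle().
 * Buffers consumed by any of these handlers are replenished in one shot
 * at the end (MAC id 0 is assumed for now, as noted above).
 */
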
/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {

		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_RXDMA) ||
				(HAL_RX_WBM_ERR_SRC_GET(ring_desc)
				== HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
					FL("Invalid rx_desc cookie=%d"),
					rx_buf_cookie);
			continue;
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
								&wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
						&tail[rx_desc->pool_id],
						rx_desc);
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_reaped[mac_id],
					&head[mac_id], &tail[mac_id],
					HAL_RX_BUF_RBM_SW3_BM);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	nbuf = nbuf_head;
	while (nbuf) {
		rx_tlv_hdr = qdf_nbuf_data(nbuf);
		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		next = nbuf->next;
		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
				== HAL_RX_WBM_REO_PSH_RSN_ERROR) {

				DP_STATS_INC(soc,
					rx.err.reo_error
					[wbm_err_info.reo_err_code], 1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_WARN,
						"Got pkt with REO ERROR: %d",
						wbm_err_info.reo_err_code);
					dp_rx_null_q_desc_handle(soc,
								nbuf,
								rx_tlv_hdr,
								pool_id);
					nbuf = next;
					continue;
				/* TODO */
				/* Add per error code accounting */
				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"REO error %d detected",
						wbm_err_info.reo_err_code);
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
					HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
				== HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				struct dp_peer *peer = NULL;
				uint16_t peer_id = 0xFFFF;

				DP_STATS_INC(soc,
					rx.err.rxdma_error
					[wbm_err_info.rxdma_err_code], 1);
				peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
				peer = dp_peer_find_by_id(soc, peer_id);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
					dp_rx_err_deliver(soc,
							nbuf,
							rx_tlv_hdr);
					nbuf = next;
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc,
								nbuf,
								rx_tlv_hdr);
					nbuf = next;
					if (peer)
						DP_STATS_INC(peer, rx.err.mic_err, 1);
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer)
						DP_STATS_INC(peer, rx.err.decrypt_err, 1);
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"Packet received with Decrypt error");
					break;

				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"RXDMA error %d",
						wbm_err_info.
						rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		qdf_nbuf_free(nbuf);
		nbuf = next;
		hal_rx_dump_pkt_tlvs(rx_tlv_hdr, QDF_TRACE_LEVEL_DEBUG);
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

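/*
 * dp_rx_wbm_err_process() works in two passes: the first pass, under the
 * SRNG access, only reaps the ring, unmaps each nbuf, stashes the WBM
 * error info into the nbuf's TLV area and queues the descriptor for
 * replenish; the second pass walks the reaped nbuf list after
 * hal_srng_access_end() and dispatches each frame by error source (REO
 * null-queue errors vs. RXDMA unencrypted/TKIP-MIC/decrypt errors),
 * freeing anything a handler did not consume.
 */
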
/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdu in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
	void *rxdma_dst_ring_desc,
	union dp_rx_desc_list_elem_t **head,
	union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = soc->pdev_list[mac_id];

	msdu = 0;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
		&p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(rx_msdu_link_desc, &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* if the msdus belong to an NSS offloaded radio &&
			 * the rbm is not SW3_BM then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). let
			 * these buffers be given to NSS completion ring for
			 * NSS to free them.
			 * else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
				wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert(rx_desc);
					msdu = rx_desc->nbuf;

					qdf_nbuf_unmap_single(soc->osdev, msdu,
						QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"[%s][%d] msdu_nbuf=%pK\n",
						__func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
						tail, rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
			&p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

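/*
 * Note: when the MPDU belongs to an NSS-offloaded radio and its buffers
 * are not owned by SW3_BM, dp_rx_err_mpdu_pop() does not free the nbufs;
 * it returns the link descriptor with HAL_BM_ACTION_RELEASE_MSDU_LIST so
 * the NSS completion path can release them instead.
 */
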
/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 *
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int ring_idx = dp_get_ring_id_for_mac_id(soc, mac_id);
	uint8_t pdev_id;
	void *hal_soc;
	void *rxdma_dst_ring_desc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

#ifdef DP_INTR_POLL_BASED
	if (!pdev)
		return 0;
#endif
	pdev_id = pdev->pdev_id;
	err_dst_srng = pdev->rxdma_err_dst_ring[ring_idx].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s %d : HAL Monitor Destination Ring Init \
			Failed -- %pK\n",
			__func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s %d : HAL Monitor Destination Ring access \
			Failed -- %pK\n",
			__func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {

		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
					rxdma_dst_ring_desc,
					&head, &tail);
	}

	hal_srng_access_end(hal_soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng,
			rx_desc_pool, rx_bufs_used, &head, &tail,
			HAL_RX_BUF_RBM_SW3_BM);

		work_done += rx_bufs_used;
	}

	return work_done;
}
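
/*
 * Illustrative only (the actual hook-up lives in the DP interrupt/poll
 * code, not in this file): dp_rxdma_err_process() is meant to be driven
 * from the per-MAC service context with a budget, along the lines of:
 *
 *	work_done = dp_rxdma_err_process(soc, mac_id, remaining_quota);
 *	budget -= work_done;
 *
 * (soc, mac_id, remaining_quota and budget above are placeholder names.)
 * Any buffers it consumes are replenished to the RXDMA refill ring before
 * it returns.
 */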