/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <linux/ieee80211.h>
#endif
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is looped
 *			      back on the same vap or a different vap.
 *
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_ast_entry *ase;
	uint16_t sa_idx = 0;
	uint8_t *data;

	/*
	 * Multicast Echo Check is required only if vdev is STA and
	 * received pkt is a multicast/broadcast pkt. Otherwise
	 * skip the MEC check.
	 */
	if (vdev->opmode != wlan_op_mode_sta)
		return false;

	if (!hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))
		return false;

	data = qdf_nbuf_data(nbuf);
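
	/*
	 * For ethernet-decapped frames, the buffer begins with the 802.3
	 * header: destination MAC at bytes 0..5 and source MAC at bytes
	 * 6..11, so &data[QDF_MAC_ADDR_SIZE] below points at the source
	 * MAC address used by all the loopback checks that follow.
	 */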
	/*
	 * If the received packet's source MAC address matches the vdev
	 * MAC address, drop the packet as it is looped back.
	 */
	if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
			  vdev->mac_addr.raw,
			  QDF_MAC_ADDR_SIZE)))
		return true;

	/*
	 * In case of qwrap isolation mode, do not drop loopback packets.
	 * In isolation mode, all packets from the wired stations need to go
	 * to rootap and loop back to reach the wireless stations and
	 * vice-versa.
	 */
	if (qdf_unlikely(vdev->isolation_vdev))
		return false;

	/*
	 * If the received packet's source MAC address matches that of a
	 * wired PC behind the STA, or of a wireless STA behind the
	 * repeater, drop the packet as it is looped back.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	if (hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr)) {
		sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);

		if (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "invalid sa_idx: %d", sa_idx);
			qdf_assert_always(0);
		}

		ase = soc->ast_table[sa_idx];
		if (!ase) {
			/* We do not get a peer map event for STA and without
			 * this event we don't know what the STA's sa_idx is.
			 * For this reason the AST is still not associated with
			 * any index position in the ast_table.
			 * In such scenarios, where sa is valid but the ast
			 * entry is not in the ast_table, we use the API below
			 * to get the AST entry for the STA's own mac_address.
			 */
			ase = dp_peer_ast_list_find(soc, peer,
						    &data[QDF_MAC_ADDR_SIZE]);
			if (ase) {
				ase->ast_idx = sa_idx;
				soc->ast_table[sa_idx] = ase;
				ase->is_mapped = TRUE;
			}
		}
	} else
		ase = dp_peer_ast_hash_find_by_pdevid(soc,
						      &data[QDF_MAC_ADDR_SIZE],
						      vdev->pdev->pdev_id);

	if (ase) {
		if (ase->pdev_id != vdev->pdev->pdev_id) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Detected DBDC Root AP %pM, %d %d",
				  &data[QDF_MAC_ADDR_SIZE], vdev->pdev->pdev_id,
				  ase->pdev_id);
			return false;
		}

		if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
		    (ase->peer != peer)) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "received pkt with same src mac %pM",
				  &data[QDF_MAC_ADDR_SIZE]);
			return true;
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
	return false;
}

/**
 * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
 *				      HW (WBM) by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to be programmed in the release
 *	       descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc, void *link_desc_addr,
			       uint8_t bm_action)
{
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	void *hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}
	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, link_desc_addr,
					  bm_action);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}
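
/*
 * Note on the two bm_action values used with the return APIs in this file:
 * HAL_BM_ACTION_PUT_IN_IDLE_LIST returns only the link descriptor to the
 * WBM idle list (the host has already consumed or freed the MSDU buffers),
 * while HAL_BM_ACTION_RELEASE_MSDU_LIST also hands the attached MSDU
 * buffers back through WBM; this file uses the latter when the buffers are
 * not owned by the host, e.g. on the NSS offload path in
 * dp_rx_err_mpdu_pop() below.
 */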

/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to be programmed in the release
 *	       descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc, uint8_t bm_action)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

	return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

/**
 * dp_rx_msdus_drop() - Drops all MSDUs of an MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id filled in with the pool id of the dropped descriptors
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
				 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
				 uint8_t *mac_id,
				 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct dp_pdev *pdev;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;
	uint8_t *rx_tlv_hdr;
	uint32_t tid;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
			     &mpdu_desc_info->msdu_count);

	for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert_always(rx_desc);

		/* all buffers from an MSDU link descriptor belong to the
		 * same pdev
		 */
		*mac_id = rx_desc->pool_id;
		pdev = soc->pdev_list[rx_desc->pool_id];

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  msdu_list.sw_cookie[i]);
			return rx_bufs_used;
		}

		qdf_nbuf_unmap_single(soc->osdev,
				      rx_desc->nbuf, QDF_DMA_BIDIRECTIONAL);

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		rx_bufs_used++;
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
						rx_desc->rx_buf_start);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with PN error for tid :%d", tid);

		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
		if (hal_rx_encryption_info_valid(rx_tlv_hdr))
			hal_rx_print_pn(rx_tlv_hdr);

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(&pdev->free_list_head,
					    &pdev->free_list_tail, rx_desc);
	}

	/* Return link descriptor through WBM ring (SW2WBM) */
	dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

	return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id filled in with the pool id of the dropped descriptors
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP determines that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      uint8_t *mac_id,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "discard rx due to PN error for peer %pK "
			  "(%02x:%02x:%02x:%02x:%02x:%02x)",
			  peer,
			  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
			  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
			  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

		dp_peer_unref_del_find_by_id(peer);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "Packet received with PN error");

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						mac_id, quota);

	return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: mac id filled in with the pool id of the dropped descriptors
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame
 * For case B), the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     uint8_t *mac_id, uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				mac_id, quota);
}
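
/*
 * Note: on this REO error-ring path the 2K-jump MPDU is simply dropped via
 * dp_rx_msdus_drop(); the DELBA-based recovery for 2K jumps reported
 * through the WBM release ring is handled separately in dp_2k_jump_handle()
 * below.
 */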

#ifdef CONFIG_MCL
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
	do {                                \
		qdf_assert_always(!(head)); \
		qdf_assert_always(!(tail)); \
	} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of an mpdu
 *			 to the pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
		  uint8_t mac_id)
{
	bool mpdu_done = false;
	qdf_nbuf_t curr_nbuf = NULL;
	qdf_nbuf_t tmp_nbuf = NULL;

	/* TODO: Currently only single radio is supported, hence
	 * pdev hard coded to '0' index
	 */
	struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

	if (!dp_pdev->first_nbuf) {
		qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
		dp_pdev->ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(rx_tlv_hdr);
		dp_pdev->first_nbuf = true;

		/* If the new nbuf received is the first msdu of the
		 * amsdu and there are msdus in the invalid peer msdu
		 * list, then let us free all the msdus of the invalid
		 * peer msdu list.
		 * This scenario can happen when we start receiving
		 * a new a-msdu even before the previous a-msdu is
		 * completely received.
		 */
		curr_nbuf = dp_pdev->invalid_peer_head_msdu;
		while (curr_nbuf) {
			tmp_nbuf = curr_nbuf->next;
			qdf_nbuf_free(curr_nbuf);
			curr_nbuf = tmp_nbuf;
		}

		dp_pdev->invalid_peer_head_msdu = NULL;
		dp_pdev->invalid_peer_tail_msdu = NULL;
		hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
						   &(dp_pdev->ppdu_info.rx_status));
	}
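
	/*
	 * The MPDU is complete once the current nbuf carries the same PPDU
	 * id that was latched for its first msdu and the hardware has set
	 * the msdu_done bit in the attention TLV for it.
	 */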
	if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
	    hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
		qdf_assert_always(dp_pdev->first_nbuf == true);
		dp_pdev->first_nbuf = false;
		mpdu_done = true;
	}

	/*
	 * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
	 * should be NULL here; the check is added for debugging purposes,
	 * in case of some corner case.
	 */
	DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
					dp_pdev->invalid_peer_tail_msdu);

	DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
			  dp_pdev->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}

/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *			 on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in the non-BA case. This typically
 * may happen if aggregates are received on a QOS enabled TID
 * while the Rx window size is still initialized to the value of 2. Or
 * it may also happen if the negotiated window size is 1 but the peer
 * sends aggregates.
 *
 * Return: None
 */
void
dp_2k_jump_handle(struct dp_soc *soc,
		  qdf_nbuf_t nbuf,
		  uint8_t *rx_tlv_hdr,
		  uint16_t peer_id,
		  uint8_t tid)
{
	uint32_t ppdu_id;
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		goto free_nbuf;
	}
	rx_tid = &peer->rx_tid[tid];
	if (qdf_unlikely(!rx_tid)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "rx_tid is NULL!!");
		goto free_nbuf;
	}
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);

	/*
	 * If a BA session is created and a non-aggregate packet is
	 * landing here, then the issue is with sequence number mismatch.
	 * Proceed with delba even in that case.
	 */
	if (rx_tid->ppdu_id_2k != ppdu_id &&
	    rx_tid->ba_status != DP_RX_BA_ACTIVE) {
		rx_tid->ppdu_id_2k = ppdu_id;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		goto free_nbuf;
	}
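
	/*
	 * delba_tx_status acts as a one-shot gate: it is set when a DELBA
	 * is requested for this TID, so subsequent 2k-jump frames are
	 * simply dropped instead of triggering duplicate DELBA requests
	 * towards the stack.
	 */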
	if (!rx_tid->delba_tx_status) {
		rx_tid->delba_tx_retry++;
		rx_tid->delba_tx_status = 1;
		rx_tid->delba_rcode =
			IEEE80211_REASON_QOS_SETUP_REQUIRED;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		soc->cdp_soc.ol_ops->send_delba(peer->vdev->pdev->ctrl_pdev,
						peer->ctrl_peer,
						peer->mac_addr.raw,
						tid,
						peer->vdev->ctrl_vdev,
						rx_tid->delba_rcode);
	} else {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}

free_nbuf:
	if (peer)
		dp_peer_unref_del_find_by_id(peer);
	qdf_nbuf_free(nbuf);
	return;
}

#ifdef QCA_WIFI_QCA6390
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - handle an invalid
 *						     peer_id exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * In certain types of packets if peer_id is not correct then
 * the driver may not be able to find the peer. Try finding the peer
 * by addr_2 of the received MPDU. If the peer is found, then most
 * likely sw_peer_id & ast_idx are corrupted.
 *
 * Return: True if the peer is found by addr_2 of the received MPDU,
 *	   else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	uint8_t local_id;
	struct dp_peer *peer = NULL;
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
	struct dp_pdev *pdev = soc->pdev_list[pool_id];
	struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

	/*
	 * WAR: In certain types of packets, if peer_id is not correct,
	 * the driver may not be able to find the peer. Try finding the
	 * peer by addr_2 of the received MPDU.
	 */
	if (wh)
		peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
					    wh->i_addr2, &local_id);
	if (peer) {
		dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
				 1, qdf_nbuf_len(nbuf));
		qdf_nbuf_free(nbuf);

		return true;
	}

	return false;
}

/**
 * dp_rx_null_q_check_pkt_len_exception() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pktlen > RX_BUFFER_SIZE, else false
 */
static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
	if (qdf_unlikely(pkt_len > RX_BUFFER_SIZE)) {
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
				 1, pkt_len);
		return true;
	} else {
		return false;
	}
}
#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
					      uint8_t pool_id,
					      uint8_t *rx_tlv_hdr,
					      qdf_nbuf_t nbuf)
{
	return false;
}

static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
	return false;
}
#endif

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is set up. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
 *	   if nbuf could not be handled or was dropped.
 */
static QDF_STATUS
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, uint8_t pool_id,
			 struct dp_peer *peer)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	uint8_t tid;
	qdf_ether_header_t *eh;

	qdf_nbuf_set_rx_chfrag_start(nbuf,
				     hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr));
	qdf_nbuf_set_rx_chfrag_end(nbuf,
				   hal_rx_msdu_end_last_msdu_get(rx_tlv_hdr));

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
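
	/*
	 * Buffer layout as seen by the host at this point:
	 * [RX_PKT_TLVS_LEN bytes of rx TLVs][l2_hdr_offset padding][msdu],
	 * so the total frame length in the nbuf is the sum of the three;
	 * the pull_head calls further below strip the first two parts.
	 */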
	if (!qdf_nbuf_get_ext_list(nbuf)) {
		if (dp_rx_null_q_check_pkt_len_exception(soc, pkt_len))
			goto drop_nbuf;

		/* Set length in nbuf */
		qdf_nbuf_set_pktlen(nbuf,
				    qdf_min(pkt_len, (uint32_t)RX_BUFFER_SIZE));
		qdf_assert_always(nbuf->data == rx_tlv_hdr);
	}

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		dp_err_rl("MSDU DONE failure");
		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	if (!peer &&
	    dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
							  rx_tlv_hdr, nbuf))
		return QDF_STATUS_E_FAILURE;

	if (!peer) {
		bool mpdu_done = false;
		struct dp_pdev *pdev = soc->pdev_list[pool_id];

		dp_err_rl("peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);

		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc,
						   pdev->invalid_peer_head_msdu,
						   mpdu_done);

		if (mpdu_done) {
			pdev->invalid_peer_head_msdu = NULL;
			pdev->invalid_peer_tail_msdu = NULL;
		}
		return QDF_STATUS_E_FAILURE;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("Null vdev!");
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto drop_nbuf;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	if (qdf_nbuf_get_ext_list(nbuf))
		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	else
		qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
		/* this is a looped back MCBC pkt, drop it */
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	/*
	 * In qwrap mode, if the received packet matches any of the vdev
	 * mac addresses, drop it. Do not receive multicast packets
	 * originated from any proxysta.
	 */
	if (check_qwrap_multicast_loopback(vdev, nbuf)) {
		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
		goto drop_nbuf;
	}

	if (qdf_unlikely((peer->nawds_enabled == true) &&
			 hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		dp_err_rl("free buffer for multicast packet");
		DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
		goto drop_nbuf;
	}

	if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer,
				    hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr))) {
		dp_err_rl("mcast Policy Check Drop pkt");
		goto drop_nbuf;
	}

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
		       vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(rx_tlv_hdr)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, rx_tlv_hdr);
		if (peer && !peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		if (qdf_unlikely(peer->bss_peer)) {
			dp_info_rl("received pkt with same src MAC");
			DP_STATS_INC_PKT(peer, rx.mec_drop, 1,
					 qdf_nbuf_len(nbuf));
			goto drop_nbuf;
		}

		if (vdev->osif_rx) {
			qdf_nbuf_set_next(nbuf, NULL);
			DP_STATS_INC_PKT(peer, rx.to_stack, 1,
					 qdf_nbuf_len(nbuf));

			/*
			 * Update the protocol tag in SKB based on
			 * CCE metadata
			 */
			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
						  EXCEPTION_DEST_RING_ID,
						  true, true);

			if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
							rx_tlv_hdr) &&
					 (vdev->rx_decap_type ==
					  htt_cmn_pkt_type_ethernet))) {
				eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

				DP_STATS_INC_PKT(peer, rx.multicast, 1,
						 qdf_nbuf_len(nbuf));
				if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
					DP_STATS_INC_PKT(peer, rx.bcast, 1,
							 qdf_nbuf_len(nbuf));
				}
			}

			vdev->osif_rx(vdev->osif_vdev, nbuf);
		} else {
			dp_err_rl("INVALID osif_rx. vdev %pK", vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
			goto drop_nbuf;
		}
	}
	return QDF_STATUS_SUCCESS;

drop_nbuf:
	qdf_nbuf_free(nbuf);
	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_rx_process_rxdma_err() - Function to deliver frames with rxdma
 *			       unencrypted_err or wifi parse errors to the OS
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 * @err_code: rxdma err code
 *
 * Return: None
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
			uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			uint8_t err_code)
{
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	struct dp_vdev *vdev;
	qdf_ether_header_t *eh;
	bool is_broadcast;

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(rx_tlv_hdr);
	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	qdf_nbuf_set_next(nbuf, NULL);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
	qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

	if (!peer) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
		DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
				 qdf_nbuf_len(nbuf));
		/* Trigger invalid peer handler wrapper */
		dp_rx_process_invalid_peer_wrapper(soc, nbuf, true);
		return;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %pK OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, l2_hdr_offset + RX_PKT_TLVS_LEN);

	if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
		uint8_t *pkt_type;

		pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
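
		/*
		 * pkt_type now points at the EtherType field that follows
		 * the destination and source MAC addresses. If it carries
		 * an 802.1Q tag, the check below looks DP_SKIP_VLAN bytes
		 * further (past the tag) for the STP LLC value before
		 * deciding whether to keep or drop the frame.
		 */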
		if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q) &&
		    *(uint16_t *)(pkt_type + DP_SKIP_VLAN) == htons(QDF_LLC_STP)) {
			DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
			goto process_mesh;
		} else {
			DP_STATS_INC(vdev->pdev, dropped.wifi_parse, 1);
			qdf_nbuf_free(nbuf);
			return;
		}
	}

	if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
		goto process_mesh;

	/*
	 * A WAPI cert AP sends rekey frames as unencrypted.
	 * Thus RXDMA will report an unencrypted frame error.
	 * To pass the WAPI cert case, SW needs to pass the unencrypted
	 * rekey frame to the stack.
	 */
	if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
		qdf_nbuf_cb_update_peer_local_id(nbuf, peer->local_id);
		goto process_rx;
	}
	/*
	 * In the dynamic WEP case, rekey frames are not encrypted,
	 * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled and
	 * key install is already done.
	 */
	if ((vdev->sec_type == cdp_sec_type_wep104) &&
	    (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
		goto process_rx;

process_mesh:

	if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		return;
	}

	if (vdev->mesh_vdev) {
		if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
				== QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
				  FL("mesh pkt filtered"));
			DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

			qdf_nbuf_free(nbuf);
			return;
		}
		dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
	}
process_rx:
	if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr) &&
			 (vdev->rx_decap_type ==
			  htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		is_broadcast = (QDF_IS_ADDR_BROADCAST
				(eh->ether_dhost)) ? 1 : 0;

		DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
		if (is_broadcast) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1,
					 qdf_nbuf_len(nbuf));
		}
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		dp_rx_deliver_raw(vdev, nbuf, peer);
	} else {
		/* Update the protocol tag in SKB based on CCE metadata */
		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
					  EXCEPTION_DEST_RING_ID, true, true);
		DP_STATS_INC(peer, rx.to_stack.num, 1);
		vdev->osif_rx(vdev->osif_vdev, nbuf);
	}

	return;
}

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct ol_if_ops *tops = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_pkt_hdr;
	uint16_t rx_seq, fragno;
	unsigned int tid;
	QDF_STATUS status;

	if (!hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr))
		return;

	rx_pkt_hdr = hal_rx_pkt_hdr_get(qdf_nbuf_data(nbuf));
	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!peer) {
		dp_err_rl("peer not found");
		goto fail;
	}

	vdev = peer->vdev;
	if (!vdev) {
		dp_err_rl("VDEV not found");
		goto fail;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		dp_err_rl("PDEV not found");
		goto fail;
	}

	tid = hal_rx_mpdu_start_tid_get(soc->hal_soc, qdf_nbuf_data(nbuf));
	rx_seq = (((*(uint16_t *)wh->i_seq) &
		   IEEE80211_SEQ_SEQ_MASK) >>
		  IEEE80211_SEQ_SEQ_SHIFT);
	fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));

	/* Can get only the last fragment here */
	if (fragno) {
		status = dp_rx_defrag_add_last_frag(soc, peer,
						    tid, rx_seq, nbuf);
		dp_info_rl("Frag pkt seq# %d frag# %d consumed status %d !",
			   rx_seq, fragno, status);
		return;
	}

	tops = pdev->soc->cdp_soc.ol_ops;
	if (tops->rx_mic_error)
		tops->rx_mic_error(pdev->ctrl_pdev, vdev->vdev_id, wh);

fail:
	qdf_nbuf_free(nbuf);
	return;
}

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	uint32_t count = 0;
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t mac_id = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t cookie = 0;
	void *link_desc_va;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	uint16_t num_msdus;
	struct dp_rx_desc *rx_desc = NULL;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {
		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
		/*
		 * check for the magic number in the sw cookie
		 */
		qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
				  LINK_DESC_ID_START);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
		link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
		hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
				     &num_msdus);

		if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
				 (msdu_list.rbm[0] !=
				  HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
			/* TODO */
			/* Call appropriate handler */
			if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
				DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Invalid RBM %d"),
					  msdu_list.rbm[0]);
			}

			/* Return link descriptor through WBM ring (SW2WBM) */
			dp_rx_link_desc_return(soc, ring_desc,
					       HAL_BM_ACTION_RELEASE_MSDU_LIST);
			continue;
		}

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
						      msdu_list.sw_cookie[0]);
		qdf_assert_always(rx_desc);

		mac_id = rx_desc->pool_id;

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
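
		/*
		 * Demux by error type: fragmented MPDUs go through the
		 * defrag path (one msdu per link desc), while PN check
		 * failures and 2K sequence-number jumps are counted and
		 * their MSDUs dropped via the handlers below.
		 */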
		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/*
			 * We only handle one msdu per link desc for the
			 * fragmented case. We drop the msdus and release
			 * the link desc back if there is more than one
			 * msdu in the link desc.
			 */
			if (qdf_unlikely(num_msdus > 1)) {
				count = dp_rx_msdus_drop(soc, ring_desc,
							 &mpdu_desc_info,
							 &mac_id, quota);

				rx_bufs_reaped[mac_id] += count;
				continue;
			}

			count = dp_rx_frag_handle(soc,
						  ring_desc, &mpdu_desc_info,
						  rx_desc, &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				     1);

			count = dp_rx_pn_error_handle(soc,
						      ring_desc,
						      &mpdu_desc_info, &mac_id,
						      quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				     rx.err.
				     reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				     1);

			count = dp_rx_2k_jump_handle(soc,
						     ring_desc, &mpdu_desc_info,
						     &mac_id, quota);

			rx_bufs_reaped[mac_id] += count;
			continue;
		}
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	if (soc->rx.flags.defrag_timeout_check) {
		uint32_t now_ms =
			qdf_system_ticks_to_msecs(qdf_system_ticks());

		if (now_ms >= soc->rx.defrag.next_flush_ms)
			dp_rx_defrag_waitlist_flush(soc);
	}

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&dp_pdev->free_list_head,
						&dp_pdev->free_list_tail);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used = 0;
	uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
	uint8_t buf_type, rbm;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_nbuf_t nbuf, next;
	struct hal_wbm_err_desc_info wbm_err_info = { 0 };
	uint8_t pool_id;
	uint8_t tid = 0;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %pK"), hal_ring);
		goto done;
	}

	while (qdf_likely(quota-- && (ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring)))) {
		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
			    == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert_always(rx_desc);

		if (!dp_rx_desc_check_magic(rx_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid rx_desc cookie=%d"),
				  rx_buf_cookie);
			continue;
		}

		/*
		 * this is an unlikely scenario where the host is reaping
		 * a descriptor which it already reaped just a while ago
		 * but has yet to replenish back to HW.
		 * In this case host will dump the last 128 descriptors
		 * including the software descriptor rx_desc and assert.
		 */
		if (qdf_unlikely(!rx_desc->in_use)) {
			DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
			dp_rx_dump_info_and_assert(soc, hal_ring,
						   ring_desc, rx_desc);
		}

		nbuf = rx_desc->nbuf;
		qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);

		/*
		 * save the wbm desc info in nbuf TLV. We will need this
		 * info when we do the actual nbuf processing
		 */
		hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
		wbm_err_info.pool_id = rx_desc->pool_id;
		hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
					       &wbm_err_info);

		rx_bufs_reaped[rx_desc->pool_id]++;

		DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id],
					    rx_desc);
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_reaped[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_reaped[mac_id],
						&head[mac_id], &tail[mac_id]);
			rx_bufs_used += rx_bufs_reaped[mac_id];
		}
	}
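
	/*
	 * Phase two: the ring has been fully reaped and replenished above;
	 * now walk the accumulated nbuf list outside the SRNG lock and
	 * demux each frame using the wbm_err_info that was stashed in its
	 * rx TLV area during the reap phase.
	 */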
	nbuf = nbuf_head;
	while (nbuf) {
		struct dp_peer *peer;
		uint16_t peer_id;

		rx_tlv_hdr = qdf_nbuf_data(nbuf);

		peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
		peer = dp_peer_find_by_id(soc, peer_id);

		/*
		 * retrieve the wbm desc info from nbuf TLV, so we can
		 * handle error cases appropriately
		 */
		hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);

		/* Set queue_mapping in nbuf to 0 */
		dp_set_rx_queue(nbuf, 0);

		next = nbuf->next;

		if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			if (wbm_err_info.reo_psh_rsn
			    == HAL_RX_WBM_REO_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.reo_error
					     [wbm_err_info.reo_err_code], 1);

				switch (wbm_err_info.reo_err_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					pool_id = wbm_err_info.pool_id;
					dp_rx_null_q_desc_handle(soc, nbuf,
								 rx_tlv_hdr,
								 pool_id, peer);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;
				/* TODO */
				/* Add per error code accounting */
				case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
					pool_id = wbm_err_info.pool_id;

					if (hal_rx_msdu_end_first_msdu_get(rx_tlv_hdr)) {
						peer_id =
						hal_rx_mpdu_start_sw_peer_id_get(rx_tlv_hdr);
						tid =
						hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
					}
					dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr,
							  peer_id, tid);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(
									peer);
					continue;
				default:
					dp_err_rl("Got pkt with REO ERROR: %d",
						  wbm_err_info.reo_err_code);
					break;
				}
			}
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
			    == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
				case HAL_RXDMA_ERR_WIFI_PARSE:
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								peer,
								wbm_err_info.rxdma_err_code);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer, rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(
									peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					if (peer)
						DP_STATS_INC(peer, rx.err.decrypt_err, 1);
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "Packet received with Decrypt error");
					break;

				default:
					dp_err_rl("RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   void *rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);

	msdu = NULL;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}
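
	/*
	 * Walk the chain of MSDU link descriptors: each link descriptor
	 * carries a list of MSDUs and points (via buf_info.paddr) to the
	 * next link descriptor; the loop below terminates when that
	 * pointer is zero.
	 */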
	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/* if the msdus belong to an NSS offloaded radio &&
			 * the rbm is not SW3_BM, then return the msdu_link
			 * descriptor without freeing the msdus (nbufs). Let
			 * these buffers be given to the NSS completion ring
			 * for NSS to free them.
			 * Else iterate through the msdu link desc list and
			 * free each msdu in the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
									    msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							      QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
					      &p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);

	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 *
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	void *hal_soc;
	void *rxdma_dst_ring_desc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;

	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Init Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Destination Ring Access Failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}

	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	hal_srng_access_end(hal_soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);

		work_done += rx_bufs_used;
	}

	return work_done;
}