dp_rx_err.c

/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_rx_defrag.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <enet.h> /* LLC_SNAP_HDR_LEN */
#include "qdf_net_types.h"

/* Max buffers in invalid peer SG list */
#define DP_MAX_INVALID_BUFFERS 10

/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is looped
 * back on the same vap or a different vap
 *
 * @soc: core DP main context
 * @peer: dp peer handle
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 *
 */
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
                                          struct dp_peer *peer,
                                          uint8_t *rx_tlv_hdr,
                                          qdf_nbuf_t nbuf)
{
    struct dp_vdev *vdev = peer->vdev;
    struct dp_ast_entry *ase = NULL;
    uint16_t sa_idx = 0;
    uint8_t *data;

    /*
     * Multicast Echo Check is required only if vdev is STA and
     * received pkt is a multicast/broadcast pkt. Otherwise
     * skip the MEC check.
     */
    if (vdev->opmode != wlan_op_mode_sta)
        return false;

    if (!hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc, rx_tlv_hdr))
        return false;

    data = qdf_nbuf_data(nbuf);

    /*
     * If the received pkt's src mac addr matches the vdev
     * mac address, then drop the pkt as it is looped back.
     */
    if (!(qdf_mem_cmp(&data[QDF_MAC_ADDR_SIZE],
                      vdev->mac_addr.raw,
                      QDF_MAC_ADDR_SIZE)))
        return true;

    /*
     * In case of qwrap isolation mode, do not drop loopback packets.
     * In isolation mode, all packets from the wired stations need to go
     * to rootap and loop back to reach the wireless stations and
     * vice-versa.
     */
    if (qdf_unlikely(vdev->isolation_vdev))
        return false;

    /*
     * If the received pkt's src mac addr matches the MAC addr of a
     * wired PC behind the STA, or of a wireless STA behind the
     * repeater, then drop the pkt as it is looped back.
     */
    qdf_spin_lock_bh(&soc->ast_lock);
    if (hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc, rx_tlv_hdr)) {
        sa_idx = hal_rx_msdu_end_sa_idx_get(soc->hal_soc, rx_tlv_hdr);

        if ((sa_idx < 0) ||
            (sa_idx >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
            qdf_spin_unlock_bh(&soc->ast_lock);
            QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                      "invalid sa_idx: %d", sa_idx);
            qdf_assert_always(0);
        }

        ase = soc->ast_table[sa_idx];
        if (!ase) {
            /* We do not get a peer map event for STA and without
             * this event we don't know what the STA's sa_idx is.
             * For this reason the AST is still not associated to
             * any index position in ast_table.
             * In this kind of scenario where sa is valid but
             * ast is not in ast_table, we use the below API to get
             * the AST entry for the STA's own mac_address.
             */
            ase = dp_peer_ast_list_find(soc, peer,
                                        &data[QDF_MAC_ADDR_SIZE]);
            if (ase) {
                ase->ast_idx = sa_idx;
                soc->ast_table[sa_idx] = ase;
                ase->is_mapped = TRUE;
            }
        }
    } else {
        ase = dp_peer_ast_hash_find_by_pdevid(soc,
                                              &data[QDF_MAC_ADDR_SIZE],
                                              vdev->pdev->pdev_id);
    }

    if (ase) {
        if (ase->pdev_id != vdev->pdev->pdev_id) {
            qdf_spin_unlock_bh(&soc->ast_lock);
            QDF_TRACE(QDF_MODULE_ID_DP,
                      QDF_TRACE_LEVEL_INFO,
                      "Detected DBDC Root AP %pM, %d %d",
                      &data[QDF_MAC_ADDR_SIZE], vdev->pdev->pdev_id,
                      ase->pdev_id);
            return false;
        }

        if ((ase->type == CDP_TXRX_AST_TYPE_MEC) ||
            (ase->peer != peer)) {
            qdf_spin_unlock_bh(&soc->ast_lock);
            QDF_TRACE(QDF_MODULE_ID_DP,
                      QDF_TRACE_LEVEL_INFO,
                      "received pkt with same src mac %pM",
                      &data[QDF_MAC_ADDR_SIZE]);
            return true;
        }
    }
    qdf_spin_unlock_bh(&soc->ast_lock);
    return false;
}
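
/*
 * Usage sketch (illustrative only; it mirrors the actual call site in
 * dp_rx_null_q_desc_handle() later in this file): a looped-back MCBC
 * frame detected by the check above is dropped and accounted against
 * the peer.
 *
 *	if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
 *		DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
 *		goto drop_nbuf;
 *	}
 */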

/**
 * dp_rx_link_desc_return_by_addr() - Return a MPDU link descriptor to
 * HW (WBM), by address
 *
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: buffer manager action to program in the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
                               hal_link_desc_t link_desc_addr,
                               uint8_t bm_action)
{
    struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
    hal_ring_handle_t wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
    hal_soc_handle_t hal_soc = soc->hal_soc;
    QDF_STATUS status = QDF_STATUS_E_FAILURE;
    void *src_srng_desc;

    if (!wbm_rel_srng) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "WBM RELEASE RING not initialized");
        return status;
    }

    if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
        /* TODO */
        /*
         * Need API to convert from hal_ring pointer to
         * Ring Type / Ring Id combo
         */
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("HAL RING Access For WBM Release SRNG Failed - %pK"),
                  wbm_rel_srng);
        DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
        goto done;
    }
    src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
    if (qdf_likely(src_srng_desc)) {
        /* Return link descriptor through WBM ring (SW2WBM) */
        hal_rx_msdu_link_desc_set(hal_soc,
                                  src_srng_desc, link_desc_addr, bm_action);
        status = QDF_STATUS_SUCCESS;
    } else {
        struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
                  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
                  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
    }
done:
    hal_srng_access_end(hal_soc, wbm_rel_srng);
    return status;
}
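
/*
 * Note on bm_action (illustrative; both constants are used by callers
 * later in this file): link descriptors are released over the SW2WBM
 * ring with one of two actions, depending on whether the MSDU buffers
 * they point at were consumed by the host:
 *
 *	dp_rx_link_desc_return(soc, ring_desc,
 *			       HAL_BM_ACTION_PUT_IN_IDLE_LIST);
 *	dp_rx_link_desc_return(soc, ring_desc,
 *			       HAL_BM_ACTION_RELEASE_MSDU_LIST);
 */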

/**
 * dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
 * (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: buffer manager action to program in the release descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
                       uint8_t bm_action)
{
    void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);

    return dp_rx_link_desc_return_by_addr(soc, buf_addr_info, bm_action);
}

/**
 * dp_rx_msdus_drop() - Drops all MSDUs of an MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: filled with the pool id the dropped buffers belong to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, hal_ring_desc_t ring_desc,
                 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
                 uint8_t *mac_id,
                 uint32_t quota)
{
    uint32_t rx_bufs_used = 0;
    void *link_desc_va;
    struct hal_buf_info buf_info;
    struct dp_pdev *pdev;
    struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
    int i;
    uint8_t *rx_tlv_hdr;
    uint32_t tid;

    hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

    link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

    /* No UNMAP required -- this is "malloc_consistent" memory */
    hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
                         &mpdu_desc_info->msdu_count);

    for (i = 0; (i < mpdu_desc_info->msdu_count) && quota--; i++) {
        struct dp_rx_desc *rx_desc =
            dp_rx_cookie_2_va_rxdma_buf(soc,
                                        msdu_list.sw_cookie[i]);

        qdf_assert_always(rx_desc);

        /* all buffers from an MSDU link descriptor belong to same pdev */
        *mac_id = rx_desc->pool_id;
        pdev = soc->pdev_list[rx_desc->pool_id];

        if (!dp_rx_desc_check_magic(rx_desc)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Invalid rx_desc cookie=%d"),
                      msdu_list.sw_cookie[i]);
            return rx_bufs_used;
        }

        qdf_nbuf_unmap_single(soc->osdev,
                              rx_desc->nbuf, QDF_DMA_FROM_DEVICE);

        rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

        rx_bufs_used++;
        tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
                                        rx_desc->rx_buf_start);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "Packet received with PN error for tid :%d", tid);

        rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
        if (hal_rx_encryption_info_valid(soc->hal_soc, rx_tlv_hdr))
            hal_rx_print_pn(soc->hal_soc, rx_tlv_hdr);

        /* Just free the buffers */
        qdf_nbuf_free(rx_desc->nbuf);

        dp_rx_add_to_free_desc_list(&pdev->free_list_head,
                                    &pdev->free_list_tail, rx_desc);
    }

    /* Return link descriptor through WBM ring (SW2WBM) */
    dp_rx_link_desc_return(soc, ring_desc, HAL_BM_ACTION_PUT_IN_IDLE_LIST);

    return rx_bufs_used;
}
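
/*
 * Descriptor-recovery sketch (illustrative, taken from the loop above):
 * each MSDU referenced by the link descriptor carries a SW cookie,
 * which is converted back to the host rx descriptor before the buffer
 * can be unmapped and freed:
 *
 *	rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, msdu_list.sw_cookie[i]);
 *	qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
 *			      QDF_DMA_FROM_DEVICE);
 *	qdf_nbuf_free(rx_desc->nbuf);
 */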

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: filled with the pool id the dropped buffers belong to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP determines that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO e.g. duplicate detection/routing to other cores
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
                      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
                      uint8_t *mac_id,
                      uint32_t quota)
{
    uint16_t peer_id;
    uint32_t rx_bufs_used = 0;
    struct dp_peer *peer;
    bool peer_pn_policy = false;

    peer_id = DP_PEER_METADATA_PEER_ID_GET(
                  mpdu_desc_info->peer_meta_data);

    peer = dp_peer_find_by_id(soc, peer_id);

    if (qdf_likely(peer)) {
        /*
         * TODO: Check for peer specific policies & set peer_pn_policy
         */
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "discard rx due to PN error for peer %pK "
                  "(%02x:%02x:%02x:%02x:%02x:%02x)",
                  peer,
                  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
                  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
                  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);

        dp_peer_unref_del_find_by_id(peer);
    }
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "Packet received with PN error");

    /* No peer PN policy -- definitely drop */
    if (!peer_pn_policy)
        rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
                                        mpdu_desc_info,
                                        mac_id, quota);

    return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @mac_id: filled with the pool id the dropped buffers belong to
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A) the protocol stack is invoked to generate DELBA/DEAUTH frame
 * For case B), the frame is normally dropped, no more action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
                     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
                     uint8_t *mac_id, uint32_t quota)
{
    return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
                            mac_id, quota);
}
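
/*
 * Note: this REO-path handler simply drops the offending MPDU via
 * dp_rx_msdus_drop(). The WBM-path handler for the same error class,
 * dp_2k_jump_handle() below, additionally kicks off DELBA negotiation
 * through soc->cdp_soc.ol_ops->send_delba().
 */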

#ifdef DP_INVALID_PEER_ASSERT
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
    do {                                            \
        qdf_assert_always(!(head));                 \
        qdf_assert_always(!(tail));                 \
    } while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif

/**
 * dp_rx_chain_msdus() - Function to chain all msdus of a mpdu
 * to pdev invalid peer list
 *
 * @soc: core DP main context
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @mac_id: mac id
 *
 * Return: bool: true for last msdu of mpdu
 */
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
                  uint8_t mac_id)
{
    bool mpdu_done = false;
    qdf_nbuf_t curr_nbuf = NULL;
    qdf_nbuf_t tmp_nbuf = NULL;

    /* TODO: Currently only single radio is supported, hence
     * pdev hard coded to '0' index
     */
    struct dp_pdev *dp_pdev = soc->pdev_list[mac_id];

    /* If the invalid peer SG list has reached its max size, free the
     * buffers in the list and treat the current buffer as the start of
     * a new list.
     *
     * The current logic to detect the last buffer from attn_tlv is not
     * reliable in the OFDMA UL scenario, hence add a max-buffers check
     * to avoid list pile-up.
     */
    if (!dp_pdev->first_nbuf ||
        QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST
            (dp_pdev->invalid_peer_head_msdu) >= DP_MAX_INVALID_BUFFERS) {
        qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
        dp_pdev->ppdu_id = hal_rx_hw_desc_get_ppduid_get(soc->hal_soc,
                                                         rx_tlv_hdr);
        dp_pdev->first_nbuf = true;

        /* If the new nbuf received is the first msdu of the
         * amsdu and there are msdus in the invalid peer msdu
         * list, then let us free all the msdus of the invalid
         * peer msdu list.
         * This scenario can happen when we start receiving
         * a new a-msdu even before the previous a-msdu is completely
         * received.
         */
        curr_nbuf = dp_pdev->invalid_peer_head_msdu;
        while (curr_nbuf) {
            tmp_nbuf = curr_nbuf->next;
            qdf_nbuf_free(curr_nbuf);
            curr_nbuf = tmp_nbuf;
        }

        dp_pdev->invalid_peer_head_msdu = NULL;
        dp_pdev->invalid_peer_tail_msdu = NULL;
        hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc, rx_tlv_hdr,
                                           &(dp_pdev->ppdu_info.rx_status));
    }

    if (dp_pdev->ppdu_id == hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr) &&
        hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
        qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
        qdf_assert_always(dp_pdev->first_nbuf == true);
        dp_pdev->first_nbuf = false;
        mpdu_done = true;
    }

    /*
     * For MCL, invalid_peer_head_msdu and invalid_peer_tail_msdu
     * should be NULL here; keep the check for debugging in case of
     * corner cases.
     */
    DP_PDEV_INVALID_PEER_MSDU_CHECK(dp_pdev->invalid_peer_head_msdu,
                                    dp_pdev->invalid_peer_tail_msdu);

    DP_RX_LIST_APPEND(dp_pdev->invalid_peer_head_msdu,
                      dp_pdev->invalid_peer_tail_msdu,
                      nbuf);

    return mpdu_done;
}
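
/*
 * Consumption sketch (illustrative; it mirrors the call site in
 * dp_rx_null_q_desc_handle() below): once dp_rx_chain_msdus() reports
 * mpdu_done, the accumulated invalid-peer chain is handed off and the
 * list heads are reset:
 *
 *	mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
 *	dp_rx_process_invalid_peer_wrapper(soc,
 *			pdev->invalid_peer_head_msdu, mpdu_done, pool_id);
 *	if (mpdu_done) {
 *		pdev->invalid_peer_head_msdu = NULL;
 *		pdev->invalid_peer_tail_msdu = NULL;
 *	}
 */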

/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 * on WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out
 * of receiving aggregates in the non-BA case. This typically
 * may happen if aggregates are received on a QOS-enabled TID
 * while the Rx window size is still initialized to a value of 2. Or
 * it may also happen if the negotiated window size is 1 but the peer
 * sends aggregates.
 *
 */
void
dp_2k_jump_handle(struct dp_soc *soc,
                  qdf_nbuf_t nbuf,
                  uint8_t *rx_tlv_hdr,
                  uint16_t peer_id,
                  uint8_t tid)
{
    uint32_t ppdu_id;
    struct dp_peer *peer = NULL;
    struct dp_rx_tid *rx_tid = NULL;

    peer = dp_peer_find_by_id(soc, peer_id);
    if (!peer || peer->delete_in_progress) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "peer not found");
        goto free_nbuf;
    }
    rx_tid = &peer->rx_tid[tid];
    if (qdf_unlikely(!rx_tid)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "rx_tid is NULL!!");
        goto free_nbuf;
    }
    qdf_spin_lock_bh(&rx_tid->tid_lock);
    ppdu_id = hal_rx_attn_phy_ppdu_id_get(rx_tlv_hdr);

    /*
     * If a BA session is created and a non-aggregate packet is
     * landing here then the issue is with sequence number mismatch.
     * Proceed with delba even in that case.
     */
    if (rx_tid->ppdu_id_2k != ppdu_id &&
        rx_tid->ba_status != DP_RX_BA_ACTIVE) {
        rx_tid->ppdu_id_2k = ppdu_id;
        qdf_spin_unlock_bh(&rx_tid->tid_lock);
        goto free_nbuf;
    }
    if (!rx_tid->delba_tx_status) {
        rx_tid->delba_tx_retry++;
        rx_tid->delba_tx_status = 1;
        rx_tid->delba_rcode =
            IEEE80211_REASON_QOS_SETUP_REQUIRED;
        qdf_spin_unlock_bh(&rx_tid->tid_lock);
        if (soc->cdp_soc.ol_ops->send_delba)
            soc->cdp_soc.ol_ops->send_delba(peer->vdev->pdev->ctrl_pdev,
                                            peer->ctrl_peer,
                                            peer->mac_addr.raw,
                                            tid,
                                            peer->vdev->ctrl_vdev,
                                            rx_tid->delba_rcode);
    } else {
        qdf_spin_unlock_bh(&rx_tid->tid_lock);
    }

free_nbuf:
    if (peer)
        dp_peer_unref_del_find_by_id(peer);
    qdf_nbuf_free(nbuf);
    return;
}
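
/*
 * Note: delba_tx_status acts as a one-shot gate above -- the first
 * qualifying 2k-jump frame on a TID marks a DELBA as pending (bumping
 * delba_tx_retry) and invokes ol_ops->send_delba(); subsequent frames
 * on the same TID are simply freed while that DELBA is outstanding,
 * rather than triggering duplicate requests.
 */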

#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490)
/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - WAR to find the peer
 *                                                   via addr_2 when
 *                                                   peer_id is invalid
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * In certain types of packets if peer_id is not correct then
 * the driver may not be able to find the peer. Try finding the peer
 * by addr_2 of the received MPDU. If the peer is found that way, the
 * sw_peer_id & ast_idx in the TLV are most likely corrupted.
 *
 * Return: true if the peer is found by addr_2 of the received MPDU,
 *         else false
 */
static bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
                                              uint8_t pool_id,
                                              uint8_t *rx_tlv_hdr,
                                              qdf_nbuf_t nbuf)
{
    uint8_t local_id;
    struct dp_peer *peer = NULL;
    uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
    struct dp_pdev *pdev = soc->pdev_list[pool_id];
    struct ieee80211_frame *wh = (struct ieee80211_frame *)rx_pkt_hdr;

    /*
     * WAR - In certain types of packets if peer_id is not correct then
     * the driver may not be able to find the peer. Try finding the peer
     * by addr_2 of the received MPDU.
     */
    if (wh)
        peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
                                    wh->i_addr2, &local_id);
    if (peer) {
        dp_verbose_debug("MPDU sw_peer_id & ast_idx is corrupted");
        hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
                             QDF_TRACE_LEVEL_DEBUG);
        DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer_id,
                         1, qdf_nbuf_len(nbuf));
        qdf_nbuf_free(nbuf);

        return true;
    }
    return false;
}

/**
 * dp_rx_null_q_check_pkt_len_exception() - Check for pktlen validity
 * @soc: DP SOC context
 * @pkt_len: computed length of the pkt from caller in bytes
 *
 * Return: true if pktlen > RX_BUFFER_SIZE, else return false
 *
 */
static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
    if (qdf_unlikely(pkt_len > RX_BUFFER_SIZE)) {
        DP_STATS_INC_PKT(soc, rx.err.rx_invalid_pkt_len,
                         1, pkt_len);
        return true;
    } else {
        return false;
    }
}
#else
static inline bool
dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
                                              uint8_t pool_id,
                                              uint8_t *rx_tlv_hdr,
                                              qdf_nbuf_t nbuf)
{
    return false;
}

static inline
bool dp_rx_null_q_check_pkt_len_exception(struct dp_soc *soc, uint32_t pkt_len)
{
    return false;
}
#endif
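
/*
 * Usage sketch (illustrative, taken from dp_rx_null_q_desc_handle()
 * below): the length exception helper gates pkt_len before it is
 * written into the nbuf, and the stub variants above compile the
 * check out on targets that do not need the WAR:
 *
 *	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
 *	if (dp_rx_null_q_check_pkt_len_exception(soc, pkt_len))
 *		goto drop_nbuf;
 *	qdf_nbuf_set_pktlen(nbuf,
 *			    qdf_min(pkt_len, (uint32_t)RX_BUFFER_SIZE));
 */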

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *                              descriptor violation on either a
 *                              REO or WBM ring
 *
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @pool_id: mac id
 * @peer: peer handle
 *
 * This function handles NULL queue descriptor violations arising out of
 * a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS-enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is set up. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination or WBM release ring.
 *
 * Return: QDF_STATUS_SUCCESS, if nbuf handled successfully. QDF status code
 *         if nbuf could not be handled or dropped.
 */
static QDF_STATUS
dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
                         uint8_t *rx_tlv_hdr, uint8_t pool_id,
                         struct dp_peer *peer)
{
    uint32_t pkt_len, l2_hdr_offset;
    uint16_t msdu_len;
    struct dp_vdev *vdev;
    uint8_t tid;
    qdf_ether_header_t *eh;

    qdf_nbuf_set_rx_chfrag_start(nbuf,
                                 hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
                                                                rx_tlv_hdr));
    qdf_nbuf_set_rx_chfrag_end(nbuf,
                               hal_rx_msdu_end_last_msdu_get(soc->hal_soc,
                                                             rx_tlv_hdr));
    qdf_nbuf_set_da_mcbc(nbuf, hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
                                                              rx_tlv_hdr));
    qdf_nbuf_set_da_valid(nbuf,
                          hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
                                                          rx_tlv_hdr));
    qdf_nbuf_set_sa_valid(nbuf,
                          hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
                                                          rx_tlv_hdr));

    l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
                                                       rx_tlv_hdr);
    msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
    pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

    if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
        if (dp_rx_null_q_check_pkt_len_exception(soc, pkt_len))
            goto drop_nbuf;

        /* Set length in nbuf */
        qdf_nbuf_set_pktlen(nbuf,
                            qdf_min(pkt_len, (uint32_t)RX_BUFFER_SIZE));
        qdf_assert_always(nbuf->data == rx_tlv_hdr);
    }

    /*
     * Check if DMA completed -- msdu_done is the last bit
     * to be written
     */
    if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
        dp_err_rl("MSDU DONE failure");
        hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
                             QDF_TRACE_LEVEL_INFO);
        qdf_assert(0);
    }

    if (!peer &&
        dp_rx_null_q_handle_invalid_peer_id_exception(soc, pool_id,
                                                      rx_tlv_hdr, nbuf))
        return QDF_STATUS_E_FAILURE;

    if (!peer) {
        bool mpdu_done = false;
        struct dp_pdev *pdev = soc->pdev_list[pool_id];

        dp_err_rl("peer is NULL");
        DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
                         qdf_nbuf_len(nbuf));

        mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_tlv_hdr, pool_id);
        /* Trigger invalid peer handler wrapper */
        dp_rx_process_invalid_peer_wrapper(soc,
                                           pdev->invalid_peer_head_msdu,
                                           mpdu_done, pool_id);

        if (mpdu_done) {
            pdev->invalid_peer_head_msdu = NULL;
            pdev->invalid_peer_tail_msdu = NULL;
        }
        return QDF_STATUS_E_FAILURE;
    }

    vdev = peer->vdev;
    if (!vdev) {
        dp_err_rl("Null vdev!");
        DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
        goto drop_nbuf;
    }

    /*
     * Advance the packet start pointer by total size of
     * pre-header TLVs
     */
    if (qdf_nbuf_is_frag(nbuf))
        qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
    else
        qdf_nbuf_pull_head(nbuf, (l2_hdr_offset + RX_PKT_TLVS_LEN));

    if (dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf)) {
        /* this is a looped back MCBC pkt, drop it */
        DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
        goto drop_nbuf;
    }

    /*
     * In qwrap mode if the received packet matches any of the vdev
     * mac addresses, drop it. Do not receive multicast packets
     * originated from any proxysta.
     */
    if (check_qwrap_multicast_loopback(vdev, nbuf)) {
        DP_STATS_INC_PKT(peer, rx.mec_drop, 1, qdf_nbuf_len(nbuf));
        goto drop_nbuf;
    }

    if (qdf_unlikely((peer->nawds_enabled == true) &&
                     hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
                                                    rx_tlv_hdr))) {
        dp_err_rl("free buffer for multicast packet");
        DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
        goto drop_nbuf;
    }

    if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
        dp_err_rl("mcast Policy Check Drop pkt");
        goto drop_nbuf;
    }

    /* WDS Source Port Learning */
    if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet &&
                   vdev->wds_enabled))
        dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer, nbuf);

    if (hal_rx_is_unicast(soc->hal_soc, rx_tlv_hdr)) {
        tid = hal_rx_tid_get(soc->hal_soc, rx_tlv_hdr);
        if (!peer->rx_tid[tid].hw_qdesc_vaddr_unaligned)
            dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
            /* IEEE80211_SEQ_MAX indicates invalid start_seq */
    }

    if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
        qdf_nbuf_set_next(nbuf, NULL);
        dp_rx_deliver_raw(vdev, nbuf, peer);
    } else {
        if (vdev->osif_rx) {
            qdf_nbuf_set_next(nbuf, NULL);
            DP_STATS_INC_PKT(peer, rx.to_stack, 1,
                             qdf_nbuf_len(nbuf));

            /*
             * Update the protocol tag in SKB based on
             * CCE metadata
             */
            dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
                                      EXCEPTION_DEST_RING_ID,
                                      true, true);

            /* Update the flow tag in SKB based on FSE metadata */
            dp_rx_update_flow_tag(soc, vdev, nbuf,
                                  rx_tlv_hdr, true);

            if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
                                 soc->hal_soc, rx_tlv_hdr) &&
                             (vdev->rx_decap_type ==
                              htt_cmn_pkt_type_ethernet))) {
                eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

                DP_STATS_INC_PKT(peer, rx.multicast, 1,
                                 qdf_nbuf_len(nbuf));
                if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
                    DP_STATS_INC_PKT(peer, rx.bcast, 1,
                                     qdf_nbuf_len(nbuf));
                }
            }

            vdev->osif_rx(vdev->osif_vdev, nbuf);
        } else {
            dp_err_rl("INVALID osif_rx. vdev %pK", vdev);
            DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
            goto drop_nbuf;
        }
    }
    return QDF_STATUS_SUCCESS;

drop_nbuf:
    qdf_nbuf_free(nbuf);
    return QDF_STATUS_E_FAILURE;
}
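
/*
 * Buffer layout implied by the length math above (illustrative):
 *
 *	| RX_PKT_TLVS_LEN | l2_hdr_offset pad | MSDU payload (msdu_len) |
 *
 * hence pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN, and the
 * payload is exposed for the non-fragment case with
 * qdf_nbuf_pull_head(nbuf, l2_hdr_offset + RX_PKT_TLVS_LEN).
 */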

/**
 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
 *                             frames to OS or wifi parse errors.
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer reference
 * @err_code: rxdma err code
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *          pool_id have the same mapping)
 *
 * Return: None
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
                        uint8_t *rx_tlv_hdr, struct dp_peer *peer,
                        uint8_t err_code, uint8_t mac_id)
{
    uint32_t pkt_len, l2_hdr_offset;
    uint16_t msdu_len;
    struct dp_vdev *vdev;
    qdf_ether_header_t *eh;
    bool is_broadcast;

    /*
     * Check if DMA completed -- msdu_done is the last bit
     * to be written
     */
    if (!hal_rx_attn_msdu_done_get(rx_tlv_hdr)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("MSDU DONE failure"));

        hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
                             QDF_TRACE_LEVEL_INFO);
        qdf_assert(0);
    }

    l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
                                                       rx_tlv_hdr);
    msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
    pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

    /* Set length in nbuf */
    qdf_nbuf_set_pktlen(nbuf, pkt_len);

    qdf_nbuf_set_next(nbuf, NULL);

    qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
    qdf_nbuf_set_rx_chfrag_end(nbuf, 1);

    if (!peer) {
        QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, "peer is NULL");
        DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
                         qdf_nbuf_len(nbuf));
        /* Trigger invalid peer handler wrapper */
        dp_rx_process_invalid_peer_wrapper(soc, nbuf, true, mac_id);
        return;
    }

    vdev = peer->vdev;
    if (!vdev) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("INVALID vdev %pK OR osif_rx"), vdev);
        /* Drop & free packet */
        qdf_nbuf_free(nbuf);
        DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
        return;
    }

    /*
     * Advance the packet start pointer by total size of
     * pre-header TLVs
     */
    qdf_nbuf_pull_head(nbuf, l2_hdr_offset + RX_PKT_TLVS_LEN);

    if (err_code == HAL_RXDMA_ERR_WIFI_PARSE) {
        uint8_t *pkt_type;

        pkt_type = qdf_nbuf_data(nbuf) + (2 * QDF_MAC_ADDR_SIZE);
        if (*(uint16_t *)pkt_type == htons(QDF_ETH_TYPE_8021Q)) {
            if (*(uint16_t *)(pkt_type + DP_SKIP_VLAN) ==
                htons(QDF_LLC_STP)) {
                DP_STATS_INC(vdev->pdev, vlan_tag_stp_cnt, 1);
                goto process_mesh;
            } else {
                goto process_rx;
            }
        }
    }
    if (vdev->rx_decap_type == htt_cmn_pkt_type_raw)
        goto process_mesh;

    /*
     * WAPI cert AP sends rekey frames as unencrypted.
     * Thus RXDMA will report unencrypted frame error.
     * To pass WAPI cert case, SW needs to pass unencrypted
     * rekey frame to stack.
     */
    if (qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
        goto process_rx;
    }
    /*
     * In the dynamic WEP case rekey frames are not encrypted,
     * similar to WAPI. Allow EAPOL when 802.1x + WEP is enabled and
     * key install is already done.
     */
    if ((vdev->sec_type == cdp_sec_type_wep104) &&
        (qdf_nbuf_is_ipv4_eapol_pkt(nbuf)))
        goto process_rx;

process_mesh:

    if (!vdev->mesh_vdev && err_code == HAL_RXDMA_ERR_UNENCRYPTED) {
        qdf_nbuf_free(nbuf);
        DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
        return;
    }

    if (vdev->mesh_vdev) {
        if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
            == QDF_STATUS_SUCCESS) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_MED,
                      FL("mesh pkt filtered"));
            DP_STATS_INC(vdev->pdev, dropped.mesh_filter, 1);

            qdf_nbuf_free(nbuf);
            return;
        }
        dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
    }
process_rx:
    if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
                                                    rx_tlv_hdr) &&
                     (vdev->rx_decap_type ==
                      htt_cmn_pkt_type_ethernet))) {
        eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
        is_broadcast = (QDF_IS_ADDR_BROADCAST
                        (eh->ether_dhost)) ? 1 : 0;
        DP_STATS_INC_PKT(peer, rx.multicast, 1, qdf_nbuf_len(nbuf));
        if (is_broadcast) {
            DP_STATS_INC_PKT(peer, rx.bcast, 1,
                             qdf_nbuf_len(nbuf));
        }
    }

    if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
        dp_rx_deliver_raw(vdev, nbuf, peer);
    } else {
        /* Update the protocol tag in SKB based on CCE metadata */
        dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
                                  EXCEPTION_DEST_RING_ID, true, true);
        /* Update the flow tag in SKB based on FSE metadata */
        dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
        DP_STATS_INC(peer, rx.to_stack.num, 1);
        vdev->osif_rx(vdev->osif_vdev, nbuf);
    }

    return;
}
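
/*
 * Frame layout note for the HAL_RXDMA_ERR_WIFI_PARSE branch above
 * (illustrative): after the TLV/header pull, qdf_nbuf_data() points at
 * an 802.3 frame, so the ethertype sits 2 * QDF_MAC_ADDR_SIZE bytes in
 * (dst mac + src mac). A value of QDF_ETH_TYPE_8021Q there means a
 * VLAN tag is present, and the encapsulated type is re-read
 * DP_SKIP_VLAN bytes further on to spot STP frames.
 */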

/**
 * dp_rx_process_mic_error(): Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer: peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
                             uint8_t *rx_tlv_hdr, struct dp_peer *peer)
{
    struct dp_vdev *vdev = NULL;
    struct dp_pdev *pdev = NULL;
    struct ol_if_ops *tops = NULL;
    uint16_t rx_seq, fragno;
    unsigned int tid;
    QDF_STATUS status;
    struct cdp_rx_mic_err_info mic_failure_info;

    if (!hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
                                        rx_tlv_hdr))
        return;

    if (!peer) {
        dp_err_rl("peer not found");
        goto fail;
    }

    vdev = peer->vdev;
    if (!vdev) {
        dp_err_rl("VDEV not found");
        goto fail;
    }

    pdev = vdev->pdev;
    if (!pdev) {
        dp_err_rl("PDEV not found");
        goto fail;
    }

    fragno = dp_rx_frag_get_mpdu_frag_number(qdf_nbuf_data(nbuf));
    /* Can get only last fragment */
    if (fragno) {
        tid = hal_rx_mpdu_start_tid_get(soc->hal_soc,
                                        qdf_nbuf_data(nbuf));
        rx_seq = hal_rx_get_rx_sequence(qdf_nbuf_data(nbuf));

        status = dp_rx_defrag_add_last_frag(soc, peer,
                                            tid, rx_seq, nbuf);
        dp_info_rl("Frag pkt seq# %d frag# %d consumed status %d !",
                   rx_seq, fragno, status);
        return;
    }

    if (hal_rx_mpdu_get_addr1(soc->hal_soc, qdf_nbuf_data(nbuf),
                              &mic_failure_info.da_mac_addr.bytes[0])) {
        dp_err_rl("Failed to get da_mac_addr");
        goto fail;
    }

    if (hal_rx_mpdu_get_addr2(soc->hal_soc, qdf_nbuf_data(nbuf),
                              &mic_failure_info.ta_mac_addr.bytes[0])) {
        dp_err_rl("Failed to get ta_mac_addr");
        goto fail;
    }

    mic_failure_info.key_id = 0;
    mic_failure_info.multicast =
        IEEE80211_IS_MULTICAST(mic_failure_info.da_mac_addr.bytes);
    qdf_mem_zero(mic_failure_info.tsc, MIC_SEQ_CTR_SIZE);
    mic_failure_info.frame_type = cdp_rx_frame_type_802_11;
    mic_failure_info.data = NULL;
    mic_failure_info.vdev_id = vdev->vdev_id;

    tops = pdev->soc->cdp_soc.ol_ops;
    if (tops->rx_mic_error)
        tops->rx_mic_error(pdev->ctrl_pdev, &mic_failure_info);

fail:
    qdf_nbuf_free(nbuf);
    return;
}
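
/*
 * Note: the MIC failure indication leaves the DP layer through the
 * ol_if_ops table (tops->rx_mic_error above); umac is expected to turn
 * the cdp_rx_mic_err_info contents into a countermeasures decision.
 * Frames with a non-zero fragment number never reach that point --
 * they are fed back into reassembly via dp_rx_defrag_add_last_frag()
 * instead.
 */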

uint32_t
dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
                  hal_ring_handle_t hal_ring_hdl, uint32_t quota)
{
    hal_ring_desc_t ring_desc;
    hal_soc_handle_t hal_soc;
    uint32_t count = 0;
    uint32_t rx_bufs_used = 0;
    uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
    uint8_t mac_id = 0;
    uint8_t buf_type;
    uint8_t error, rbm;
    struct hal_rx_mpdu_desc_info mpdu_desc_info;
    struct hal_buf_info hbi;
    struct dp_pdev *dp_pdev;
    struct dp_srng *dp_rxdma_srng;
    struct rx_desc_pool *rx_desc_pool;
    uint32_t cookie = 0;
    void *link_desc_va;
    struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
    uint16_t num_msdus;
    struct dp_rx_desc *rx_desc = NULL;

    /* Debug -- Remove later */
    qdf_assert(soc && hal_ring_hdl);

    hal_soc = soc->hal_soc;

    /* Debug -- Remove later */
    qdf_assert(hal_soc);

    if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
        /* TODO */
        /*
         * Need API to convert from hal_ring pointer to
         * Ring Type / Ring Id combo
         */
        DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
        goto done;
    }

    while (qdf_likely(quota-- && (ring_desc =
                                  hal_srng_dst_get_next(hal_soc,
                                                        hal_ring_hdl)))) {
        DP_STATS_INC(soc, rx.err_ring_pkts, 1);

        error = HAL_RX_ERROR_STATUS_GET(ring_desc);

        qdf_assert(error == HAL_REO_ERROR_DETECTED);

        buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);
        /*
         * For REO error ring, expect only MSDU LINK DESC
         */
        qdf_assert_always(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

        cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
        /*
         * check for the magic number in the sw cookie
         */
        qdf_assert_always((cookie >> LINK_DESC_ID_SHIFT) &
                          LINK_DESC_ID_START);

        /*
         * Check if the buffer is to be processed on this processor
         */
        rbm = hal_rx_ret_buf_manager_get(ring_desc);

        hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
        link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &hbi);
        hal_rx_msdu_list_get(soc->hal_soc, link_desc_va, &msdu_list,
                             &num_msdus);

        if (qdf_unlikely((msdu_list.rbm[0] != DP_WBM2SW_RBM) &&
                         (msdu_list.rbm[0] !=
                          HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST))) {
            /* TODO */
            /* Call appropriate handler */
            if (!wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
                DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
                QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_ERROR,
                          FL("Invalid RBM %d"),
                          msdu_list.rbm[0]);
            }

            /* Return link descriptor through WBM ring (SW2WBM) */
            dp_rx_link_desc_return(soc, ring_desc,
                                   HAL_BM_ACTION_RELEASE_MSDU_LIST);
            continue;
        }

        rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
                                              msdu_list.sw_cookie[0]);
        qdf_assert_always(rx_desc);

        mac_id = rx_desc->pool_id;

        /* Get the MPDU DESC info */
        hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

        if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
            /*
             * We only handle one msdu per link desc for fragmented
             * case. We drop the msdus and release the link desc
             * back if there are more than one msdu in link desc.
             */
            if (qdf_unlikely(num_msdus > 1)) {
                count = dp_rx_msdus_drop(soc, ring_desc,
                                         &mpdu_desc_info,
                                         &mac_id, quota);
                rx_bufs_reaped[mac_id] += count;
                continue;
            }

            count = dp_rx_frag_handle(soc,
                                      ring_desc, &mpdu_desc_info,
                                      rx_desc, &mac_id, quota);
            rx_bufs_reaped[mac_id] += count;
            DP_STATS_INC(soc, rx.rx_frags, 1);
            continue;
        }

        if (hal_rx_reo_is_pn_error(ring_desc)) {
            /* TODO */
            DP_STATS_INC(soc,
                         rx.err.
                         reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
                         1);
            /* increment @pdev level */
            dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
            if (dp_pdev)
                DP_STATS_INC(dp_pdev, err.reo_error, 1);

            count = dp_rx_pn_error_handle(soc,
                                          ring_desc,
                                          &mpdu_desc_info, &mac_id,
                                          quota);
            rx_bufs_reaped[mac_id] += count;
            continue;
        }

        if (hal_rx_reo_is_2k_jump(ring_desc)) {
            /* TODO */
            DP_STATS_INC(soc,
                         rx.err.
                         reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
                         1);
            /* increment @pdev level */
            dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
            if (dp_pdev)
                DP_STATS_INC(dp_pdev, err.reo_error, 1);

            count = dp_rx_2k_jump_handle(soc,
                                         ring_desc, &mpdu_desc_info,
                                         &mac_id, quota);
            rx_bufs_reaped[mac_id] += count;
            continue;
        }
    }

done:
    dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

    if (soc->rx.flags.defrag_timeout_check) {
        uint32_t now_ms =
            qdf_system_ticks_to_msecs(qdf_system_ticks());

        if (now_ms >= soc->rx.defrag.next_flush_ms)
            dp_rx_defrag_waitlist_flush(soc);
    }

    for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
        if (rx_bufs_reaped[mac_id]) {
            dp_pdev = soc->pdev_list[mac_id];
            dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
            rx_desc_pool = &soc->rx_desc_buf[mac_id];

            dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
                                    rx_desc_pool,
                                    rx_bufs_reaped[mac_id],
                                    &dp_pdev->free_list_head,
                                    &dp_pdev->free_list_tail);
            rx_bufs_used += rx_bufs_reaped[mac_id];
        }
    }

    return rx_bufs_used; /* Assume no scale factor for now */
}
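
/*
 * Design note: dp_rx_err_process() is reap-and-handle in one pass --
 * every buffer consumed while servicing an error ring entry is counted
 * per pool in rx_bufs_reaped[], and the same count of fresh buffers is
 * pushed back to the matching rxdma refill ring above before
 * returning, so error processing never starves the hardware of rx
 * buffers.
 */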
  1154. uint32_t
  1155. dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
  1156. hal_ring_handle_t hal_ring_hdl, uint32_t quota)
  1157. {
  1158. hal_ring_desc_t ring_desc;
  1159. hal_soc_handle_t hal_soc;
  1160. struct dp_rx_desc *rx_desc;
  1161. union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
  1162. union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
  1163. uint32_t rx_bufs_used = 0;
  1164. uint32_t rx_bufs_reaped[MAX_PDEV_CNT] = { 0 };
  1165. uint8_t buf_type, rbm;
  1166. uint32_t rx_buf_cookie;
  1167. uint8_t mac_id;
  1168. struct dp_pdev *dp_pdev;
  1169. struct dp_srng *dp_rxdma_srng;
  1170. struct rx_desc_pool *rx_desc_pool;
  1171. uint8_t *rx_tlv_hdr;
  1172. qdf_nbuf_t nbuf_head = NULL;
  1173. qdf_nbuf_t nbuf_tail = NULL;
  1174. qdf_nbuf_t nbuf, next;
  1175. struct hal_wbm_err_desc_info wbm_err_info = { 0 };
  1176. uint8_t pool_id;
  1177. uint8_t tid = 0;
  1178. /* Debug -- Remove later */
  1179. qdf_assert(soc && hal_ring_hdl);
  1180. hal_soc = soc->hal_soc;
  1181. /* Debug -- Remove later */
  1182. qdf_assert(hal_soc);
  1183. if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
  1184. /* TODO */
  1185. /*
  1186. * Need API to convert from hal_ring pointer to
  1187. * Ring Type / Ring Id combo
  1188. */
  1189. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1190. FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
  1191. goto done;
  1192. }
  1193. while (qdf_likely(quota-- && (ring_desc =
  1194. hal_srng_dst_get_next(hal_soc,
  1195. hal_ring_hdl)))) {
  1196. /* XXX */
  1197. buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);
  1198. /*
  1199. * For WBM ring, expect only MSDU buffers
  1200. */
  1201. qdf_assert_always(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);
  1202. qdf_assert((HAL_RX_WBM_ERR_SRC_GET(ring_desc)
  1203. == HAL_RX_WBM_ERR_SRC_RXDMA) ||
  1204. (HAL_RX_WBM_ERR_SRC_GET(ring_desc)
  1205. == HAL_RX_WBM_ERR_SRC_REO));
  1206. /*
  1207. * Check if the buffer is to be processed on this processor
  1208. */
  1209. rbm = hal_rx_ret_buf_manager_get(ring_desc);
  1210. if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
  1211. /* TODO */
  1212. /* Call appropriate handler */
  1213. DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
  1214. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1215. FL("Invalid RBM %d"), rbm);
  1216. continue;
  1217. }
  1218. rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);
  1219. rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
  1220. qdf_assert_always(rx_desc);
  1221. if (!dp_rx_desc_check_magic(rx_desc)) {
  1222. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1223. FL("Invalid rx_desc cookie=%d"),
  1224. rx_buf_cookie);
  1225. continue;
  1226. }
  1227. /*
  1228. * this is a unlikely scenario where the host is reaping
  1229. * a descriptor which it already reaped just a while ago
  1230. * but is yet to replenish it back to HW.
  1231. * In this case host will dump the last 128 descriptors
  1232. * including the software descriptor rx_desc and assert.
  1233. */
  1234. if (qdf_unlikely(!rx_desc->in_use)) {
  1235. DP_STATS_INC(soc, rx.err.hal_wbm_rel_dup, 1);
  1236. dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
  1237. ring_desc, rx_desc);
  1238. }
  1239. nbuf = rx_desc->nbuf;
  1240. qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE);
  1241. /*
  1242. * save the wbm desc info in nbuf TLV. We will need this
  1243. * info when we do the actual nbuf processing
  1244. */
  1245. hal_rx_wbm_err_info_get(ring_desc, &wbm_err_info, hal_soc);
  1246. wbm_err_info.pool_id = rx_desc->pool_id;
  1247. hal_rx_wbm_err_info_set_in_tlv(qdf_nbuf_data(nbuf),
  1248. &wbm_err_info);
  1249. rx_bufs_reaped[rx_desc->pool_id]++;
  1250. DP_RX_LIST_APPEND(nbuf_head, nbuf_tail, rx_desc->nbuf);
  1251. dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
  1252. &tail[rx_desc->pool_id],
  1253. rx_desc);
  1254. }
  1255. done:
  1256. dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
  1257. for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
  1258. if (rx_bufs_reaped[mac_id]) {
  1259. dp_pdev = soc->pdev_list[mac_id];
  1260. dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
  1261. rx_desc_pool = &soc->rx_desc_buf[mac_id];
  1262. dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
  1263. rx_desc_pool, rx_bufs_reaped[mac_id],
  1264. &head[mac_id], &tail[mac_id]);
  1265. rx_bufs_used += rx_bufs_reaped[mac_id];
  1266. }
  1267. }
  1268. nbuf = nbuf_head;
  1269. while (nbuf) {
  1270. struct dp_peer *peer;
  1271. uint16_t peer_id;
  1272. uint8_t e_code;
  1273. uint8_t *tlv_hdr;
  1274. rx_tlv_hdr = qdf_nbuf_data(nbuf);
  1275. peer_id = hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
  1276. rx_tlv_hdr);
  1277. peer = dp_peer_find_by_id(soc, peer_id);
  1278. /*
  1279. * retrieve the wbm desc info from nbuf TLV, so we can
  1280. * handle error cases appropriately
  1281. */
  1282. hal_rx_wbm_err_info_get_from_tlv(rx_tlv_hdr, &wbm_err_info);
  1283. /* Set queue_mapping in nbuf to 0 */
  1284. dp_set_rx_queue(nbuf, 0);
  1285. next = nbuf->next;
  1286. if (wbm_err_info.wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
  1287. if (wbm_err_info.reo_psh_rsn
  1288. == HAL_RX_WBM_REO_PSH_RSN_ERROR) {
  1289. DP_STATS_INC(soc,
  1290. rx.err.reo_error
  1291. [wbm_err_info.reo_err_code], 1);
  1292. /* increment @pdev level */
  1293. pool_id = wbm_err_info.pool_id;
  1294. dp_pdev = dp_get_pdev_for_mac_id(soc, pool_id);
  1295. if (dp_pdev)
  1296. DP_STATS_INC(dp_pdev, err.reo_error,
  1297. 1);
  1298. switch (wbm_err_info.reo_err_code) {
  1299. /*
  1300. * Handling for packets which have NULL REO
  1301. * queue descriptor
  1302. */
  1303. case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
  1304. pool_id = wbm_err_info.pool_id;
  1305. dp_rx_null_q_desc_handle(soc, nbuf,
  1306. rx_tlv_hdr,
  1307. pool_id, peer);
  1308. nbuf = next;
  1309. if (peer)
  1310. dp_peer_unref_del_find_by_id(
  1311. peer);
  1312. continue;
  1313. /* TODO */
  1314. /* Add per error code accounting */
  1315. case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
  1316. pool_id = wbm_err_info.pool_id;
  1317. if (hal_rx_msdu_end_first_msdu_get(soc->hal_soc,
  1318. rx_tlv_hdr)) {
  1319. peer_id =
  1320. hal_rx_mpdu_start_sw_peer_id_get(soc->hal_soc,
  1321. rx_tlv_hdr);
  1322. tid =
  1323. hal_rx_mpdu_start_tid_get(hal_soc, rx_tlv_hdr);
  1324. }
  1325. dp_2k_jump_handle(soc, nbuf, rx_tlv_hdr,
  1326. peer_id, tid);
  1327. nbuf = next;
  1328. if (peer)
  1329. dp_peer_unref_del_find_by_id(
  1330. peer);
  1331. continue;
  1332. default:
  1333. dp_err_rl("Got pkt with REO ERROR: %d",
  1334. wbm_err_info.reo_err_code);
  1335. break;
  1336. }
  1337. }
		} else if (wbm_err_info.wbm_err_src ==
			   HAL_RX_WBM_ERR_SRC_RXDMA) {
			if (wbm_err_info.rxdma_psh_rsn
			    == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				DP_STATS_INC(soc,
					     rx.err.rxdma_error
					     [wbm_err_info.rxdma_err_code], 1);
				/* increment @pdev level */
				pool_id = wbm_err_info.pool_id;
				dp_pdev = dp_get_pdev_for_mac_id(soc, pool_id);
				if (dp_pdev)
					DP_STATS_INC(dp_pdev,
						     err.rxdma_error, 1);

				switch (wbm_err_info.rxdma_err_code) {
				case HAL_RXDMA_ERR_UNENCRYPTED:
				case HAL_RXDMA_ERR_WIFI_PARSE:
					pool_id = wbm_err_info.pool_id;
					dp_rx_process_rxdma_err(soc, nbuf,
								rx_tlv_hdr,
								peer,
								wbm_err_info.rxdma_err_code,
								pool_id);
					nbuf = next;
					if (peer)
						dp_peer_unref_del_find_by_id(peer);
					continue;

				case HAL_RXDMA_ERR_TKIP_MIC:
					dp_rx_process_mic_error(soc, nbuf,
								rx_tlv_hdr,
								peer);
					nbuf = next;
					if (peer) {
						DP_STATS_INC(peer,
							     rx.err.mic_err, 1);
						dp_peer_unref_del_find_by_id(
									peer);
					}
					continue;

				case HAL_RXDMA_ERR_DECRYPT:
					pool_id = wbm_err_info.pool_id;
					e_code = wbm_err_info.rxdma_err_code;
					tlv_hdr = rx_tlv_hdr;
					if (peer) {
						DP_STATS_INC(peer,
							     rx.err.decrypt_err,
							     1);
					} else {
						dp_rx_process_rxdma_err(soc,
									nbuf,
									tlv_hdr,
									NULL,
									e_code,
									pool_id);
						nbuf = next;
						continue;
					}
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "Packet received with Decrypt error");
					break;

				default:
					dp_err_rl("RXDMA error %d",
						  wbm_err_info.rxdma_err_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}
		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_DEBUG);
		qdf_nbuf_free(nbuf);
		nbuf = next;
	}
	return rx_bufs_used; /* Assume no scale factor for now */
}
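
/*
 * Every early "continue" in the loop above must balance the reference
 * taken via dp_peer_find_by_id() and advance the nbuf cursor; the
 * fall-through default path additionally frees the errored nbuf. A
 * minimal sketch of that per-nbuf cleanup discipline, kept under
 * "#if 0" since dp_rx_wbm_err_nbuf_done() is a hypothetical helper
 * used only for illustration and is not part of this driver:
 */
#if 0
static inline void dp_rx_wbm_err_nbuf_done(qdf_nbuf_t nbuf,
					   struct dp_peer *peer)
{
	/* Drop the reference taken via dp_peer_find_by_id() */
	if (peer)
		dp_peer_unref_del_find_by_id(peer);

	/* Release the errored nbuf; the caller moves on to nbuf->next */
	qdf_nbuf_free(nbuf);
}
#endif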
/**
 * dup_desc_dbg() - dump and assert if duplicate rx desc found
 *
 * @soc: core DP main context
 * @rxdma_dst_ring_desc: rxdma destination ring descriptor
 * @rx_desc: void pointer to rx descriptor
 *
 * Return: void
 */
static void dup_desc_dbg(struct dp_soc *soc,
			 hal_rxdma_desc_t rxdma_dst_ring_desc,
			 void *rx_desc)
{
	DP_STATS_INC(soc, rx.err.hal_rxdma_err_dup, 1);
	dp_rx_dump_info_and_assert(
			soc,
			soc->rx_rel_ring.hal_srng,
			hal_rxdma_desc_to_hal_ring_desc(rxdma_dst_ring_desc),
			rx_desc);
}
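
/*
 * dup_desc_dbg() fires from the reaping loop below when a cookie maps
 * to a software descriptor whose in_use flag is already clear, i.e.
 * the host appears to be reaping a descriptor it reaped earlier but
 * has not yet replenished to HW; the stats increment and ring dump
 * record the HW/SW disagreement before the assert.
 */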
/**
 * dp_rx_err_mpdu_pop() - extract the MSDUs from link descs
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: rxdma destination ring descriptor
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in MPDU to be popped
 */
static inline uint32_t
dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
		   hal_rxdma_desc_t rxdma_dst_ring_desc,
		   union dp_rx_desc_list_elem_t **head,
		   union dp_rx_desc_list_elem_t **tail)
{
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_cnt;
	uint32_t i;
	uint8_t push_reason;
	uint8_t rxdma_error_code = 0;
	uint8_t bm_action = HAL_BM_ACTION_PUT_IN_IDLE_LIST;
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	hal_rxdma_desc_t ring_desc;

	msdu = NULL;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
				     &p_last_buf_addr_info, &msdu_cnt);

	push_reason =
		hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc);
	if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
		rxdma_error_code =
			hal_rx_reo_ent_rxdma_error_code_get(rxdma_dst_ring_desc);
	}
	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_link_desc_va(soc, &buf_info);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(soc->hal_soc, rx_msdu_link_desc,
				     &msdu_list, &num_msdus);

		if (msdu_list.sw_cookie[0] != HAL_RX_COOKIE_SPECIAL) {
			/*
			 * If the msdus belong to an NSS offloaded radio
			 * and the rbm is not SW3_BM, return the msdu_link
			 * descriptor without freeing the msdus (nbufs);
			 * those buffers are handed to the NSS completion
			 * ring for NSS to free. Otherwise, iterate through
			 * the msdu link desc list and free each msdu in
			 * the list.
			 */
			if (msdu_list.rbm[0] != HAL_RX_BUF_RBM_SW3_BM &&
			    wlan_cfg_get_dp_pdev_nss_enabled(
							pdev->wlan_cfg_ctx))
				bm_action = HAL_BM_ACTION_RELEASE_MSDU_LIST;
			else {
				for (i = 0; i < num_msdus; i++) {
					struct dp_rx_desc *rx_desc =
						dp_rx_cookie_2_va_rxdma_buf(soc,
							msdu_list.sw_cookie[i]);
					qdf_assert_always(rx_desc);
					msdu = rx_desc->nbuf;
					/*
					 * An unlikely scenario where the host
					 * is reaping a descriptor it already
					 * reaped just a while ago but has yet
					 * to replenish back to HW. In this
					 * case the host dumps the last 128
					 * descriptors, including the software
					 * descriptor rx_desc, and asserts.
					 */
					ring_desc = rxdma_dst_ring_desc;
					if (qdf_unlikely(!rx_desc->in_use)) {
						dup_desc_dbg(soc,
							     ring_desc,
							     rx_desc);
						continue;
					}

					qdf_nbuf_unmap_single(soc->osdev, msdu,
							      QDF_DMA_FROM_DEVICE);

					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s][%d] msdu_nbuf=%pK ",
						  __func__, __LINE__, msdu);

					qdf_nbuf_free(msdu);
					rx_bufs_used++;
					dp_rx_add_to_free_desc_list(head,
								    tail,
								    rx_desc);
				}
			}
		} else {
			rxdma_error_code = HAL_RXDMA_ERR_WAR;
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
					      &p_buf_addr_info);

		dp_rx_link_desc_return(soc, p_last_buf_addr_info, bm_action);
		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr);
	DP_STATS_INC(soc, rx.err.rxdma_error[rxdma_error_code], 1);
	if (pdev)
		DP_STATS_INC(pdev, err.rxdma_error, 1);

	if (rxdma_error_code == HAL_RXDMA_ERR_DECRYPT) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Packet received with Decrypt error");
	}

	return rx_bufs_used;
}
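
/*
 * The reaping loop above hinges on the cookie-to-descriptor mapping:
 * each sw_cookie carried in the HW MSDU list indexes back into the
 * host's rxdma buffer descriptor pool. A minimal sketch of that
 * lookup, kept under "#if 0"; dp_rx_err_cookie_2_nbuf() is a
 * hypothetical name used only for illustration:
 */
#if 0
static qdf_nbuf_t dp_rx_err_cookie_2_nbuf(struct dp_soc *soc,
					  uint32_t sw_cookie)
{
	/* Translate the HW cookie into the host SW descriptor */
	struct dp_rx_desc *rx_desc =
		dp_rx_cookie_2_va_rxdma_buf(soc, sw_cookie);

	qdf_assert_always(rx_desc);

	/* The SW descriptor carries the nbuf handed to HW at replenish */
	return rx_desc->nbuf;
}
#endif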
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
		     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	hal_rxdma_desc_t rxdma_dst_ring_desc;
	hal_soc_handle_t hal_soc;
	void *err_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t work_done = 0;
	uint32_t rx_bufs_used = 0;

	if (!pdev)
		return 0;

	err_dst_srng = pdev->rxdma_err_dst_ring[mac_for_pdev].hal_srng;
	if (!err_dst_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : RXDMA err dest ring not initialized -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}
	hal_soc = soc->hal_soc;
	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, err_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : RXDMA err dest ring access failed -- %pK",
			  __func__, __LINE__, err_dst_srng);
		return 0;
	}
	while (qdf_likely(quota-- && (rxdma_dst_ring_desc =
		hal_srng_dst_get_next(hal_soc, err_dst_srng)))) {
		rx_bufs_used += dp_rx_err_mpdu_pop(soc, mac_id,
						   rxdma_dst_ring_desc,
						   &head, &tail);
	}

	dp_srng_access_end(int_ctx, soc, err_dst_srng);

	if (rx_bufs_used) {
		dp_rxdma_srng = &pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[mac_id];

		dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used,
					&head, &tail);
		work_done += rx_bufs_used;
	}

	return work_done;
}
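
/*
 * dp_rxdma_err_process() is quota driven: one unit of quota is
 * consumed per destination ring entry reaped. A minimal caller
 * sketch, kept under "#if 0"; the per-mac loop and the MAX_PDEV_CNT
 * bound are assumptions about the surrounding service code, not the
 * driver's actual dispatcher:
 */
#if 0
static uint32_t dp_rxdma_err_service_sketch(struct dp_intr *int_ctx,
					    struct dp_soc *soc,
					    uint32_t budget)
{
	uint32_t mac_id;
	uint32_t total = 0;

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		uint32_t done = dp_rxdma_err_process(int_ctx, soc,
						     mac_id, budget);

		total += done;
		if (done >= budget)
			break;

		/* Carry the unused budget to the next mac's error ring */
		budget -= done;
	}

	return total;
}
#endif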