dp_rx_err.c

/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <ieee80211.h>
#endif
#include "dp_rx_defrag.h"
#include <enet.h>	/* LLC_SNAP_HDR_LEN */
/**
 * dp_rx_msdus_drop() - Drops all MSDUs per MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		union dp_rx_desc_list_elem_t **head,
		union dp_rx_desc_list_elem_t **tail,
		uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(link_desc_va, &msdu_list,
			     mpdu_desc_info->msdu_count);

	for (i = 0; (i < HAL_RX_NUM_MSDU_DESC) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert(rx_desc);

		rx_bufs_used++;

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	}

	return rx_bufs_used;
}
/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore the PN check errors,
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO, e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      union dp_rx_desc_list_elem_t **head,
		      union dp_rx_desc_list_elem_t **tail,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
	}

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						head, tail, quota);

	return rx_bufs_used;
}
/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of the MPDU jumps suddenly by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A), the protocol stack is invoked to generate a DELBA/DEAUTH frame
 * For case B), the frame is normally dropped and no further action is taken
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     union dp_rx_desc_list_elem_t **head,
		     union dp_rx_desc_list_elem_t **tail,
		     uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				head, tail, quota);
}
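/**
 * dp_rx_chain_msdus() - Chain MSDUs of an MPDU received for an
 *			 unknown peer onto the soc-level invalid-peer list
 *
 * @soc: core txrx main context
 * @nbuf: rx buffer (MSDU) to be chained
 * @rx_desc: software Rx descriptor for this buffer
 *
 * The first-MSDU flag in the Rx TLVs resets the soc invalid-peer list and
 * marks the chain start; the last-MSDU flag marks the chain end. Every MSDU
 * is appended to soc->invalid_peer_head_msdu/invalid_peer_tail_msdu.
 *
 * Return: true once the last MSDU of the MPDU has been chained,
 *	   i.e. the MPDU is complete
 */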
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf,
		  struct dp_rx_desc *rx_desc)
{
	bool mpdu_done = false;

	if (hal_rx_msdu_end_first_msdu_get(rx_desc->rx_buf_start)) {
		qdf_nbuf_set_chfrag_start(rx_desc->nbuf, 1);
		soc->invalid_peer_head_msdu = NULL;
		soc->invalid_peer_tail_msdu = NULL;
	}

	if (hal_rx_msdu_end_last_msdu_get(rx_desc->rx_buf_start)) {
		qdf_nbuf_set_chfrag_end(rx_desc->nbuf, 1);
		mpdu_done = true;
	}

	DP_RX_LIST_APPEND(soc->invalid_peer_head_msdu,
			  soc->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}
/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *				descriptor violation on either a
 *				REO or WBM ring
 *
 * @soc: core DP main context
 * @rx_desc: pointer to the sw rx descriptor
 * @head: pointer to head of rx descriptors to be added to free list
 * @tail: pointer to tail of rx descriptors to be added to free list
 * @quota: upper limit of descriptors that can be reaped
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * may happen if a packet is received on a QOS enabled TID before the
 * ADDBA negotiation for that TID, when the TID queue is set up. Or
 * it may also happen for MC/BC frames if they are not routed to the
 * non-QOS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination ring or a WBM
 * release ring.
 *
 * Return: uint32_t: No. of Rx buffers reaped
 */
static uint32_t
dp_rx_null_q_desc_handle(struct dp_soc *soc, struct dp_rx_desc *rx_desc,
			 union dp_rx_desc_list_elem_t **head,
			 union dp_rx_desc_list_elem_t **tail,
			 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	qdf_nbuf_t nbuf;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	uint32_t sgi, rate_mcs, tid;

	rx_bufs_used++;

	nbuf = rx_desc->nbuf;

	qdf_nbuf_unmap_single(soc->osdev, nbuf,
			      QDF_DMA_BIDIRECTIONAL);

	rx_desc->rx_buf_start = qdf_nbuf_data(nbuf);

	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(rx_desc->rx_buf_start);

	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_desc->rx_buf_start);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_desc->rx_buf_start)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(rx_desc->rx_buf_start,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_desc->rx_buf_start);
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		bool mpdu_done = false;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("peer is NULL"));

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_desc);

		if (mpdu_done)
			dp_rx_process_invalid_peer(soc, nbuf);

		dp_rx_add_to_free_desc_list(head, tail, rx_desc);

		return rx_bufs_used;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %p OR osif_rx"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto fail;
	}

	sgi = hal_rx_msdu_start_sgi_get(rx_desc->rx_buf_start);
	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_desc->rx_buf_start);
	tid = hal_rx_mpdu_start_tid_get(rx_desc->rx_buf_start);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s: %d, SGI: %d, rate_mcs: %d, tid: %d",
		  __func__, __LINE__, sgi, rate_mcs, tid);

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet) &&
	    (vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_desc->rx_buf_start, peer, nbuf);

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	if (l2_hdr_offset)
		qdf_nbuf_pull_head(nbuf, l2_hdr_offset);

	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(
				rx_desc->rx_buf_start)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		if (peer &&
		    peer->rx_tid[tid].hw_qdesc_vaddr_unaligned == NULL) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: p_id %d msdu_len %d hdr_off %d",
		  __func__, peer_id, msdu_len, l2_hdr_offset);

	print_hex_dump(KERN_ERR,
		       "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
		       qdf_nbuf_data(nbuf), 128, false);
#endif /* NAPIER_EMULATION */

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf);
	} else {
		if (qdf_unlikely(peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  FL("received pkt with same src MAC"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			goto fail;
		}

		if (vdev->osif_rx) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  FL("vdev %p osif_rx %p"), vdev,
				  vdev->osif_rx);
			qdf_nbuf_set_next(nbuf, NULL);
			vdev->osif_rx(vdev->osif_vdev, nbuf);
			DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("INVALID vdev %p OR osif_rx"), vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		}
	}
fail:
	dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	return rx_bufs_used;
}
/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	void *hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %p"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}

	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);
	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, buf_addr_info);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}
done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}
/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t rx_bufs_used = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %p"), hal_ring);
		goto done;
	}

	while (qdf_likely((ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring))
				&& quota--)) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);

		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/* TODO */
			rx_bufs_used += dp_rx_frag_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				1);

			rx_bufs_used += dp_rx_pn_error_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				1);

			rx_bufs_used += dp_rx_2k_jump_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}

		/* Return link descriptor through WBM ring (SW2WBM) */
		dp_rx_link_desc_return(soc, ring_desc);
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	/* Assume MAC id = 0, owner = 0 */
	if (rx_bufs_used) {
		dp_pdev = soc->pdev_list[0];
		dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[0];

		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
			rx_bufs_used, &head, &tail, HAL_RX_BUF_RBM_SW3_BM);
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used[MAX_PDEV_CNT] = { 0 };
	uint32_t rx_bufs_reaped = 0;
	uint8_t buf_type, rbm;
	uint8_t wbm_err_src;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %p"), hal_ring);
		goto done;
	}

	while (qdf_likely((ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring))
				&& quota--)) {

		/* XXX */
		wbm_err_src = HAL_RX_WBM_ERR_SRC_GET(ring_desc);

		qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		if (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			uint8_t push_reason =
				HAL_RX_WBM_REO_PUSH_REASON_GET(ring_desc);

			if (push_reason == HAL_RX_WBM_REO_PSH_RSN_ERROR) {
				uint8_t reo_error_code =
					HAL_RX_WBM_REO_ERROR_CODE_GET(ring_desc);

				DP_STATS_INC(soc, rx.err.reo_error[
						reo_error_code], 1);

				switch (reo_error_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_WARN,
						  "Got pkt with REO ERROR: %d",
						  reo_error_code);
					rx_bufs_used[rx_desc->pool_id] +=
						dp_rx_null_q_desc_handle(soc,
							rx_desc,
							&head[rx_desc->pool_id],
							&tail[rx_desc->pool_id],
							quota);
					continue;
				/* TODO */
				/* Add per error code accounting */
				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "REO error %d detected",
						  reo_error_code);
				}
			}
		} else if (wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) {
			uint8_t push_reason =
				HAL_RX_WBM_RXDMA_PUSH_REASON_GET(ring_desc);

			if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				uint8_t rxdma_error_code =
					HAL_RX_WBM_RXDMA_ERROR_CODE_GET(ring_desc);

				DP_STATS_INC(soc, rx.err.rxdma_error[
						rxdma_error_code], 1);

				switch (rxdma_error_code) {
				/* TODO */
				/* Add per error code accounting */
				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "RXDMA error %d detected",
						  rxdma_error_code);
				}
			}
		} else {
			/* Should not come here */
			rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

			rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc,
							      rx_buf_cookie);
			qdf_assert(rx_desc);

			qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
					      QDF_DMA_BIDIRECTIONAL);
			rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);
			hal_rx_dump_pkt_tlvs(rx_desc->rx_buf_start,
					     QDF_TRACE_LEVEL_INFO);
			qdf_assert(0);
		}

		rx_bufs_used[rx_desc->pool_id]++;

		qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
				      QDF_DMA_BIDIRECTIONAL);
		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		hal_rx_dump_pkt_tlvs(rx_desc->rx_buf_start,
				     QDF_TRACE_LEVEL_INFO);

		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id], rx_desc);
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_used[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
					rx_desc_pool, rx_bufs_used[mac_id],
					&head[mac_id], &tail[mac_id],
					HAL_RX_BUF_RBM_SW3_BM);

			rx_bufs_reaped += rx_bufs_used[mac_id];
		}
	}

	return rx_bufs_reaped; /* Assume no scale factor for now */
}