dp_rx_err.c

/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#else
#include <ieee80211.h>
#endif

/**
 * dp_rx_frag_handle() - Handles fragmented Rx frames
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements RX 802.11 fragmentation handling.
 * The handling is mostly the same as legacy fragmentation handling.
 * If required, this function can re-inject the frames back to the
 * REO ring (with proper settings to bypass the fragmentation check
 * but still use duplicate detection / re-ordering) and route these
 * frames to a different core.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_frag_handle(struct dp_soc *soc, void *ring_desc,
		  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		  union dp_rx_desc_list_elem_t **head,
		  union dp_rx_desc_list_elem_t **tail,
		  uint32_t quota)
{
	uint32_t rx_bufs_used = 0;

	return rx_bufs_used;
}
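
/*
 * Fragment reassembly is not implemented yet; the stub above reaps
 * nothing. Today dp_rx_err_process() (below) only accounts such frames,
 * roughly:
 *
 *	rx_bufs_used += dp_rx_frag_handle(soc, ring_desc,
 *					  &mpdu_desc_info,
 *					  &head, &tail, quota);
 *	DP_STATS_INC(soc, rx.rx_frags, 1);
 */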

/**
 * dp_rx_msdus_drop() - Drops all MSDUs in an MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		 union dp_rx_desc_list_elem_t **head,
		 union dp_rx_desc_list_elem_t **tail,
		 uint32_t quota)
{
	uint8_t num_msdus;
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
	qdf_assert(link_desc_va);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(link_desc_va, &msdu_list, &num_msdus);

	for (i = 0; (i < num_msdus) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va_rxdma_buf(soc,
						    msdu_list.sw_cookie[i]);

		qdf_assert(rx_desc);

		rx_bufs_used++;

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	}

	return rx_bufs_used;
}
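
/*
 * Note on buffer lifecycle: dp_rx_msdus_drop() frees the nbufs but only
 * queues the SW descriptors on the caller's local free-list; the caller
 * is expected to hand that list to dp_rx_buffers_replenish() after ring
 * access ends, as dp_rx_err_process() does at the bottom of this file.
 */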

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore PN check errors,
 * or if DP decides that this frame is still OK, the frame can be
 * re-injected back to REO to use some of the other features
 * of REO, e.g. duplicate detection/routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      union dp_rx_desc_list_elem_t **head,
		      union dp_rx_desc_list_elem_t **tail,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
	}

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						head, tail, quota);

	return rx_bufs_used;
}
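
/*
 * A minimal sketch of what the TODO above could grow into, assuming a
 * hypothetical per-peer flag (ignore_pn_errors below is NOT a field of
 * struct dp_peer today):
 *
 *	if (qdf_likely(peer))
 *		peer_pn_policy = peer->ignore_pn_errors;
 *
 * With peer_pn_policy set, the frame would be re-injected to REO
 * instead of being dropped.
 */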

/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of an MPDU suddenly jumps by 2K. Today there are 2 cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A), the protocol stack is invoked to generate a DELBA/DEAUTH
 * frame. For case B), the frame is normally dropped and no further
 * action is taken.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     union dp_rx_desc_list_elem_t **head,
		     union dp_rx_desc_list_elem_t **tail,
		     uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				head, tail, quota);
}
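
/**
 * dp_rx_chain_msdus() - Chains all MSDUs of an MPDU for an invalid peer
 *
 * @soc: core txrx main context
 * @nbuf: network buffer holding the current MSDU
 * @rx_desc: sw rx descriptor for the current MSDU
 *
 * Appends @nbuf to the per-soc invalid-peer MSDU list, marking chain
 * start/end from the first/last MSDU bits in the Rx TLVs.
 *
 * Return: true once the last MSDU of the MPDU has been chained
 */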
static bool
dp_rx_chain_msdus(struct dp_soc *soc, qdf_nbuf_t nbuf,
		  struct dp_rx_desc *rx_desc)
{
	bool mpdu_done = false;

	if (hal_rx_msdu_end_first_msdu_get(rx_desc->rx_buf_start)) {
		qdf_nbuf_set_chfrag_start(rx_desc->nbuf, 1);
		soc->invalid_peer_head_msdu = NULL;
		soc->invalid_peer_tail_msdu = NULL;
	}

	if (hal_rx_msdu_end_last_msdu_get(rx_desc->rx_buf_start)) {
		qdf_nbuf_set_chfrag_end(rx_desc->nbuf, 1);
		mpdu_done = true;
	}

	DP_RX_LIST_APPEND(soc->invalid_peer_head_msdu,
			  soc->invalid_peer_tail_msdu,
			  nbuf);

	return mpdu_done;
}
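
/*
 * Typical use (see dp_rx_null_q_desc_handle() below): chain MSDUs until
 * the MPDU completes, then hand the whole chain off in one shot:
 *
 *	mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_desc);
 *	if (mpdu_done)
 *		dp_rx_process_invalid_peer(soc, nbuf);
 */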

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *                              descriptor violation on either a
 *                              REO or WBM ring
 *
 * @soc: core DP main context
 * @rx_desc: pointer to the sw rx descriptor
 * @head: pointer to head of rx descriptors to be added to free list
 * @tail: pointer to tail of rx descriptors to be added to free list
 * @quota: upper limit of descriptors that can be reaped
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * happens if a packet is received on a QoS-enabled TID before the
 * ADDBA negotiation for that TID has set up the TID queue. It may
 * also happen for MC/BC frames if they are not routed to the
 * non-QoS TID queue, in the absence of any other default TID queue.
 * This error can show up both in a REO destination ring and in a WBM
 * release ring.
 *
 * Return: uint32_t: No. of Rx buffers reaped
 */
static uint32_t
dp_rx_null_q_desc_handle(struct dp_soc *soc, struct dp_rx_desc *rx_desc,
			 union dp_rx_desc_list_elem_t **head,
			 union dp_rx_desc_list_elem_t **tail,
			 uint32_t quota)
{
	uint32_t rx_bufs_used = 0;
	uint32_t pkt_len, l2_hdr_offset;
	uint16_t msdu_len;
	qdf_nbuf_t nbuf;
	struct dp_vdev *vdev;
	uint16_t peer_id = 0xFFFF;
	struct dp_peer *peer = NULL;
	uint32_t sgi, rate_mcs, tid;

	rx_bufs_used++;

	nbuf = rx_desc->nbuf;

	qdf_nbuf_unmap_single(soc->osdev, nbuf,
			      QDF_DMA_BIDIRECTIONAL);

	rx_desc->rx_buf_start = qdf_nbuf_data(nbuf);

	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(rx_desc->rx_buf_start);

	msdu_len = hal_rx_msdu_start_msdu_len_get(rx_desc->rx_buf_start);
	pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

	/* Set length in nbuf */
	qdf_nbuf_set_pktlen(nbuf, pkt_len);
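
	/*
	 * Worked example of the length math (illustrative numbers): with
	 * a hypothetical 1500-byte MSDU, a 2-byte L3 header pad and
	 * RX_PKT_TLVS_LEN bytes of pre-header TLVs, the nbuf length is
	 * set to 1500 + 2 + RX_PKT_TLVS_LEN; the TLVs and the pad are
	 * stripped again by the qdf_nbuf_pull_head() calls further down.
	 */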

	/*
	 * Check if DMA completed -- msdu_done is the last bit
	 * to be written
	 */
	if (!hal_rx_attn_msdu_done_get(rx_desc->rx_buf_start)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("MSDU DONE failure"));

		hal_rx_dump_pkt_tlvs(rx_desc->rx_buf_start,
				     QDF_TRACE_LEVEL_INFO);
		qdf_assert(0);
	}

	peer_id = hal_rx_mpdu_start_sw_peer_id_get(rx_desc->rx_buf_start);
	peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer) {
		bool mpdu_done = false;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("peer is NULL"));

		mpdu_done = dp_rx_chain_msdus(soc, nbuf, rx_desc);

		if (mpdu_done)
			dp_rx_process_invalid_peer(soc, nbuf);

		dp_rx_add_to_free_desc_list(head, tail, rx_desc);

		return rx_bufs_used;
	}

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("INVALID vdev %p"), vdev);
		/* Drop & free packet */
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto fail;
	}

	sgi = hal_rx_msdu_start_sgi_get(rx_desc->rx_buf_start);
	rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_desc->rx_buf_start);
	tid = hal_rx_mpdu_start_tid_get(rx_desc->rx_buf_start);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s: %d, SGI: %d, rate_mcs: %d, tid: %d",
		  __func__, __LINE__, sgi, rate_mcs, tid);

	/* WDS Source Port Learning */
	if (qdf_likely(vdev->rx_decap_type == htt_cmn_pkt_type_ethernet) &&
	    (vdev->wds_enabled))
		dp_rx_wds_srcport_learn(soc, rx_desc->rx_buf_start, peer, nbuf);

	/*
	 * Advance the packet start pointer by total size of
	 * pre-header TLVs
	 */
	qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
	if (l2_hdr_offset)
		qdf_nbuf_pull_head(nbuf, l2_hdr_offset);

	if (hal_rx_mpdu_start_mpdu_qos_control_valid_get(
				rx_desc->rx_buf_start)) {
		/* TODO: Assuming that qos_control_valid also indicates
		 * unicast. Should we check this?
		 */
		if (peer &&
		    peer->rx_tid[tid].hw_qdesc_vaddr_unaligned == NULL) {
			/* IEEE80211_SEQ_MAX indicates invalid start_seq */
			dp_rx_tid_setup_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		}
	}

#ifdef QCA_WIFI_NAPIER_EMULATION /* Debug code, remove later */
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: p_id %d msdu_len %d hdr_off %d",
		  __func__, peer_id, msdu_len, l2_hdr_offset);

	print_hex_dump(KERN_ERR,
		       "\t Pkt Data:", DUMP_PREFIX_NONE, 32, 4,
		       qdf_nbuf_data(nbuf), 128, false);
#endif /* QCA_WIFI_NAPIER_EMULATION */

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw)) {
		qdf_nbuf_set_next(nbuf, NULL);
		dp_rx_deliver_raw(vdev, nbuf);
	} else {
		if (qdf_unlikely(peer->bss_peer)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  FL("received pkt with same src MAC"));
			/* Drop & free packet */
			qdf_nbuf_free(nbuf);
			goto fail;
		}

		if (vdev->osif_rx) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  FL("vdev %p osif_rx %p"), vdev,
				  vdev->osif_rx);
			qdf_nbuf_set_next(nbuf, NULL);
			vdev->osif_rx(vdev->osif_vdev, nbuf);
			DP_STATS_INC(vdev->pdev, rx.to_stack.num, 1);
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("INVALID vdev %p OR osif_rx"), vdev);
			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		}
	}

fail:
	dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	return rx_bufs_used;
}
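
/*
 * Delivery decision in dp_rx_null_q_desc_handle(), summarized: raw
 * decap frames go out via dp_rx_deliver_raw(); frames echoed back from
 * a BSS peer (same source MAC) are dropped; everything else is handed
 * to the OS shim through vdev->osif_rx(). On every path the sw
 * descriptor is returned to the caller's free-list.
 */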

/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 *                            (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	void *hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "WBM RELEASE RING not initialized");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access For WBM Release SRNG Failed - %p"),
			  wbm_rel_srng);
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		goto done;
	}

	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);

	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
					  src_srng_desc, buf_addr_info);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
			  *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			  *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}

done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}
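
/*
 * Note: dp_rx_err_process() below currently ignores the return status.
 * A stricter caller might check it and account the failure, e.g.
 * (sketch only, not current behavior):
 *
 *	if (dp_rx_link_desc_return(soc, ring_desc) != QDF_STATUS_SUCCESS)
 *		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 *			  FL("link desc return failed"));
 */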

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t rx_bufs_used = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %p"), hal_ring);
		goto done;
	}

	while (qdf_likely((ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring))
				&& quota--)) {

		DP_STATS_INC(soc, rx.err_ring_pkts, 1);

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);

		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);

		/* Get the MPDU DESC info */
		hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/* TODO */
			rx_bufs_used += dp_rx_frag_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			DP_STATS_INC(soc, rx.rx_frags, 1);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.reo_error[HAL_REO_ERR_PN_CHECK_FAILED],
				1);

			rx_bufs_used += dp_rx_pn_error_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			DP_STATS_INC(soc,
				rx.err.reo_error[HAL_REO_ERR_REGULAR_FRAME_2K_JUMP],
				1);

			rx_bufs_used += dp_rx_2k_jump_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}

		/* Return link descriptor through WBM ring (SW2WBM) */
		dp_rx_link_desc_return(soc, ring_desc);
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	/* Assume MAC id = 0, owner = 0 */
	if (rx_bufs_used) {
		dp_pdev = soc->pdev_list[0];
		dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
		rx_desc_pool = &soc->rx_desc_buf[0];

		dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
					rx_bufs_used, &head, &tail,
					HAL_RX_BUF_RBM_SW3_BM);
	}

	return rx_bufs_used; /* Assume no scale factor for now */
}
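
/*
 * Note on the "MAC id = 0" assumption above: dp_rx_err_process()
 * replenishes only pool 0, which holds on single-pdev targets. A
 * multi-pdev variant would track reaped counts per pool, exactly as
 * dp_rx_wbm_err_process() below does with its per-MAC arrays.
 */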

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT] = { NULL };
	union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT] = { NULL };
	uint32_t rx_bufs_used[MAX_PDEV_CNT] = { 0 };
	uint32_t rx_bufs_reaped = 0;
	uint8_t buf_type, rbm;
	uint8_t wbm_err_src;
	uint32_t rx_buf_cookie;
	uint8_t mac_id;
	struct dp_pdev *dp_pdev;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL RING Access Failed -- %p"), hal_ring);
		goto done;
	}

	while (qdf_likely((ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring))
				&& quota--)) {

		/* XXX */
		wbm_err_src = HAL_RX_WBM_ERR_SRC_GET(ring_desc);

		qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Invalid RBM %d"), rbm);
			continue;
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
		qdf_assert(rx_desc);

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		if (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			uint8_t push_reason =
				HAL_RX_WBM_REO_PUSH_REASON_GET(ring_desc);

			if (push_reason == HAL_RX_WBM_REO_PSH_RSN_ERROR) {
				uint8_t reo_error_code =
					HAL_RX_WBM_REO_ERROR_CODE_GET(ring_desc);

				DP_STATS_INC(soc, rx.err.reo_error[
						reo_error_code], 1);

				switch (reo_error_code) {
				/*
				 * Handling for packets which have NULL REO
				 * queue descriptor
				 */
				case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_WARN,
						  "Got pkt with REO ERROR: %d",
						  reo_error_code);
					rx_bufs_used[rx_desc->pool_id] +=
						dp_rx_null_q_desc_handle(soc,
							rx_desc,
							&head[rx_desc->pool_id],
							&tail[rx_desc->pool_id],
							quota);
					continue;
				/* TODO */
				/* Add per error code accounting */
				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "REO error %d detected",
						  reo_error_code);
				}
			}
		} else if (wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) {
			uint8_t push_reason =
				HAL_RX_WBM_RXDMA_PUSH_REASON_GET(ring_desc);

			if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				uint8_t rxdma_error_code =
					HAL_RX_WBM_RXDMA_ERROR_CODE_GET(ring_desc);

				DP_STATS_INC(soc, rx.err.rxdma_error[
						rxdma_error_code], 1);

				switch (rxdma_error_code) {
				/* TODO */
				/* Add per error code accounting */
				default:
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_ERROR,
						  "RXDMA error %d detected",
						  rxdma_error_code);
				}
			}
		} else {
			/*
			 * Should not come here; the common tear-down below
			 * unmaps, dumps and frees the buffer exactly once.
			 */
			qdf_assert(0);
		}

		rx_bufs_used[rx_desc->pool_id]++;

		qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
				      QDF_DMA_BIDIRECTIONAL);

		rx_desc->rx_buf_start = qdf_nbuf_data(rx_desc->nbuf);

		hal_rx_dump_pkt_tlvs(rx_desc->rx_buf_start,
				     QDF_TRACE_LEVEL_INFO);

		qdf_nbuf_free(rx_desc->nbuf);

		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
					    &tail[rx_desc->pool_id], rx_desc);
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		if (rx_bufs_used[mac_id]) {
			dp_pdev = soc->pdev_list[mac_id];
			dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
			rx_desc_pool = &soc->rx_desc_buf[mac_id];

			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						rx_bufs_used[mac_id],
						&head[mac_id], &tail[mac_id],
						HAL_RX_BUF_RBM_SW3_BM);
			rx_bufs_reaped += rx_bufs_used[mac_id];
		}
	}

	return rx_bufs_reaped; /* Assume no scale factor for now */
}
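
/*
 * Both entry points are intended to be driven from the DP interrupt
 * service path with a per-ring budget; a hypothetical caller (ring
 * field names illustrative, not the actual scheduler code) would look
 * roughly like:
 *
 *	uint32_t work = 0;
 *
 *	work += dp_rx_err_process(soc, soc->reo_exception_ring.hal_srng,
 *				  budget);
 *	work += dp_rx_wbm_err_process(soc, soc->rx_rel_ring.hal_srng,
 *				      budget - work);
 */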