/*
 * Copyright (c) 2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *                                 the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buffer info containing the software cookie and the physical
 *            address of the link descriptor. The cookie is normally just
 *            an index into a per-SOC array of link descriptor banks.
 *
 * This returns the VA of the link descriptor, which the HAL layer later
 * uses to retrieve the list of MSDUs for a given MPDU.
 *
 * Return: void *: Virtual address of the MSDU link descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
                  struct hal_buf_info *buf_info)
{
    void *link_desc_va;

    /* TODO: Add sanity check for the cookie */
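    /*
     * The SW cookie selects the link descriptor bank; since each bank is
     * a single physically contiguous allocation, the VA is the bank's
     * base VA plus the offset of the descriptor's PA from the bank's
     * base PA.
     */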
    link_desc_va = soc->link_desc_banks[buf_info->sw_cookie].base_vaddr +
        (buf_info->paddr -
         soc->link_desc_banks[buf_info->sw_cookie].base_paddr);

    return link_desc_va;
}

/**
 * dp_rx_frag_handle() - Handles fragmented Rx frames
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot
 *
 * This function implements Rx 802.11 fragmentation handling.
 * The handling is mostly the same as legacy fragmentation handling.
 * If required, this function can re-inject the frames back to the
 * REO ring (with proper settings to by-pass the fragmentation check
 * but still use duplicate detection / re-ordering) and route these
 * frames to a different core.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_frag_handle(struct dp_soc *soc, void *ring_desc,
          struct hal_rx_mpdu_desc_info *mpdu_desc_info,
          union dp_rx_desc_list_elem_t **head,
          union dp_rx_desc_list_elem_t **tail,
          uint32_t quota)
{
    uint32_t rx_bufs_used = 0;
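
    /*
     * TODO: 802.11 fragment reassembly / re-injection is not implemented
     * yet; this stub consumes no buffers.
     */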
    return rx_bufs_used;
}

/**
 * dp_rx_msdus_drop() - Drops all the MSDUs of a given MPDU
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot
 *
 * This function is used to drop all the MSDUs in an MPDU.
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
         struct hal_rx_mpdu_desc_info *mpdu_desc_info,
         union dp_rx_desc_list_elem_t **head,
         union dp_rx_desc_list_elem_t **tail,
         uint32_t quota)
{
    uint8_t num_msdus;
    uint32_t rx_bufs_used = 0;
    void *link_desc_va;
    struct hal_buf_info buf_info;
    struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
    int i;

    hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

    link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
    qdf_assert(link_desc_va);
    /* No UNMAP required -- this is "malloc_consistent" memory */
    hal_rx_msdu_list_get(link_desc_va, &msdu_list, &num_msdus);
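
    /*
     * Walk every MSDU pointed to by the link descriptor: free the
     * attached network buffer and queue the SW Rx descriptor onto the
     * local free-list so the caller can replenish the refill ring.
     */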
    for (i = 0; (i < HAL_RX_NUM_MSDU_DESC) && quota--; i++) {
        struct dp_rx_desc *rx_desc =
            dp_rx_cookie_2_va(soc, msdu_list.sw_cookie[i]);

        qdf_assert(rx_desc);

        rx_bufs_used++;

        /* Just free the buffers */
        qdf_nbuf_free(rx_desc->nbuf);

        dp_rx_add_to_free_desc_list(head, tail, rx_desc);
    }

    return rx_bufs_used;
}

/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore PN check errors, or if DP
 * decides that the frame is still OK, the frame can be re-injected
 * back to REO to use some of its other features, e.g. duplicate
 * detection and routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
              struct hal_rx_mpdu_desc_info *mpdu_desc_info,
              union dp_rx_desc_list_elem_t **head,
              union dp_rx_desc_list_elem_t **tail,
              uint32_t quota)
{
    uint16_t peer_id;
    uint32_t rx_bufs_used = 0;
    struct dp_peer *peer;
    bool peer_pn_policy = false;

    peer_id = DP_PEER_METADATA_PEER_ID_GET(
                mpdu_desc_info->peer_meta_data);

    peer = dp_peer_find_by_id(soc, peer_id);

    if (qdf_likely(peer)) {
        /*
         * TODO: Check for peer-specific policies and set
         * peer_pn_policy accordingly
         */
    }
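
    /*
     * Until the peer-policy check above is implemented, peer_pn_policy
     * stays false and every MPDU that fails the PN check is dropped
     * rather than re-injected.
     */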
    /* No peer PN policy -- definitely drop */
    if (!peer_pn_policy)
        rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
                        mpdu_desc_info,
                        head, tail, quota);

    return rx_bufs_used;
}

/**
 * dp_rx_2k_jump_handle() - Handles a sequence number jump by 2K
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot
 *
 * This function implements the error handling for the case where the
 * sequence number of an MPDU suddenly jumps by 2K. Today there are two
 * cases that need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A), the protocol stack is invoked to generate a DELBA/DEAUTH
 * frame. For case B), the frame is normally dropped and no further
 * action is taken.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
             struct hal_rx_mpdu_desc_info *mpdu_desc_info,
             union dp_rx_desc_list_elem_t **head,
             union dp_rx_desc_list_elem_t **tail,
             uint32_t quota)
{
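    /*
     * For now both cases are handled the same way: drop every MSDU of
     * the offending MPDU. The DELBA/DEAUTH path for case A) above is
     * still a TODO.
     */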
    return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
                head, tail, quota);
}

/**
 * dp_rx_null_q_desc_handle() - Function to handle NULL Queue
 *                              descriptor violations on either a
 *                              REO or WBM ring
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @head: pointer to head of rx descriptors to be added to free list
 * @tail: pointer to tail of rx descriptors to be added to free list
 * @quota: upper limit of descriptors that can be reaped
 *
 * This function handles NULL queue descriptor violations arising out
 * of a missing REO queue for a given peer or a given TID. This typically
 * happens if a packet is received on a QoS-enabled TID before the ADDBA
 * negotiation for that TID has set up the TID queue. It may also happen
 * for MC/BC frames if they are not routed to the non-QoS TID queue, in
 * the absence of any other default TID queue.
 * This error can show up in both a REO destination ring and a WBM
 * release ring.
 *
 * Return: uint32_t: No. of Rx buffers reaped
 */
uint32_t
dp_rx_null_q_desc_handle(struct dp_soc *soc, void *ring_desc,
             union dp_rx_desc_list_elem_t **head,
             union dp_rx_desc_list_elem_t **tail,
             uint32_t quota)
{
    uint32_t rx_buf_cookie;
    struct dp_rx_desc *rx_desc;
    uint32_t rx_bufs_used = 0;
    uint32_t pkt_len, l2_hdr_offset;
    uint16_t msdu_len;
    qdf_nbuf_t nbuf;
    struct dp_pdev *pdev0;
    struct dp_vdev *vdev0;

    rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);
    rx_desc = dp_rx_cookie_2_va(soc, rx_buf_cookie);

    qdf_assert(rx_desc);

    rx_bufs_used++;

    nbuf = rx_desc->nbuf;

    qdf_nbuf_unmap_single(soc->osdev, nbuf,
                  QDF_DMA_BIDIRECTIONAL);

    rx_desc->rx_buf_start = qdf_nbuf_data(nbuf);

    l2_hdr_offset =
        hal_rx_msdu_end_l3_hdr_padding_get(rx_desc->rx_buf_start);

    msdu_len = hal_rx_msdu_start_msdu_len_get(rx_desc->rx_buf_start);
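
    /*
     * The frame in the buffer is laid out as
     * | RX_PKT_TLVS_LEN HW status TLVs | l2_hdr_offset padding | MSDU |,
     * so the total nbuf length is the sum of the three.
     */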
    pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;

    /* Set length in nbuf */
    qdf_nbuf_set_pktlen(nbuf, pkt_len);

    /*
     * Check if DMA completed -- msdu_done is the last bit
     * to be written
     */
    if (!hal_rx_attn_msdu_done_get(rx_desc->rx_buf_start)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              FL("nbuf->data 0x%p"), rx_desc->rx_buf_start);
        qdf_assert(0);
    }

    /*
     * Advance the packet start pointer by the total size of the
     * pre-header TLVs
     */
    qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);

    if (l2_hdr_offset)
        qdf_nbuf_pull_head(nbuf, l2_hdr_offset);
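
    /*
     * Interim delivery path: hand the frame to the OS-interface
     * callback of the first vdev on the first pdev. A proper
     * peer/vdev lookup from the descriptor is still a TODO.
     */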
    pdev0 = soc->pdev_list[0]; /* Hard code 0th elem */
    if (pdev0) {
        vdev0 = (struct dp_vdev *)TAILQ_FIRST(&pdev0->vdev_list);
        if (vdev0 && vdev0->osif_rx) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                  FL("pdev0 %p vdev0 %p osif_rx %p"), pdev0,
                  vdev0, vdev0->osif_rx);
            vdev0->osif_rx(vdev0->osif_vdev, nbuf);
        } else {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("INVALID vdev0 %p OR osif_rx"), vdev0);
        }
    } else {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              FL("INVALID pdev %p"), pdev0);
    }

    dp_rx_add_to_free_desc_list(head, tail, rx_desc);

    return rx_bufs_used;
}

/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 *                            (WBM), following error handling
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc)
{
    void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
    struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
    void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
    void *hal_soc = soc->hal_soc;
    QDF_STATUS status = QDF_STATUS_E_FAILURE;
    void *src_srng_desc;
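
    /*
     * Reserve one entry on the SW2WBM release ring and write the link
     * descriptor's buffer-address info into it, so that WBM hands the
     * descriptor back to HW per this function's kernel-doc.
     */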
    if (!wbm_rel_srng) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "WBM RELEASE RING not initialized");
        return status;
    }

    if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
        /* TODO */
        /*
         * Need API to convert from hal_ring pointer to
         * Ring Type / Ring Id combo
         */
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              FL("HAL RING Access For WBM Release SRNG Failed - %p"),
              wbm_rel_srng);
        goto done;
    }

    src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);

    if (qdf_likely(src_srng_desc)) {
        /* Return link descriptor through WBM ring (SW2WBM) */
        hal_rx_msdu_link_desc_set(hal_soc,
                      src_srng_desc, buf_addr_info);
        status = QDF_STATUS_SUCCESS;
    } else {
        struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              FL("WBM Release Ring (Id %d) Full"), srng->ring_id);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x",
              *srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
              *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
    }

done:
    hal_srng_access_end(hal_soc, wbm_rel_srng);
    return status;
}

/**
 * dp_rx_err_process() - Processes error frames routed to the REO error ring
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot
 *
 * This function implements error processing and is the top-level
 * demultiplexer for all the frames routed to the REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
    void *hal_soc;
    void *ring_desc;
    union dp_rx_desc_list_elem_t *head = NULL;
    union dp_rx_desc_list_elem_t *tail = NULL;
    uint32_t rx_bufs_used = 0;
    uint8_t buf_type;
    uint8_t error, rbm;
    struct hal_rx_mpdu_desc_info mpdu_desc_info;
    struct hal_buf_info hbi;

    /* Debug -- Remove later */
    qdf_assert(soc && hal_ring);

    hal_soc = soc->hal_soc;

    /* Debug -- Remove later */
    qdf_assert(hal_soc);

    if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
        /* TODO */
        /*
         * Need API to convert from hal_ring pointer to
         * Ring Type / Ring Id combo
         */
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              FL("HAL RING Access Failed -- %p"), hal_ring);
        goto done;
    }

    while (qdf_likely((ring_desc =
                hal_srng_dst_get_next(hal_soc, hal_ring))
                && quota--)) {
        error = HAL_RX_ERROR_STATUS_GET(ring_desc);

        qdf_assert(error == HAL_REO_ERROR_DETECTED);

        /*
         * Check if the buffer is to be processed on this processor
         */
        rbm = hal_rx_ret_buf_manager_get(ring_desc);

        if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
            /* TODO */
            /* Call appropriate handler */
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("Invalid RBM %d"), rbm);
            continue;
        }

        buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);

        /*
         * For the REO error ring, expect only MSDU LINK DESC
         */
        qdf_assert(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

        hal_rx_reo_buf_paddr_get(ring_desc, &hbi);

        /* Get the MPDU DESC info */
        hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
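
        /*
         * Demultiplex on the error type: 802.11 fragments first, then
         * PN check failures, then 2K sequence-number jumps; anything
         * else just has its link descriptor returned to WBM below.
         */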
        if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
            /* TODO */
            rx_bufs_used += dp_rx_frag_handle(soc,
                    ring_desc, &mpdu_desc_info,
                    &head, &tail, quota);
            continue;
        }

        if (hal_rx_reo_is_pn_error(ring_desc)) {
            /* TODO */
            rx_bufs_used += dp_rx_pn_error_handle(soc,
                    ring_desc, &mpdu_desc_info,
                    &head, &tail, quota);
            continue;
        }

        if (hal_rx_reo_is_2k_jump(ring_desc)) {
            /* TODO */
            rx_bufs_used += dp_rx_2k_jump_handle(soc,
                    ring_desc, &mpdu_desc_info,
                    &head, &tail, quota);
            continue;
        }

        /* Return link descriptor through WBM ring (SW2WBM) */
        dp_rx_link_desc_return(soc, ring_desc);
    }
done:
    hal_srng_access_end(hal_soc, hal_ring);

    /* Assume MAC id = 0, owner = 0 */
    if (rx_bufs_used)
        dp_rx_buffers_replenish(soc, 0, rx_bufs_used, &head, &tail,
                    HAL_RX_BUF_RBM_SW3_BM);

    return rx_bufs_used; /* Assume no scale factor for now */
}

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to the WBM
 *                           release ring
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot
 *
 * This function implements error processing and is the top-level
 * demultiplexer for all the frames routed to the WBM2HOST SW release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
    void *hal_soc;
    void *ring_desc;
    struct dp_rx_desc *rx_desc;
    union dp_rx_desc_list_elem_t *head = NULL;
    union dp_rx_desc_list_elem_t *tail = NULL;
    uint32_t rx_bufs_used = 0;
    uint8_t buf_type, rbm;
    uint8_t wbm_err_src;
    uint32_t rx_buf_cookie;

    /* Debug -- Remove later */
    qdf_assert(soc && hal_ring);

    hal_soc = soc->hal_soc;

    /* Debug -- Remove later */
    qdf_assert(hal_soc);

    if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
        /* TODO */
        /*
         * Need API to convert from hal_ring pointer to
         * Ring Type / Ring Id combo
         */
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              FL("HAL RING Access Failed -- %p"), hal_ring);
        goto done;
    }

    while (qdf_likely((ring_desc =
                hal_srng_dst_get_next(hal_soc, hal_ring))
                && quota--)) {
        /* XXX */
        wbm_err_src = HAL_RX_WBM_ERR_SRC_GET(ring_desc);

        qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
               (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));

        /*
         * Check if the buffer is to be processed on this processor
         */
        rbm = hal_rx_ret_buf_manager_get(ring_desc);

        if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
            /* TODO */
            /* Call appropriate handler */
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("Invalid RBM %d"), rbm);
            continue;
        }

        /* XXX */
        buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

        /*
         * For the WBM ring, expect only MSDU buffers
         */
        qdf_assert(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);
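
        /*
         * Entries can be pushed to this ring either by REO or by RXDMA;
         * decode the push reason and error code accordingly before
         * reclaiming the buffer.
         */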
        if (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
            uint8_t push_reason =
                HAL_RX_WBM_REO_PUSH_REASON_GET(ring_desc);

            if (push_reason == HAL_RX_WBM_REO_PSH_RSN_ERROR) {
                uint8_t reo_error_code =
                    HAL_RX_WBM_REO_ERROR_CODE_GET(ring_desc);

                switch (reo_error_code) {
                /*
                 * Handling for packets which have a NULL REO
                 * queue descriptor
                 */
                case HAL_REO_ERR_QUEUE_DESC_ADDR_0:
                case HAL_REO_ERR_REGULAR_FRAME_2K_JUMP:
                case HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET:
                    QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_WARN,
                          "Got pkt with REO ERROR: %d",
                          reo_error_code);
                    rx_bufs_used +=
                        dp_rx_null_q_desc_handle(soc,
                            ring_desc, &head,
                            &tail, quota);
                    continue;
                /* TODO */
                /* Add per error code accounting */
                default:
                    QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_ERROR,
                          "REO error %d detected",
                          reo_error_code);
                }
            }
        } else if (wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) {
            uint8_t push_reason =
                HAL_RX_WBM_RXDMA_PUSH_REASON_GET(ring_desc);

            if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
                uint8_t rxdma_error_code =
                    HAL_RX_WBM_RXDMA_ERROR_CODE_GET(ring_desc);

                switch (rxdma_error_code) {
                /* TODO */
                /* Add per error code accounting */
                default:
                    QDF_TRACE(QDF_MODULE_ID_DP,
                          QDF_TRACE_LEVEL_ERROR,
                          "RXDMA error %d detected",
                          rxdma_error_code);
                }
            }
        } else {
            /* Should not come here */
            qdf_assert(0);
        }
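
        /*
         * Default reclaim path for anything not consumed above: unmap
         * and free the nbuf, then return the SW descriptor to the local
         * free-list for replenish.
         */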
        rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);
        rx_desc = dp_rx_cookie_2_va(soc, rx_buf_cookie);

        qdf_assert(rx_desc);

        rx_bufs_used++;

        qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
                      QDF_DMA_BIDIRECTIONAL);

        qdf_nbuf_free(rx_desc->nbuf);

        dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
    }

done:
    hal_srng_access_end(hal_soc, hal_ring);

    /* Assume MAC id = 0, owner = 0 */
    if (rx_bufs_used)
        dp_rx_buffers_replenish(soc, 0, rx_bufs_used, &head, &tail,
                    HAL_RX_BUF_RBM_SW3_BM);

    return rx_bufs_used; /* Assume no scale factor for now */
}