dp_rx_err.c

/*
 * Copyright (c) 2016 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: HAL buffer info (paddr + sw_cookie) used to look up the
 *	      virtual address of the link descriptor. The cookie is
 *	      normally just an index into a per-SOC array of banks.
 *
 * This is the VA of the link descriptor that the HAL layer later uses to
 * retrieve the list of MSDUs for a given MPDU.
 *
 * Return: void *: virtual address of the MSDU link descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;

	/* TODO */
	/* Add sanity for cookie */
	link_desc_va = soc->link_desc_banks[buf_info->sw_cookie].base_vaddr +
		(buf_info->paddr -
		 soc->link_desc_banks[buf_info->sw_cookie].base_paddr);

	return link_desc_va;
}
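
/*
 * A minimal sketch of the cookie sanity check the TODO above asks for,
 * kept under #if 0 because two names it relies on are assumptions:
 * MAX_LINK_DESC_BANKS (a bound on soc->link_desc_banks[]) and the
 * per-bank "size" field do not exist in this file.
 */
#if 0
static inline bool dp_rx_link_desc_cookie_sane(struct dp_soc *soc,
					       struct hal_buf_info *buf_info)
{
	if (buf_info->sw_cookie >= MAX_LINK_DESC_BANKS)
		return false;

	/* the paddr must fall inside the bank the cookie points at */
	return (buf_info->paddr >=
		soc->link_desc_banks[buf_info->sw_cookie].base_paddr) &&
	       (buf_info->paddr <
		soc->link_desc_banks[buf_info->sw_cookie].base_paddr +
		soc->link_desc_banks[buf_info->sw_cookie].size);
}
#endif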
/**
 * dp_rx_frag_handle() - Handles fragmented Rx frames
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements Rx 802.11 fragmentation handling.
 * The handling is mostly the same as legacy fragmentation handling.
 * If required, this function can re-inject the frames back to the
 * REO ring (with proper settings to bypass the fragmentation check
 * but still use duplicate detection / re-ordering) and route these
 * frames to a different core.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_frag_handle(struct dp_soc *soc, void *ring_desc,
		  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		  union dp_rx_desc_list_elem_t **head,
		  union dp_rx_desc_list_elem_t **tail,
		  uint32_t quota)
{
	uint32_t rx_bufs_used = 0;

	return rx_bufs_used;
}
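
/*
 * The body above is still a stub. A hedged outline of the intended flow,
 * based only on the description in the header comment:
 *   1. resolve the MSDU link descriptor (dp_rx_cookie_2_link_desc_va())
 *	and walk the fragment buffers with hal_rx_msdu_list_get(),
 *   2. perform legacy-style 802.11 defragmentation on the collected
 *	fragments,
 *   3. either deliver the reassembled frame, or re-inject the MPDU into
 *	the REO ring with the fragmentation check bypassed, so REO still
 *	applies duplicate detection / re-ordering on another core.
 * Helper names for steps 2-3 are not defined anywhere in this file yet.
 */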
/**
 * dp_rx_msdus_drop() - Drops all MSDUs of an MPDU
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function is used to drop all MSDUs in an MPDU
 *
 * Return: uint32_t: No. of elements processed
 */
static uint32_t
dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		 union dp_rx_desc_list_elem_t **head,
		 union dp_rx_desc_list_elem_t **tail,
		 uint32_t quota)
{
	uint8_t num_msdus;
	uint32_t rx_bufs_used = 0;
	void *link_desc_va;
	struct hal_buf_info buf_info;
	struct hal_rx_msdu_list msdu_list; /* MSDUs per MPDU */
	int i;

	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);

	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
	qdf_assert(link_desc_va);

	/* No UNMAP required -- this is "malloc_consistent" memory */
	hal_rx_msdu_list_get(link_desc_va, &msdu_list, &num_msdus);

	for (i = 0; (i < HAL_RX_NUM_MSDU_DESC) && quota--; i++) {
		struct dp_rx_desc *rx_desc =
			dp_rx_cookie_2_va(soc, msdu_list.sw_cookie[i]);

		qdf_assert(rx_desc);
		rx_bufs_used++;

		/* Just free the buffers */
		qdf_nbuf_free(rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
	}

	return rx_bufs_used;
}
/**
 * dp_rx_pn_error_handle() - Handles PN check errors
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements PN error handling.
 * If the peer is configured to ignore PN check errors, or if DP
 * decides that the frame is still OK, the frame can be re-injected
 * back to REO to use some of the other REO features, e.g. duplicate
 * detection / routing to other cores.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
		      struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		      union dp_rx_desc_list_elem_t **head,
		      union dp_rx_desc_list_elem_t **tail,
		      uint32_t quota)
{
	uint16_t peer_id;
	uint32_t rx_bufs_used = 0;
	struct dp_peer *peer;
	bool peer_pn_policy = false;

	peer_id = DP_PEER_METADATA_PEER_ID_GET(
				mpdu_desc_info->peer_meta_data);

	peer = dp_peer_find_by_id(soc, peer_id);

	if (qdf_likely(peer)) {
		/*
		 * TODO: Check for peer specific policies & set peer_pn_policy
		 */
	}

	/* No peer PN policy -- definitely drop */
	if (!peer_pn_policy)
		rx_bufs_used = dp_rx_msdus_drop(soc, ring_desc,
						mpdu_desc_info,
						head, tail, quota);

	return rx_bufs_used;
}
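
/*
 * A hedged sketch of the policy lookup the TODO above refers to, kept
 * under #if 0: dp_peer::ignore_pn_errors is a hypothetical field named
 * here only for illustration -- no such per-peer security policy field
 * exists in this code yet.
 */
#if 0
	if (qdf_likely(peer))
		peer_pn_policy = peer->ignore_pn_errors;
#endif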
/**
 * dp_rx_2k_jump_handle() - Handles Sequence Number Jump by 2K
 *
 * @soc: core txrx main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @mpdu_desc_info: MPDU descriptor information from ring descriptor
 * @head: head of the local descriptor free-list
 * @tail: tail of the local descriptor free-list
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the error handling when the sequence number
 * of an MPDU suddenly jumps by 2K. Today there are two cases that
 * need to be handled:
 * A) CSN (Current Sequence Number) = Last Valid SN (LSN) + 2K
 * B) CSN = LSN + 2K, but falls within a "BA sized window" of the SSN
 * For case A), the protocol stack is invoked to generate a DELBA/DEAUTH
 * frame; for case B), the frame is simply dropped and no further action
 * is taken.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_2k_jump_handle(struct dp_soc *soc, void *ring_desc,
		     struct hal_rx_mpdu_desc_info *mpdu_desc_info,
		     union dp_rx_desc_list_elem_t **head,
		     union dp_rx_desc_list_elem_t **tail,
		     uint32_t quota)
{
	return dp_rx_msdus_drop(soc, ring_desc, mpdu_desc_info,
				head, tail, quota);
}
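
/*
 * Note: only the unconditional drop path (case B in the header comment)
 * is implemented so far; the case A path -- invoking the protocol stack
 * to generate DELBA/DEAUTH -- is still to be added.
 */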
/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 *			      (WBM), following error handling
 *
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, void *ring_desc)
{
	void *buf_addr_info = HAL_RX_REO_BUF_ADDR_INFO_GET(ring_desc);
	struct dp_srng *wbm_desc_rel_ring = &soc->wbm_desc_rel_ring;
	void *wbm_rel_srng = wbm_desc_rel_ring->hal_srng;
	void *hal_soc = soc->hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;

	if (!wbm_rel_srng) {
		qdf_print("WBM RELEASE RING not initialized\n");
		return status;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, wbm_rel_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s %d : "
			"HAL RING Access For WBM Release SRNG Failed -- %p\n",
			__func__, __LINE__, wbm_rel_srng);
		goto done;
	}

	src_srng_desc = hal_srng_src_get_next(hal_soc, wbm_rel_srng);

	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_msdu_link_desc_set(hal_soc,
				src_srng_desc, buf_addr_info);
		status = QDF_STATUS_SUCCESS;
	} else {
		struct hal_srng *srng = (struct hal_srng *)wbm_rel_srng;

		qdf_print("%s %d -- WBM Release Ring (Id %d) Full\n",
			__func__, __LINE__, srng->ring_id);
		qdf_print("HP 0x%x Reap HP 0x%x TP 0x%x Cached TP 0x%x\n",
			*srng->u.src_ring.hp_addr, srng->u.src_ring.reap_hp,
			*srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp);
	}

done:
	hal_srng_access_end(hal_soc, wbm_rel_srng);
	return status;
}
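
/*
 * The TODO above (repeated in the functions below) asks for an API to
 * map a hal_ring pointer to its ring type / ring id. A minimal sketch
 * for the ring-id half, assuming only that struct hal_srng exposes
 * ring_id -- which the "ring full" path above already relies on; the
 * ring-type half would need a HAL-side lookup and is left out.
 */
static inline int dp_rx_srng_ring_id(void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	return srng ? srng->ring_id : -1;
}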
/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to the REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t rx_bufs_used = 0;
	uint8_t buf_type;
	uint8_t error, rbm;
	struct hal_rx_mpdu_desc_info mpdu_desc_info;
	struct hal_buf_info hbi;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s %d : HAL RING Access Failed -- %p\n",
			__func__, __LINE__, hal_ring);
		goto done;
	}

	while (qdf_likely((ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring))
				&& quota--)) {

		error = HAL_RX_ERROR_STATUS_GET(ring_desc);

		qdf_assert(error == HAL_REO_ERROR_DETECTED);

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			qdf_print("%s %d: Invalid RBM %d\n",
				__func__, __LINE__, rbm);
			continue;
		}

		buf_type = HAL_RX_REO_BUF_TYPE_GET(ring_desc);

		/*
		 * For REO error ring, expect only MSDU LINK DESC
		 */
		qdf_assert(buf_type == HAL_RX_REO_MSDU_LINK_DESC_TYPE);

		hal_rx_reo_buf_paddr_get(ring_desc, &hbi);

		/* Get the MPDU DESC info */
		hal_rx_mpdu_info_get(ring_desc, &mpdu_desc_info);

		if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
			/* TODO */
			rx_bufs_used += dp_rx_frag_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}

		if (hal_rx_reo_is_pn_error(ring_desc)) {
			/* TODO */
			rx_bufs_used += dp_rx_pn_error_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}

		if (hal_rx_reo_is_2k_jump(ring_desc)) {
			/* TODO */
			rx_bufs_used += dp_rx_2k_jump_handle(soc,
					ring_desc, &mpdu_desc_info,
					&head, &tail, quota);
			continue;
		}

		/* Return link descriptor through WBM ring (SW2WBM) */
		dp_rx_link_desc_return(soc, ring_desc);
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	/* Assume MAC id = 0, owner = 0 */
	dp_rx_buffers_replenish(soc, 0, rx_bufs_used, &head, &tail,
				HAL_RX_BUF_RBM_SW3_BM);

	return rx_bufs_used; /* Assume no scale factor for now */
}
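
/*
 * Usage sketch (hedged): a NAPI/tasklet-style poll loop would typically
 * call this with its remaining budget, e.g.
 *
 *	work = dp_rx_err_process(soc, soc->reo_exception_ring.hal_srng,
 *				 budget);
 *
 * soc->reo_exception_ring is an assumed field name here -- this file
 * only shows that dp_srng instances hang off dp_soc (see
 * wbm_desc_rel_ring above).
 */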
/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 *
 * @soc: core txrx main context
 * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to the WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
{
	void *hal_soc;
	void *ring_desc;
	struct dp_rx_desc *rx_desc;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t rx_bufs_used = 0;
	uint8_t buf_type, rbm;
	uint8_t wbm_err_src;
	uint32_t rx_buf_cookie;

	/* Debug -- Remove later */
	qdf_assert(soc && hal_ring);

	hal_soc = soc->hal_soc;

	/* Debug -- Remove later */
	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s %d : HAL RING Access Failed -- %p\n",
			__func__, __LINE__, hal_ring);
		goto done;
	}

	while (qdf_likely((ring_desc =
				hal_srng_dst_get_next(hal_soc, hal_ring))
				&& quota--)) {

		/* XXX */
		wbm_err_src = HAL_RX_WBM_ERR_SRC_GET(ring_desc);

		qdf_assert((wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) ||
			   (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO));

		/*
		 * Check if the buffer is to be processed on this processor
		 */
		rbm = hal_rx_ret_buf_manager_get(ring_desc);

		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
			/* TODO */
			/* Call appropriate handler */
			qdf_print("%s %d: Invalid RBM %d\n",
				__func__, __LINE__, rbm);
			continue;
		}

		/* XXX */
		buf_type = HAL_RX_WBM_BUF_TYPE_GET(ring_desc);

		/*
		 * For WBM ring, expect only MSDU buffers
		 */
		qdf_assert(buf_type == HAL_RX_WBM_BUF_TYPE_REL_BUF);

		if (wbm_err_src == HAL_RX_WBM_ERR_SRC_REO) {
			uint8_t push_reason =
				HAL_RX_WBM_REO_PUSH_REASON_GET(ring_desc);

			if (push_reason == HAL_RX_WBM_REO_PSH_RSN_ERROR) {
				uint8_t reo_error_code =
				   HAL_RX_WBM_REO_ERROR_CODE_GET(ring_desc);

				switch (reo_error_code) {
				/* TODO */
				/* Add per error code accounting */
				default:
					qdf_print(
					"%s %d: REO error %d detected\n",
					__func__, __LINE__, reo_error_code);
				}
			}
		} else if (wbm_err_src == HAL_RX_WBM_ERR_SRC_RXDMA) {
			uint8_t push_reason =
				HAL_RX_WBM_RXDMA_PUSH_REASON_GET(ring_desc);

			if (push_reason == HAL_RX_WBM_RXDMA_PSH_RSN_ERROR) {
				uint8_t rxdma_error_code =
				   HAL_RX_WBM_RXDMA_ERROR_CODE_GET(ring_desc);

				switch (rxdma_error_code) {
				/* TODO */
				/* Add per error code accounting */
				default:
					qdf_print(
					"%s %d: RXDMA error %d detected\n",
					__func__, __LINE__, rxdma_error_code);
				}
			}
		} else {
			/* Should not come here */
			qdf_assert(0);
		}

		rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);

		rx_desc = dp_rx_cookie_2_va(soc, rx_buf_cookie);

		qdf_assert(rx_desc);

		rx_bufs_used++;

		qdf_nbuf_unmap_single(soc->osdev, rx_desc->nbuf,
				QDF_DMA_BIDIRECTIONAL);

		qdf_nbuf_free(rx_desc->nbuf);
		dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
	}

done:
	hal_srng_access_end(hal_soc, hal_ring);

	/* Assume MAC id = 0, owner = 0 */
	dp_rx_buffers_replenish(soc, 0, rx_bufs_used, &head, &tail,
				HAL_RX_BUF_RBM_SW3_BM);

	return rx_bufs_used; /* Assume no scale factor for now */
}
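
/*
 * Both switch statements above carry a TODO for per-error-code
 * accounting. A minimal sketch, kept under #if 0 because every name in
 * it is an assumption: neither a HAL_RX_WBM_ERR_CODE_MAX bound nor a
 * soc->stats.rx_err counter array exists in this code yet.
 */
#if 0
	if (reo_error_code < HAL_RX_WBM_ERR_CODE_MAX)
		soc->stats.rx_err.reo_error[reo_error_code]++;
	/* ...and likewise rxdma_error[] for the RXDMA switch */
#endif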