dp_li.c 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656
  1. /*
  2. * Copyright (c) 2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #include "dp_types.h"
  20. #include <dp_internal.h>
  21. #include <dp_htt.h>
  22. #include "dp_li.h"
  23. #include "dp_li_tx.h"
  24. #include "dp_tx_desc.h"
  25. #include "dp_li_rx.h"
  26. #include "dp_peer.h"
  27. #include <wlan_utility.h>
  28. #include "dp_ipa.h"
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/*
 * TCL to WBM ring mapping for single-pdev configurations.
 * Each entry ties a TCL data ring to the WBM completion ring number and
 * the return-buffer-manager (RBM) id used for buffers on that ring.
 */
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
	/*
	 * INVALID_WBM_RING_NUM implies re-use of an existing WBM2SW ring
	 * as indicated by rbm id.
	 */
	{1, INVALID_WBM_RING_NUM, HAL_LI_WBM_SW0_BM_ID, 0},
	{2, 2, HAL_LI_WBM_SW2_BM_ID, 0}
};
#else
/* TCL to WBM ring mapping for multi-pdev configurations. */
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
	{1, 1, HAL_LI_WBM_SW1_BM_ID, 0},
	{2, 2, HAL_LI_WBM_SW2_BM_ID, 0},
	/*
	 * Although using wbm_ring 4, wbm_ring 3 is mentioned in order to match
	 * with the tx_mask in dp_service_srngs. Please be careful while using
	 * this table anywhere else.
	 */
	{3, 3, HAL_LI_WBM_SW4_BM_ID, 0}
};
#endif
#ifdef IPA_WDI3_TX_TWO_PIPES
/**
 * dp_soc_cfg_update_tcl_wbm_map_for_ipa() - Patch the IPA TX alt ring entry
 * @cfg_ctx: soc cfg context holding the tcl_wbm map
 *
 * When IPA is enabled, the IPA alternate TX ring completes on WBM2SW
 * ring 4, so rewrite that entry of the tcl_wbm map to ring 4 with the
 * matching SW4 buffer-manager id. No-op when IPA is disabled at runtime.
 */
static inline void
dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *cfg_ctx)
{
	/* Leave the default map untouched unless IPA is actually enabled */
	if (!cfg_ctx->ipa_enabled)
		return;

	cfg_ctx->tcl_wbm_map_array[IPA_TX_ALT_RING_IDX].wbm_ring_num = 4;
	cfg_ctx->tcl_wbm_map_array[IPA_TX_ALT_RING_IDX].wbm_rbm_id =
							HAL_LI_WBM_SW4_BM_ID;
}
#else
/* Stub: nothing to remap when the IPA two-TX-pipe feature is compiled out */
static inline void
dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
#endif
  68. static void dp_soc_cfg_attach_li(struct dp_soc *soc)
  69. {
  70. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
  71. wlan_cfg_set_rx_rel_ring_id(soc_cfg_ctx, WBM2SW_REL_ERR_RING_NUM);
  72. soc_cfg_ctx->tcl_wbm_map_array = g_tcl_wbm_map_array;
  73. dp_soc_cfg_update_tcl_wbm_map_for_ipa(soc_cfg_ctx);
  74. }
  75. qdf_size_t dp_get_context_size_li(enum dp_context_type context_type)
  76. {
  77. switch (context_type) {
  78. case DP_CONTEXT_TYPE_SOC:
  79. return sizeof(struct dp_soc_li);
  80. case DP_CONTEXT_TYPE_PDEV:
  81. return sizeof(struct dp_pdev_li);
  82. case DP_CONTEXT_TYPE_VDEV:
  83. return sizeof(struct dp_vdev_li);
  84. case DP_CONTEXT_TYPE_PEER:
  85. return sizeof(struct dp_peer_li);
  86. default:
  87. return 0;
  88. }
  89. }
  90. qdf_size_t dp_mon_get_context_size_li(enum dp_context_type context_type)
  91. {
  92. switch (context_type) {
  93. case DP_CONTEXT_TYPE_MON_PDEV:
  94. return sizeof(struct dp_mon_pdev_li);
  95. case DP_CONTEXT_TYPE_MON_SOC:
  96. return sizeof(struct dp_mon_soc_li);
  97. default:
  98. return 0;
  99. }
  100. }
/**
 * dp_soc_attach_li() - Arch-specific (Li) soc attach
 * @soc: DP soc handle
 * @params: soc attach params (unused on Li)
 *
 * Caches the HAL WBM SW0 buffer-manager id used for TX buffer returns.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_soc_attach_li(struct dp_soc *soc,
				   struct cdp_soc_attach_params *params)
{
	soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();

	return QDF_STATUS_SUCCESS;
}

/* dp_soc_detach_li() - No Li-specific soc detach work required */
static QDF_STATUS dp_soc_detach_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

/* dp_soc_init_li() - No Li-specific soc init work required */
static QDF_STATUS dp_soc_init_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

/* dp_soc_deinit_li() - No Li-specific soc deinit work required */
static QDF_STATUS dp_soc_deinit_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
/* dp_pdev_attach_li() - No Li-specific pdev attach work required */
static QDF_STATUS dp_pdev_attach_li(struct dp_pdev *pdev,
				    struct cdp_pdev_attach_params *params)
{
	return QDF_STATUS_SUCCESS;
}

/* dp_pdev_detach_li() - No Li-specific pdev detach work required */
static QDF_STATUS dp_pdev_detach_li(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

/* dp_vdev_attach_li() - No Li-specific vdev attach work required */
static QDF_STATUS dp_vdev_attach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

/* dp_vdev_detach_li() - No Li-specific vdev detach work required */
static QDF_STATUS dp_vdev_detach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}
#ifdef AST_OFFLOAD_ENABLE
/**
 * dp_peer_map_detach_li() - Tear down WDS, AST and MEC peer-map state
 * @soc: DP soc handle
 */
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
	dp_soc_wds_detach(soc);
	dp_peer_ast_table_detach(soc);
	dp_peer_ast_hash_detach(soc);
	dp_peer_mec_hash_detach(soc);
}

/**
 * dp_peer_map_attach_li() - Set up AST table/hash, MEC hash and WDS
 * @soc: DP soc handle
 *
 * On any intermediate failure, already-attached resources are released
 * via the goto-based unwind below before returning the error.
 *
 * Return: QDF_STATUS_SUCCESS or the first failing sub-attach status
 */
static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
	QDF_STATUS status;

	/* On Li, peer ids and peer indices share one numbering space */
	soc->max_peer_id = soc->max_peers;

	status = dp_peer_ast_table_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	status = dp_peer_ast_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto ast_table_detach;

	status = dp_peer_mec_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto hash_detach;

	dp_soc_wds_attach(soc);

	return QDF_STATUS_SUCCESS;

hash_detach:
	dp_peer_ast_hash_detach(soc);
ast_table_detach:
	dp_peer_ast_table_detach(soc);

	return status;
}
#else
/* Stub: no host AST state when AST offload support is compiled out */
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
}

static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
	soc->max_peer_id = soc->max_peers;

	return QDF_STATUS_SUCCESS;
}
#endif
/**
 * dp_get_soc_context_size_li() - Size of the common dp_soc context on Li
 *
 * Return: sizeof(struct dp_soc)
 */
qdf_size_t dp_get_soc_context_size_li(void)
{
	return sizeof(struct dp_soc);
}
  179. #ifdef NO_RX_PKT_HDR_TLV
  180. /**
  181. * dp_rxdma_ring_sel_cfg_li() - Setup RXDMA ring config
  182. * @soc: Common DP soc handle
  183. *
  184. * Return: QDF_STATUS
  185. */
  186. static QDF_STATUS
  187. dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
  188. {
  189. int i;
  190. int mac_id;
  191. struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
  192. struct dp_srng *rx_mac_srng;
  193. QDF_STATUS status = QDF_STATUS_SUCCESS;
  194. htt_tlv_filter.mpdu_start = 1;
  195. htt_tlv_filter.msdu_start = 1;
  196. htt_tlv_filter.mpdu_end = 1;
  197. htt_tlv_filter.msdu_end = 1;
  198. htt_tlv_filter.attention = 1;
  199. htt_tlv_filter.packet = 1;
  200. htt_tlv_filter.packet_header = 0;
  201. htt_tlv_filter.ppdu_start = 0;
  202. htt_tlv_filter.ppdu_end = 0;
  203. htt_tlv_filter.ppdu_end_user_stats = 0;
  204. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  205. htt_tlv_filter.ppdu_end_status_done = 0;
  206. htt_tlv_filter.enable_fp = 1;
  207. htt_tlv_filter.enable_md = 0;
  208. htt_tlv_filter.enable_md = 0;
  209. htt_tlv_filter.enable_mo = 0;
  210. htt_tlv_filter.fp_mgmt_filter = 0;
  211. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
  212. htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
  213. FILTER_DATA_MCAST |
  214. FILTER_DATA_DATA);
  215. htt_tlv_filter.mo_mgmt_filter = 0;
  216. htt_tlv_filter.mo_ctrl_filter = 0;
  217. htt_tlv_filter.mo_data_filter = 0;
  218. htt_tlv_filter.md_data_filter = 0;
  219. htt_tlv_filter.offset_valid = true;
  220. htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
  221. /*Not subscribing rx_pkt_header*/
  222. htt_tlv_filter.rx_header_offset = 0;
  223. htt_tlv_filter.rx_mpdu_start_offset =
  224. hal_rx_mpdu_start_offset_get(soc->hal_soc);
  225. htt_tlv_filter.rx_mpdu_end_offset =
  226. hal_rx_mpdu_end_offset_get(soc->hal_soc);
  227. htt_tlv_filter.rx_msdu_start_offset =
  228. hal_rx_msdu_start_offset_get(soc->hal_soc);
  229. htt_tlv_filter.rx_msdu_end_offset =
  230. hal_rx_msdu_end_offset_get(soc->hal_soc);
  231. htt_tlv_filter.rx_attn_offset =
  232. hal_rx_attn_offset_get(soc->hal_soc);
  233. for (i = 0; i < MAX_PDEV_CNT; i++) {
  234. struct dp_pdev *pdev = soc->pdev_list[i];
  235. if (!pdev)
  236. continue;
  237. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  238. int mac_for_pdev =
  239. dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
  240. /*
  241. * Obtain lmac id from pdev to access the LMAC ring
  242. * in soc context
  243. */
  244. int lmac_id =
  245. dp_get_lmac_id_for_pdev_id(soc, mac_id,
  246. pdev->pdev_id);
  247. rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
  248. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  249. rx_mac_srng->hal_srng,
  250. RXDMA_BUF, RX_DATA_BUFFER_SIZE,
  251. &htt_tlv_filter);
  252. }
  253. }
  254. return status;
  255. }
  256. #else
  257. static QDF_STATUS
  258. dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
  259. {
  260. int i;
  261. int mac_id;
  262. struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
  263. struct dp_srng *rx_mac_srng;
  264. QDF_STATUS status = QDF_STATUS_SUCCESS;
  265. htt_tlv_filter.mpdu_start = 1;
  266. htt_tlv_filter.msdu_start = 1;
  267. htt_tlv_filter.mpdu_end = 1;
  268. htt_tlv_filter.msdu_end = 1;
  269. htt_tlv_filter.attention = 1;
  270. htt_tlv_filter.packet = 1;
  271. htt_tlv_filter.packet_header = 1;
  272. htt_tlv_filter.ppdu_start = 0;
  273. htt_tlv_filter.ppdu_end = 0;
  274. htt_tlv_filter.ppdu_end_user_stats = 0;
  275. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  276. htt_tlv_filter.ppdu_end_status_done = 0;
  277. htt_tlv_filter.enable_fp = 1;
  278. htt_tlv_filter.enable_md = 0;
  279. htt_tlv_filter.enable_md = 0;
  280. htt_tlv_filter.enable_mo = 0;
  281. htt_tlv_filter.fp_mgmt_filter = 0;
  282. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
  283. htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
  284. FILTER_DATA_MCAST |
  285. FILTER_DATA_DATA);
  286. htt_tlv_filter.mo_mgmt_filter = 0;
  287. htt_tlv_filter.mo_ctrl_filter = 0;
  288. htt_tlv_filter.mo_data_filter = 0;
  289. htt_tlv_filter.md_data_filter = 0;
  290. htt_tlv_filter.offset_valid = true;
  291. htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
  292. htt_tlv_filter.rx_header_offset =
  293. hal_rx_pkt_tlv_offset_get(soc->hal_soc);
  294. htt_tlv_filter.rx_mpdu_start_offset =
  295. hal_rx_mpdu_start_offset_get(soc->hal_soc);
  296. htt_tlv_filter.rx_mpdu_end_offset =
  297. hal_rx_mpdu_end_offset_get(soc->hal_soc);
  298. htt_tlv_filter.rx_msdu_start_offset =
  299. hal_rx_msdu_start_offset_get(soc->hal_soc);
  300. htt_tlv_filter.rx_msdu_end_offset =
  301. hal_rx_msdu_end_offset_get(soc->hal_soc);
  302. htt_tlv_filter.rx_attn_offset =
  303. hal_rx_attn_offset_get(soc->hal_soc);
  304. for (i = 0; i < MAX_PDEV_CNT; i++) {
  305. struct dp_pdev *pdev = soc->pdev_list[i];
  306. if (!pdev)
  307. continue;
  308. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  309. int mac_for_pdev =
  310. dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
  311. /*
  312. * Obtain lmac id from pdev to access the LMAC ring
  313. * in soc context
  314. */
  315. int lmac_id =
  316. dp_get_lmac_id_for_pdev_id(soc, mac_id,
  317. pdev->pdev_id);
  318. rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
  319. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  320. rx_mac_srng->hal_srng,
  321. RXDMA_BUF, RX_DATA_BUFFER_SIZE,
  322. &htt_tlv_filter);
  323. }
  324. }
  325. return status;
  326. }
  327. #endif
  328. #ifdef QCA_DP_ENABLE_TX_COMP_RING4
  329. static inline
  330. void dp_deinit_txcomp_ring4(struct dp_soc *soc)
  331. {
  332. if (soc) {
  333. wlan_minidump_remove(soc->tx_comp_ring[3].base_vaddr_unaligned,
  334. soc->tx_comp_ring[3].alloc_size,
  335. soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
  336. "Transmit_completion_ring");
  337. dp_srng_deinit(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE, 0);
  338. }
  339. }
  340. static inline
  341. QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
  342. {
  343. if (soc) {
  344. if (dp_srng_init(soc, &soc->tx_comp_ring[3],
  345. WBM2SW_RELEASE, WBM2SW_TXCOMP_RING4_NUM, 0)) {
  346. dp_err("%pK: dp_srng_init failed for rx_rel_ring",
  347. soc);
  348. return QDF_STATUS_E_FAILURE;
  349. }
  350. wlan_minidump_log(soc->tx_comp_ring[3].base_vaddr_unaligned,
  351. soc->tx_comp_ring[3].alloc_size,
  352. soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
  353. "Transmit_completion_ring");
  354. }
  355. return QDF_STATUS_SUCCESS;
  356. }
  357. static inline
  358. void dp_free_txcomp_ring4(struct dp_soc *soc)
  359. {
  360. if (soc)
  361. dp_srng_free(soc, &soc->tx_comp_ring[3]);
  362. }
  363. static inline
  364. QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
  365. uint32_t cached)
  366. {
  367. if (soc) {
  368. if (dp_srng_alloc(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE,
  369. tx_comp_ring_size, cached)) {
  370. dp_err("dp_srng_alloc failed for tx_comp_ring");
  371. return QDF_STATUS_E_FAILURE;
  372. }
  373. }
  374. return QDF_STATUS_SUCCESS;
  375. }
  376. #else
  377. static inline
  378. void dp_deinit_txcomp_ring4(struct dp_soc *soc)
  379. {
  380. }
  381. static inline
  382. QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
  383. {
  384. return QDF_STATUS_SUCCESS;
  385. }
  386. static inline
  387. void dp_free_txcomp_ring4(struct dp_soc *soc)
  388. {
  389. }
  390. static inline
  391. QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
  392. uint32_t cached)
  393. {
  394. return QDF_STATUS_SUCCESS;
  395. }
  396. #endif
/* dp_soc_srng_deinit_li() - Deinit Li-specific SRNGs (TX completion ring 4) */
static void dp_soc_srng_deinit_li(struct dp_soc *soc)
{
	/* Tx Complete ring */
	dp_deinit_txcomp_ring4(soc);
}

/* dp_soc_srng_free_li() - Free Li-specific SRNG memory (TX comp ring 4) */
static void dp_soc_srng_free_li(struct dp_soc *soc)
{
	dp_free_txcomp_ring4(soc);
}
  406. static QDF_STATUS dp_soc_srng_alloc_li(struct dp_soc *soc)
  407. {
  408. uint32_t tx_comp_ring_size;
  409. uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
  410. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  411. soc_cfg_ctx = soc->wlan_cfg_ctx;
  412. tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
  413. /* Disable cached desc if NSS offload is enabled */
  414. if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
  415. cached = 0;
  416. if (dp_alloc_txcomp_ring4(soc, tx_comp_ring_size, cached))
  417. goto fail1;
  418. return QDF_STATUS_SUCCESS;
  419. fail1:
  420. dp_soc_srng_free_li(soc);
  421. return QDF_STATUS_E_NOMEM;
  422. }
  423. static QDF_STATUS dp_soc_srng_init_li(struct dp_soc *soc)
  424. {
  425. /* Tx comp ring 3 */
  426. if (dp_init_txcomp_ring4(soc))
  427. goto fail1;
  428. return QDF_STATUS_SUCCESS;
  429. fail1:
  430. /*
  431. * Cleanup will be done as part of soc_detach, which will
  432. * be called on pdev attach failure
  433. */
  434. dp_soc_srng_deinit_li(soc);
  435. return QDF_STATUS_E_FAILURE;
  436. }
/* dp_tx_implicit_rbm_set_li() - No-op on Li; implicit RBM is not used here */
static void dp_tx_implicit_rbm_set_li(struct dp_soc *soc,
				      uint8_t tx_ring_id,
				      uint8_t bm_id)
{
}

/*
 * dp_txrx_set_vdev_param_li() - No Li-specific vdev param handling;
 * always reports success so generic param handling proceeds.
 */
static QDF_STATUS dp_txrx_set_vdev_param_li(struct dp_soc *soc,
					    struct dp_vdev *vdev,
					    enum cdp_vdev_param_type param,
					    cdp_config_param_type val)
{
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_intrabss_handle_nawds_li() - NAWDS intra-BSS forwarding is not
 * handled in the Li arch path; always returns false (not consumed).
 */
bool
dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats)
{
	return false;
}
  456. static struct dp_peer *dp_find_peer_by_destmac_li(struct dp_soc *soc,
  457. uint8_t *dest_mac,
  458. uint8_t vdev_id)
  459. {
  460. struct dp_peer *peer = NULL;
  461. struct dp_ast_entry *ast_entry = NULL;
  462. uint16_t peer_id;
  463. qdf_spin_lock_bh(&soc->ast_lock);
  464. ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, dest_mac, vdev_id);
  465. if (!ast_entry) {
  466. qdf_spin_unlock_bh(&soc->ast_lock);
  467. dp_err("NULL ast entry");
  468. return NULL;
  469. }
  470. peer_id = ast_entry->peer_id;
  471. qdf_spin_unlock_bh(&soc->ast_lock);
  472. if (peer_id == HTT_INVALID_PEER)
  473. return NULL;
  474. peer = dp_peer_get_ref_by_id(soc, peer_id,
  475. DP_MOD_ID_SAWF);
  476. return peer;
  477. }
/**
 * dp_get_rx_hash_key_li() - Fill the RX (LRO/flow) hash key bytes
 * @soc: DP soc handle (unused on Li)
 * @lro_hash: config structure whose key bytes are populated
 */
static void dp_get_rx_hash_key_li(struct dp_soc *soc,
				  struct cdp_lro_hash_config *lro_hash)
{
	dp_get_rx_hash_key_bytes(lro_hash);
}
/**
 * dp_initialize_arch_ops_li() - Populate the per-arch ops table for Li
 * @arch_ops: arch ops table to fill in
 *
 * Every callback installed here points at the Li implementation defined
 * in this file or the dp_li_* modules; the generic DP layer dispatches
 * through this table.
 */
void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
{
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	/* TX/RX datapath hot-path hooks */
	arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_li;
	arch_ops->dp_rx_process = dp_rx_process_li;
	arch_ops->tx_comp_get_params_from_hal_desc =
		dp_tx_comp_get_params_from_hal_desc_li;
	arch_ops->dp_tx_process_htt_completion =
			dp_tx_process_htt_completion_li;
	arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
			dp_wbm_get_rx_desc_from_hal_desc_li;
	arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_li;
	arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_li;
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_li;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_li;
	arch_ops->dp_tx_compute_hw_delay = dp_tx_compute_tx_delay_li;
#else
	/* Host data path disabled: only generic desc-pool handling needed */
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_generic;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_generic;
#endif
	/* Context sizing and soc/pdev/vdev lifecycle */
	arch_ops->txrx_get_context_size = dp_get_context_size_li;
	arch_ops->txrx_get_mon_context_size = dp_mon_get_context_size_li;
	arch_ops->txrx_soc_attach = dp_soc_attach_li;
	arch_ops->txrx_soc_detach = dp_soc_detach_li;
	arch_ops->txrx_soc_init = dp_soc_init_li;
	arch_ops->txrx_soc_deinit = dp_soc_deinit_li;
	arch_ops->txrx_soc_srng_alloc = dp_soc_srng_alloc_li;
	arch_ops->txrx_soc_srng_init = dp_soc_srng_init_li;
	arch_ops->txrx_soc_srng_deinit = dp_soc_srng_deinit_li;
	arch_ops->txrx_soc_srng_free = dp_soc_srng_free_li;
	arch_ops->txrx_pdev_attach = dp_pdev_attach_li;
	arch_ops->txrx_pdev_detach = dp_pdev_detach_li;
	arch_ops->txrx_vdev_attach = dp_vdev_attach_li;
	arch_ops->txrx_vdev_detach = dp_vdev_detach_li;
	/* Peer map and misc helpers */
	arch_ops->txrx_peer_map_attach = dp_peer_map_attach_li;
	arch_ops->txrx_peer_map_detach = dp_peer_map_detach_li;
	arch_ops->get_rx_hash_key = dp_get_rx_hash_key_li;
	arch_ops->dp_rx_desc_cookie_2_va =
			dp_rx_desc_cookie_2_va_li;
	arch_ops->dp_rx_intrabss_handle_nawds = dp_rx_intrabss_handle_nawds_li;
	arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li;
	arch_ops->dp_rx_peer_metadata_peer_id_get =
					dp_rx_peer_metadata_peer_id_get_li;
	arch_ops->soc_cfg_attach = dp_soc_cfg_attach_li;
	arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_li;
	arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_li;
	arch_ops->txrx_print_peer_stats = dp_print_peer_txrx_stats_li;
	arch_ops->dp_peer_rx_reorder_queue_setup =
					dp_peer_rx_reorder_queue_setup_li;
	arch_ops->dp_find_peer_by_destmac = dp_find_peer_by_destmac_li;
}
  534. #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
  535. void dp_tx_comp_get_prefetched_params_from_hal_desc(
  536. struct dp_soc *soc,
  537. void *tx_comp_hal_desc,
  538. struct dp_tx_desc_s **r_tx_desc)
  539. {
  540. uint8_t pool_id;
  541. uint32_t tx_desc_id;
  542. tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
  543. pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
  544. DP_TX_DESC_ID_POOL_OS;
  545. /* Find Tx descriptor */
  546. *r_tx_desc = dp_tx_desc_find(soc, pool_id,
  547. (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
  548. DP_TX_DESC_ID_PAGE_OS,
  549. (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
  550. DP_TX_DESC_ID_OFFSET_OS);
  551. qdf_prefetch((uint8_t *)*r_tx_desc);
  552. }
  553. #endif
  554. #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
  555. void dp_pkt_add_timestamp(struct dp_vdev *vdev,
  556. enum qdf_pkt_timestamp_index index, uint64_t time,
  557. qdf_nbuf_t nbuf)
  558. {
  559. if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
  560. uint64_t tsf_time;
  561. if (vdev->get_tsf_time) {
  562. vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
  563. qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
  564. }
  565. }
  566. }
  567. void dp_pkt_get_timestamp(uint64_t *time)
  568. {
  569. if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
  570. *time = qdf_get_log_timestamp();
  571. }
  572. #endif