dp_li.c
/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include "dp_li.h"
#include "dp_li_tx.h"
#include "dp_tx_desc.h"
#include "dp_li_rx.h"
#include "dp_peer.h"
#include <wlan_utility.h>
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon_1.0.h>
#endif
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
        {.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
        /*
         * INVALID_WBM_RING_NUM implies re-use of an existing WBM2SW ring
         * as indicated by the rbm id.
         */
        {1, INVALID_WBM_RING_NUM, HAL_LI_WBM_SW0_BM_ID, 0},
        {2, 2, HAL_LI_WBM_SW2_BM_ID, 0}
};
#else
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
        {.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
        {1, 1, HAL_LI_WBM_SW1_BM_ID, 0},
        {2, 2, HAL_LI_WBM_SW2_BM_ID, 0},
        /*
         * Although WBM ring 4 is used, WBM ring 3 is specified here in order
         * to match the tx_mask in dp_service_srngs. Please be careful when
         * using this table anywhere else.
         */
        {3, 3, HAL_LI_WBM_SW4_BM_ID, 0}
};
#endif
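
/**
 * dp_soc_cfg_update_tcl_wbm_map_for_ipa() - Point the IPA alternate TX ring
 *	entry of the TCL/WBM map at WBM2SW ring 4 when IPA is enabled
 * @cfg_ctx: DP soc cfg context
 *
 * Compiles to a no-op when IPA_WDI3_TX_TWO_PIPES is not defined, and returns
 * early when IPA is disabled in the soc cfg.
 *
 * Return: None
 */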
#ifdef IPA_WDI3_TX_TWO_PIPES
static inline void
dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *cfg_ctx)
{
        if (!cfg_ctx->ipa_enabled)
                return;

        cfg_ctx->tcl_wbm_map_array[IPA_TX_ALT_RING_IDX].wbm_ring_num = 4;
        cfg_ctx->tcl_wbm_map_array[IPA_TX_ALT_RING_IDX].wbm_rbm_id =
                                                        HAL_LI_WBM_SW4_BM_ID;
}
#else
static inline void
dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
#endif
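
/**
 * dp_soc_cfg_attach_li() - Attach LI-specific soc configuration: set the RX
 *	release ring id and install the TCL-to-WBM ring map defined above
 * @soc: DP soc handle
 *
 * Return: None
 */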
static void dp_soc_cfg_attach_li(struct dp_soc *soc)
{
        struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;

        wlan_cfg_set_rx_rel_ring_id(soc_cfg_ctx, WBM2SW_REL_ERR_RING_NUM);

        soc_cfg_ctx->tcl_wbm_map_array = g_tcl_wbm_map_array;
        dp_soc_cfg_update_tcl_wbm_map_for_ipa(soc_cfg_ctx);
}
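
/**
 * dp_get_context_size_li() - Size of the LI-specific portion of a DP context
 * @context_type: soc, pdev, vdev or peer context type
 *
 * Return: size in bytes of the matching dp_*_li structure, or 0 for an
 *	   unknown type
 */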
qdf_size_t dp_get_context_size_li(enum dp_context_type context_type)
{
        switch (context_type) {
        case DP_CONTEXT_TYPE_SOC:
                return sizeof(struct dp_soc_li);
        case DP_CONTEXT_TYPE_PDEV:
                return sizeof(struct dp_pdev_li);
        case DP_CONTEXT_TYPE_VDEV:
                return sizeof(struct dp_vdev_li);
        case DP_CONTEXT_TYPE_PEER:
                return sizeof(struct dp_peer_li);
        default:
                return 0;
        }
}
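
/**
 * dp_soc_attach_li() - Arch-specific soc attach for LI targets; caches the
 *	WBM SW0 buffer-manager id from HAL. The remaining soc/pdev/vdev
 *	attach, detach, init and deinit callbacks below are no-op stubs.
 * @soc: DP soc handle
 * @params: soc attach params
 *
 * Return: QDF_STATUS_SUCCESS
 */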
static QDF_STATUS dp_soc_attach_li(struct dp_soc *soc,
                                   struct cdp_soc_attach_params *params)
{
        soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();

        return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_detach_li(struct dp_soc *soc)
{
        return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_init_li(struct dp_soc *soc)
{
        return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_deinit_li(struct dp_soc *soc)
{
        return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_pdev_attach_li(struct dp_pdev *pdev,
                                    struct cdp_pdev_attach_params *params)
{
        return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_pdev_detach_li(struct dp_pdev *pdev)
{
        return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_vdev_attach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
        return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_vdev_detach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
        return QDF_STATUS_SUCCESS;
}
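
/*
 * dp_peer_map_attach_li() / dp_peer_map_detach_li() - With AST_OFFLOAD_ENABLE,
 * set up and tear down host AST/MEC tracking (AST table, AST hash, MEC hash
 * and WDS); otherwise only max_peer_id is initialized on attach.
 */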
#ifdef AST_OFFLOAD_ENABLE
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
        dp_soc_wds_detach(soc);
        dp_peer_ast_table_detach(soc);
        dp_peer_ast_hash_detach(soc);
        dp_peer_mec_hash_detach(soc);
}

static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
        QDF_STATUS status;

        soc->max_peer_id = soc->max_peers;

        status = dp_peer_ast_table_attach(soc);
        if (!QDF_IS_STATUS_SUCCESS(status))
                return status;

        status = dp_peer_ast_hash_attach(soc);
        if (!QDF_IS_STATUS_SUCCESS(status))
                goto ast_table_detach;

        status = dp_peer_mec_hash_attach(soc);
        if (!QDF_IS_STATUS_SUCCESS(status))
                goto hash_detach;

        dp_soc_wds_attach(soc);

        return QDF_STATUS_SUCCESS;

hash_detach:
        dp_peer_ast_hash_detach(soc);
ast_table_detach:
        dp_peer_ast_table_detach(soc);

        return status;
}
#else
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
}

static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
        soc->max_peer_id = soc->max_peers;

        return QDF_STATUS_SUCCESS;
}
#endif

static QDF_STATUS dp_peer_setup_li(struct dp_soc *soc, struct dp_peer *peer)
{
        return QDF_STATUS_SUCCESS;
}
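
/**
 * dp_get_soc_context_size_li() - Full DP soc context size for LI targets
 *
 * Return: sizeof(struct dp_soc)
 */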
qdf_size_t dp_get_soc_context_size_li(void)
{
        return sizeof(struct dp_soc);
}
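
/*
 * Two variants of dp_rxdma_ring_sel_cfg_li() follow. With NO_RX_PKT_HDR_TLV
 * the rx_pkt_header TLV is not subscribed (packet_header = 0, header offset
 * 0); otherwise the packet header TLV and its offset are programmed as well.
 */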
#ifdef NO_RX_PKT_HDR_TLV
/**
 * dp_rxdma_ring_sel_cfg_li() - Setup RXDMA ring config
 * @soc: Common DP soc handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
{
        int i;
        int mac_id;
        struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
        struct dp_srng *rx_mac_srng;
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        htt_tlv_filter.mpdu_start = 1;
        htt_tlv_filter.msdu_start = 1;
        htt_tlv_filter.mpdu_end = 1;
        htt_tlv_filter.msdu_end = 1;
        htt_tlv_filter.attention = 1;
        htt_tlv_filter.packet = 1;
        htt_tlv_filter.packet_header = 0;

        htt_tlv_filter.ppdu_start = 0;
        htt_tlv_filter.ppdu_end = 0;
        htt_tlv_filter.ppdu_end_user_stats = 0;
        htt_tlv_filter.ppdu_end_user_stats_ext = 0;
        htt_tlv_filter.ppdu_end_status_done = 0;
        htt_tlv_filter.enable_fp = 1;
        htt_tlv_filter.enable_md = 0;
        htt_tlv_filter.enable_mo = 0;
        htt_tlv_filter.fp_mgmt_filter = 0;
        htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
        htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
                                         FILTER_DATA_MCAST |
                                         FILTER_DATA_DATA);
        htt_tlv_filter.mo_mgmt_filter = 0;
        htt_tlv_filter.mo_ctrl_filter = 0;
        htt_tlv_filter.mo_data_filter = 0;
        htt_tlv_filter.md_data_filter = 0;

        htt_tlv_filter.offset_valid = true;

        htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
        /* Not subscribing to rx_pkt_header */
        htt_tlv_filter.rx_header_offset = 0;
        htt_tlv_filter.rx_mpdu_start_offset =
                                hal_rx_mpdu_start_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_mpdu_end_offset =
                                hal_rx_mpdu_end_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_msdu_start_offset =
                                hal_rx_msdu_start_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_msdu_end_offset =
                                hal_rx_msdu_end_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_attn_offset =
                                hal_rx_attn_offset_get(soc->hal_soc);

        for (i = 0; i < MAX_PDEV_CNT; i++) {
                struct dp_pdev *pdev = soc->pdev_list[i];

                if (!pdev)
                        continue;

                for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
                        int mac_for_pdev =
                                dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
                        /*
                         * Obtain lmac id from pdev to access the LMAC ring
                         * in soc context
                         */
                        int lmac_id =
                                dp_get_lmac_id_for_pdev_id(soc, mac_id,
                                                           pdev->pdev_id);

                        rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
                        htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
                                            rx_mac_srng->hal_srng,
                                            RXDMA_BUF, RX_DATA_BUFFER_SIZE,
                                            &htt_tlv_filter);
                }
        }

        return status;
}
#else
static QDF_STATUS
dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
{
        int i;
        int mac_id;
        struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
        struct dp_srng *rx_mac_srng;
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        htt_tlv_filter.mpdu_start = 1;
        htt_tlv_filter.msdu_start = 1;
        htt_tlv_filter.mpdu_end = 1;
        htt_tlv_filter.msdu_end = 1;
        htt_tlv_filter.attention = 1;
        htt_tlv_filter.packet = 1;
        htt_tlv_filter.packet_header = 1;

        htt_tlv_filter.ppdu_start = 0;
        htt_tlv_filter.ppdu_end = 0;
        htt_tlv_filter.ppdu_end_user_stats = 0;
        htt_tlv_filter.ppdu_end_user_stats_ext = 0;
        htt_tlv_filter.ppdu_end_status_done = 0;
        htt_tlv_filter.enable_fp = 1;
        htt_tlv_filter.enable_md = 0;
        htt_tlv_filter.enable_mo = 0;
        htt_tlv_filter.fp_mgmt_filter = 0;
        htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
        htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
                                         FILTER_DATA_MCAST |
                                         FILTER_DATA_DATA);
        htt_tlv_filter.mo_mgmt_filter = 0;
        htt_tlv_filter.mo_ctrl_filter = 0;
        htt_tlv_filter.mo_data_filter = 0;
        htt_tlv_filter.md_data_filter = 0;

        htt_tlv_filter.offset_valid = true;

        htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
        htt_tlv_filter.rx_header_offset =
                                hal_rx_pkt_tlv_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_mpdu_start_offset =
                                hal_rx_mpdu_start_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_mpdu_end_offset =
                                hal_rx_mpdu_end_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_msdu_start_offset =
                                hal_rx_msdu_start_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_msdu_end_offset =
                                hal_rx_msdu_end_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_attn_offset =
                                hal_rx_attn_offset_get(soc->hal_soc);

        for (i = 0; i < MAX_PDEV_CNT; i++) {
                struct dp_pdev *pdev = soc->pdev_list[i];

                if (!pdev)
                        continue;

                for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
                        int mac_for_pdev =
                                dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
                        /*
                         * Obtain lmac id from pdev to access the LMAC ring
                         * in soc context
                         */
                        int lmac_id =
                                dp_get_lmac_id_for_pdev_id(soc, mac_id,
                                                           pdev->pdev_id);

                        rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
                        htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
                                            rx_mac_srng->hal_srng,
                                            RXDMA_BUF, RX_DATA_BUFFER_SIZE,
                                            &htt_tlv_filter);
                }
        }

        return status;
}
#endif
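
/*
 * Helpers for the fourth TX completion ring (tx_comp_ring[3], WBM2SW ring
 * WBM2SW_TXCOMP_RING4_NUM). They compile to no-ops that return success when
 * QCA_DP_ENABLE_TX_COMP_RING4 is not defined.
 */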
#ifdef QCA_DP_ENABLE_TX_COMP_RING4
static inline
void dp_deinit_txcomp_ring4(struct dp_soc *soc)
{
        if (soc) {
                wlan_minidump_remove(soc->tx_comp_ring[3].base_vaddr_unaligned,
                                     soc->tx_comp_ring[3].alloc_size,
                                     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
                                     "Transmit_completion_ring");
                dp_srng_deinit(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE, 0);
        }
}

static inline
QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
{
        if (soc) {
                if (dp_srng_init(soc, &soc->tx_comp_ring[3],
                                 WBM2SW_RELEASE, WBM2SW_TXCOMP_RING4_NUM, 0)) {
                        dp_err("%pK: dp_srng_init failed for tx_comp_ring",
                               soc);
                        return QDF_STATUS_E_FAILURE;
                }
                wlan_minidump_log(soc->tx_comp_ring[3].base_vaddr_unaligned,
                                  soc->tx_comp_ring[3].alloc_size,
                                  soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
                                  "Transmit_completion_ring");
        }
        return QDF_STATUS_SUCCESS;
}

static inline
void dp_free_txcomp_ring4(struct dp_soc *soc)
{
        if (soc)
                dp_srng_free(soc, &soc->tx_comp_ring[3]);
}

static inline
QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
                                 uint32_t cached)
{
        if (soc) {
                if (dp_srng_alloc(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE,
                                  tx_comp_ring_size, cached)) {
                        dp_err("dp_srng_alloc failed for tx_comp_ring");
                        return QDF_STATUS_E_FAILURE;
                }
        }
        return QDF_STATUS_SUCCESS;
}
#else
static inline
void dp_deinit_txcomp_ring4(struct dp_soc *soc)
{
}

static inline
QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
{
        return QDF_STATUS_SUCCESS;
}

static inline
void dp_free_txcomp_ring4(struct dp_soc *soc)
{
}

static inline
QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
                                 uint32_t cached)
{
        return QDF_STATUS_SUCCESS;
}
#endif
static void dp_soc_srng_deinit_li(struct dp_soc *soc)
{
        /* Tx Complete ring */
        dp_deinit_txcomp_ring4(soc);
}

static void dp_soc_srng_free_li(struct dp_soc *soc)
{
        dp_free_txcomp_ring4(soc);
}
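
/**
 * dp_soc_srng_alloc_li() - Allocate the LI-specific SRNGs (currently only
 *	TX completion ring 4); cached descriptors are disabled when NSS
 *	offload is configured
 * @soc: DP soc handle
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */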
static QDF_STATUS dp_soc_srng_alloc_li(struct dp_soc *soc)
{
        uint32_t tx_comp_ring_size;
        uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
        struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

        soc_cfg_ctx = soc->wlan_cfg_ctx;

        tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
        /* Disable cached desc if NSS offload is enabled */
        if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
                cached = 0;

        if (dp_alloc_txcomp_ring4(soc, tx_comp_ring_size, cached))
                goto fail1;

        return QDF_STATUS_SUCCESS;

fail1:
        dp_soc_srng_free_li(soc);

        return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_soc_srng_init_li(struct dp_soc *soc)
{
        /* Tx comp ring 3 */
        if (dp_init_txcomp_ring4(soc))
                goto fail1;

        return QDF_STATUS_SUCCESS;

fail1:
        /*
         * Cleanup will be done as part of soc_detach, which will
         * be called on pdev attach failure
         */
        dp_soc_srng_deinit_li(soc);

        return QDF_STATUS_E_FAILURE;
}
static void dp_tx_implicit_rbm_set_li(struct dp_soc *soc,
                                      uint8_t tx_ring_id,
                                      uint8_t bm_id)
{
}

static QDF_STATUS dp_txrx_set_vdev_param_li(struct dp_soc *soc,
                                            struct dp_vdev *vdev,
                                            enum cdp_vdev_param_type param,
                                            cdp_config_param_type val)
{
        return QDF_STATUS_SUCCESS;
}

bool
dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
                               qdf_nbuf_t nbuf_copy,
                               struct cdp_tid_rx_stats *tid_stats,
                               uint8_t link_id)
{
        return false;
}

static void dp_rx_word_mask_subscribe_li(struct dp_soc *soc,
                                         uint32_t *msg_word,
                                         void *rx_filter)
{
}
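
/**
 * dp_find_peer_by_destmac_li() - Look up the AST entry for @dest_mac on
 *	@vdev_id under ast_lock and take a DP_MOD_ID_SAWF reference on the
 *	resolved peer
 * @soc: DP soc handle
 * @dest_mac: destination MAC address
 * @vdev_id: vdev id to search in
 *
 * Return: referenced dp_peer, or NULL if no AST entry exists or the peer id
 *	   is invalid; the caller is responsible for releasing the reference
 */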
static struct dp_peer *dp_find_peer_by_destmac_li(struct dp_soc *soc,
                                                  uint8_t *dest_mac,
                                                  uint8_t vdev_id)
{
        struct dp_peer *peer = NULL;
        struct dp_ast_entry *ast_entry = NULL;
        uint16_t peer_id;

        qdf_spin_lock_bh(&soc->ast_lock);
        ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, dest_mac, vdev_id);
        if (!ast_entry) {
                qdf_spin_unlock_bh(&soc->ast_lock);
                dp_err("NULL ast entry");
                return NULL;
        }

        peer_id = ast_entry->peer_id;
        qdf_spin_unlock_bh(&soc->ast_lock);
        if (peer_id == HTT_INVALID_PEER)
                return NULL;

        peer = dp_peer_get_ref_by_id(soc, peer_id,
                                     DP_MOD_ID_SAWF);
        return peer;
}

static void dp_get_rx_hash_key_li(struct dp_soc *soc,
                                  struct cdp_lro_hash_config *lro_hash)
{
        dp_get_rx_hash_key_bytes(lro_hash);
}

static void dp_peer_get_reo_hash_li(struct dp_vdev *vdev,
                                    struct cdp_peer_setup_info *setup_info,
                                    enum cdp_host_reo_dest_ring *reo_dest,
                                    bool *hash_based,
                                    uint8_t *lmac_peer_id_msb)
{
        dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
}

static bool dp_reo_remap_config_li(struct dp_soc *soc,
                                   uint32_t *remap0,
                                   uint32_t *remap1,
                                   uint32_t *remap2)
{
        return dp_reo_remap_config(soc, remap0, remap1, remap2);
}

static struct dp_soc *dp_rx_replensih_soc_get_li(struct dp_soc *soc,
                                                 uint8_t chip_id)
{
        return soc;
}

static uint8_t dp_soc_get_num_soc_li(struct dp_soc *soc)
{
        return 1;
}

static QDF_STATUS dp_txrx_get_vdev_mcast_param_li(struct dp_soc *soc,
                                                  struct dp_vdev *vdev,
                                                  cdp_config_param_type *val)
{
        return QDF_STATUS_SUCCESS;
}
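
/**
 * dp_initialize_arch_ops_li() - Populate the dp_arch_ops table with the
 *	Lithium (LI) implementations registered in this file
 * @arch_ops: arch ops table to fill
 *
 * Return: None
 */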
void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
{
#ifndef QCA_HOST_MODE_WIFI_DISABLED
        arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_li;
        arch_ops->dp_rx_process = dp_rx_process_li;
        arch_ops->dp_tx_send_fast = dp_tx_send;
        arch_ops->tx_comp_get_params_from_hal_desc =
                dp_tx_comp_get_params_from_hal_desc_li;
        arch_ops->dp_tx_process_htt_completion =
                dp_tx_process_htt_completion_li;
        arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
                dp_wbm_get_rx_desc_from_hal_desc_li;
        arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_li;
        arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_li;
        arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_li;
        arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_li;
        arch_ops->dp_tx_compute_hw_delay = dp_tx_compute_tx_delay_li;
        arch_ops->dp_rx_chain_msdus = dp_rx_chain_msdus_li;
        arch_ops->dp_rx_wbm_err_reap_desc = dp_rx_wbm_err_reap_desc_li;
        arch_ops->dp_rx_null_q_desc_handle = dp_rx_null_q_desc_handle_li;
#else
        arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_generic;
        arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_generic;
#endif
        arch_ops->txrx_get_context_size = dp_get_context_size_li;
#ifdef WIFI_MONITOR_SUPPORT
        arch_ops->txrx_get_mon_context_size = dp_mon_get_context_size_li;
#endif
        arch_ops->txrx_soc_attach = dp_soc_attach_li;
        arch_ops->txrx_soc_detach = dp_soc_detach_li;
        arch_ops->txrx_soc_init = dp_soc_init_li;
        arch_ops->txrx_soc_deinit = dp_soc_deinit_li;
        arch_ops->txrx_soc_srng_alloc = dp_soc_srng_alloc_li;
        arch_ops->txrx_soc_srng_init = dp_soc_srng_init_li;
        arch_ops->txrx_soc_srng_deinit = dp_soc_srng_deinit_li;
        arch_ops->txrx_soc_srng_free = dp_soc_srng_free_li;
        arch_ops->txrx_pdev_attach = dp_pdev_attach_li;
        arch_ops->txrx_pdev_detach = dp_pdev_detach_li;
        arch_ops->txrx_vdev_attach = dp_vdev_attach_li;
        arch_ops->txrx_vdev_detach = dp_vdev_detach_li;
        arch_ops->txrx_peer_map_attach = dp_peer_map_attach_li;
        arch_ops->txrx_peer_map_detach = dp_peer_map_detach_li;
        arch_ops->get_rx_hash_key = dp_get_rx_hash_key_li;
        arch_ops->dp_set_rx_fst = NULL;
        arch_ops->dp_get_rx_fst = NULL;
        arch_ops->dp_rx_fst_ref = NULL;
        arch_ops->dp_rx_fst_deref = NULL;
        arch_ops->txrx_peer_setup = dp_peer_setup_li;
        arch_ops->dp_rx_desc_cookie_2_va =
                dp_rx_desc_cookie_2_va_li;
        arch_ops->dp_rx_intrabss_mcast_handler =
                dp_rx_intrabss_handle_nawds_li;
        arch_ops->dp_rx_word_mask_subscribe = dp_rx_word_mask_subscribe_li;
        arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li;
        arch_ops->dp_rx_peer_metadata_peer_id_get =
                dp_rx_peer_metadata_peer_id_get_li;
        arch_ops->soc_cfg_attach = dp_soc_cfg_attach_li;
        arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_li;
        arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_li;
        arch_ops->txrx_print_peer_stats = dp_print_peer_txrx_stats_li;
        arch_ops->dp_peer_rx_reorder_queue_setup =
                dp_peer_rx_reorder_queue_setup_li;
        arch_ops->dp_find_peer_by_destmac = dp_find_peer_by_destmac_li;
        arch_ops->peer_get_reo_hash = dp_peer_get_reo_hash_li;
        arch_ops->reo_remap_config = dp_reo_remap_config_li;
        arch_ops->dp_rx_replenish_soc_get = dp_rx_replensih_soc_get_li;
        arch_ops->dp_soc_get_num_soc = dp_soc_get_num_soc_li;
        arch_ops->get_reo_qdesc_addr = dp_rx_get_reo_qdesc_addr_li;
        arch_ops->txrx_get_vdev_mcast_param = dp_txrx_get_vdev_mcast_param_li;
}
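
/**
 * dp_tx_comp_get_prefetched_params_from_hal_desc() - Recover the SW TX
 *	descriptor from a HAL TX completion descriptor and prefetch it
 * @soc: DP soc handle
 * @tx_comp_hal_desc: HAL TX completion descriptor
 * @r_tx_desc: filled with the located dp_tx_desc_s
 *
 * The cookie returned by hal_tx_comp_get_desc_id() packs the pool id, page id
 * and offset; each field is extracted with its DP_TX_DESC_ID_*_MASK mask and
 * DP_TX_DESC_ID_*_OS shift before the dp_tx_desc_find() lookup.
 */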
#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
void dp_tx_comp_get_prefetched_params_from_hal_desc(
                                        struct dp_soc *soc,
                                        void *tx_comp_hal_desc,
                                        struct dp_tx_desc_s **r_tx_desc)
{
        uint8_t pool_id;
        uint32_t tx_desc_id;

        tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
        pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
                        DP_TX_DESC_ID_POOL_OS;

        /* Find Tx descriptor */
        *r_tx_desc = dp_tx_desc_find(soc, pool_id,
                                     (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
                                     DP_TX_DESC_ID_PAGE_OS,
                                     (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
                                     DP_TX_DESC_ID_OFFSET_OS);
        qdf_prefetch((uint8_t *)*r_tx_desc);
}
#endif