/* dp_li.c */
/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include "dp_li.h"
#include "dp_li_tx.h"
#include "dp_tx_desc.h"
#include "dp_li_rx.h"
#include "dp_peer.h"
#include <wlan_utility.h>
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon_1.0.h>
#endif
/*
 * TCL data ring -> WBM completion ring / return-buffer-manager mapping.
 * Two layouts: single-pdev (WLAN_MAX_PDEVS == 1) targets use three TCL
 * rings with ring 1 sharing WBM2SW0 (see INVALID_WBM_RING_NUM note);
 * multi-pdev targets use four rings.
 */
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
	/*
	 * INVALID_WBM_RING_NUM implies re-use of an existing WBM2SW ring
	 * as indicated by rbm id.
	 */
	{1, INVALID_WBM_RING_NUM, HAL_LI_WBM_SW0_BM_ID, 0},
	{2, 2, HAL_LI_WBM_SW2_BM_ID, 0}
};
#else
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
	{.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
	{1, 1, HAL_LI_WBM_SW1_BM_ID, 0},
	{2, 2, HAL_LI_WBM_SW2_BM_ID, 0},
	/*
	 * Although using wbm_ring 4, wbm_ring 3 is mentioned in order to match
	 * with the tx_mask in dp_service_srngs. Please be careful while using
	 * this table anywhere else.
	 */
	{3, 3, HAL_LI_WBM_SW4_BM_ID, 0}
};
#endif
  55. #ifdef IPA_WDI3_TX_TWO_PIPES
  56. static inline void
  57. dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *cfg_ctx)
  58. {
  59. if (!cfg_ctx->ipa_enabled)
  60. return;
  61. cfg_ctx->tcl_wbm_map_array[IPA_TX_ALT_RING_IDX].wbm_ring_num = 4;
  62. cfg_ctx->tcl_wbm_map_array[IPA_TX_ALT_RING_IDX].wbm_rbm_id =
  63. HAL_LI_WBM_SW4_BM_ID;
  64. }
  65. #else
  66. static inline void
  67. dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
  68. {
  69. }
  70. #endif
  71. static void dp_soc_cfg_attach_li(struct dp_soc *soc)
  72. {
  73. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
  74. wlan_cfg_set_rx_rel_ring_id(soc_cfg_ctx, WBM2SW_REL_ERR_RING_NUM);
  75. soc_cfg_ctx->tcl_wbm_map_array = g_tcl_wbm_map_array;
  76. dp_soc_cfg_update_tcl_wbm_map_for_ipa(soc_cfg_ctx);
  77. }
  78. qdf_size_t dp_get_context_size_li(enum dp_context_type context_type)
  79. {
  80. switch (context_type) {
  81. case DP_CONTEXT_TYPE_SOC:
  82. return sizeof(struct dp_soc_li);
  83. case DP_CONTEXT_TYPE_PDEV:
  84. return sizeof(struct dp_pdev_li);
  85. case DP_CONTEXT_TYPE_VDEV:
  86. return sizeof(struct dp_vdev_li);
  87. case DP_CONTEXT_TYPE_PEER:
  88. return sizeof(struct dp_peer_li);
  89. default:
  90. return 0;
  91. }
  92. }
/**
 * dp_soc_attach_li() - Li arch hook for soc attach
 * @soc: DP soc handle
 * @params: attach-time parameters (unused here)
 *
 * Caches the WBM SW0 buffer-manager id from HAL on the soc.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_soc_attach_li(struct dp_soc *soc,
				   struct cdp_soc_attach_params *params)
{
	soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();

	return QDF_STATUS_SUCCESS;
}
/* Li arch hook for soc detach; no Li-specific teardown is required. */
static QDF_STATUS dp_soc_detach_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
/* Li arch hook for soc init; nothing Li-specific to initialize. */
static QDF_STATUS dp_soc_init_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
/* Li arch hook for soc deinit; nothing Li-specific to undo. */
static QDF_STATUS dp_soc_deinit_li(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
/* Li arch hook for pdev attach; no Li-specific pdev state to set up. */
static QDF_STATUS dp_pdev_attach_li(struct dp_pdev *pdev,
				    struct cdp_pdev_attach_params *params)
{
	return QDF_STATUS_SUCCESS;
}
/* Li arch hook for pdev detach; no Li-specific pdev state to tear down. */
static QDF_STATUS dp_pdev_detach_li(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
/* Li arch hook for vdev attach; no Li-specific vdev state to set up. */
static QDF_STATUS dp_vdev_attach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}
/* Li arch hook for vdev detach; no Li-specific vdev state to tear down. */
static QDF_STATUS dp_vdev_detach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}
#ifdef AST_OFFLOAD_ENABLE
/**
 * dp_peer_map_detach_li() - Tear down host AST/MEC/WDS state
 * @soc: DP soc handle
 *
 * Mirror of dp_peer_map_attach_li(); detach order is the reverse of
 * the attach order below.
 */
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
	dp_soc_wds_detach(soc);
	dp_peer_ast_table_detach(soc);
	dp_peer_ast_hash_detach(soc);
	dp_peer_mec_hash_detach(soc);
}

/**
 * dp_peer_map_attach_li() - Set up host AST table/hash, MEC hash and WDS
 * @soc: DP soc handle
 *
 * On any intermediate failure, previously attached resources are
 * released via the goto-cleanup labels before returning the error.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
	QDF_STATUS status;

	/* Li uses the full peer-id space: max_peer_id tracks max_peers */
	soc->max_peer_id = soc->max_peers;

	status = dp_peer_ast_table_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	status = dp_peer_ast_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto ast_table_detach;

	status = dp_peer_mec_hash_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status))
		goto hash_detach;

	dp_soc_wds_attach(soc);

	return QDF_STATUS_SUCCESS;

hash_detach:
	dp_peer_ast_hash_detach(soc);
ast_table_detach:
	dp_peer_ast_table_detach(soc);

	return status;
}
#else
/* Without AST offload support there is no host AST state to manage. */
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
}

static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
	soc->max_peer_id = soc->max_peers;

	return QDF_STATUS_SUCCESS;
}
#endif
/**
 * dp_get_soc_context_size_li() - Allocation size of the DP soc for Li
 *
 * Return: sizeof(struct dp_soc); Li has no extended soc container.
 */
qdf_size_t dp_get_soc_context_size_li(void)
{
	return sizeof(struct dp_soc);
}
  171. #ifdef NO_RX_PKT_HDR_TLV
  172. /**
  173. * dp_rxdma_ring_sel_cfg_li() - Setup RXDMA ring config
  174. * @soc: Common DP soc handle
  175. *
  176. * Return: QDF_STATUS
  177. */
  178. static QDF_STATUS
  179. dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
  180. {
  181. int i;
  182. int mac_id;
  183. struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
  184. struct dp_srng *rx_mac_srng;
  185. QDF_STATUS status = QDF_STATUS_SUCCESS;
  186. htt_tlv_filter.mpdu_start = 1;
  187. htt_tlv_filter.msdu_start = 1;
  188. htt_tlv_filter.mpdu_end = 1;
  189. htt_tlv_filter.msdu_end = 1;
  190. htt_tlv_filter.attention = 1;
  191. htt_tlv_filter.packet = 1;
  192. htt_tlv_filter.packet_header = 0;
  193. htt_tlv_filter.ppdu_start = 0;
  194. htt_tlv_filter.ppdu_end = 0;
  195. htt_tlv_filter.ppdu_end_user_stats = 0;
  196. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  197. htt_tlv_filter.ppdu_end_status_done = 0;
  198. htt_tlv_filter.enable_fp = 1;
  199. htt_tlv_filter.enable_md = 0;
  200. htt_tlv_filter.enable_md = 0;
  201. htt_tlv_filter.enable_mo = 0;
  202. htt_tlv_filter.fp_mgmt_filter = 0;
  203. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
  204. htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
  205. FILTER_DATA_MCAST |
  206. FILTER_DATA_DATA);
  207. htt_tlv_filter.mo_mgmt_filter = 0;
  208. htt_tlv_filter.mo_ctrl_filter = 0;
  209. htt_tlv_filter.mo_data_filter = 0;
  210. htt_tlv_filter.md_data_filter = 0;
  211. htt_tlv_filter.offset_valid = true;
  212. htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
  213. /*Not subscribing rx_pkt_header*/
  214. htt_tlv_filter.rx_header_offset = 0;
  215. htt_tlv_filter.rx_mpdu_start_offset =
  216. hal_rx_mpdu_start_offset_get(soc->hal_soc);
  217. htt_tlv_filter.rx_mpdu_end_offset =
  218. hal_rx_mpdu_end_offset_get(soc->hal_soc);
  219. htt_tlv_filter.rx_msdu_start_offset =
  220. hal_rx_msdu_start_offset_get(soc->hal_soc);
  221. htt_tlv_filter.rx_msdu_end_offset =
  222. hal_rx_msdu_end_offset_get(soc->hal_soc);
  223. htt_tlv_filter.rx_attn_offset =
  224. hal_rx_attn_offset_get(soc->hal_soc);
  225. for (i = 0; i < MAX_PDEV_CNT; i++) {
  226. struct dp_pdev *pdev = soc->pdev_list[i];
  227. if (!pdev)
  228. continue;
  229. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  230. int mac_for_pdev =
  231. dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
  232. /*
  233. * Obtain lmac id from pdev to access the LMAC ring
  234. * in soc context
  235. */
  236. int lmac_id =
  237. dp_get_lmac_id_for_pdev_id(soc, mac_id,
  238. pdev->pdev_id);
  239. rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
  240. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  241. rx_mac_srng->hal_srng,
  242. RXDMA_BUF, RX_DATA_BUFFER_SIZE,
  243. &htt_tlv_filter);
  244. }
  245. }
  246. return status;
  247. }
  248. #else
  249. static QDF_STATUS
  250. dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
  251. {
  252. int i;
  253. int mac_id;
  254. struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
  255. struct dp_srng *rx_mac_srng;
  256. QDF_STATUS status = QDF_STATUS_SUCCESS;
  257. htt_tlv_filter.mpdu_start = 1;
  258. htt_tlv_filter.msdu_start = 1;
  259. htt_tlv_filter.mpdu_end = 1;
  260. htt_tlv_filter.msdu_end = 1;
  261. htt_tlv_filter.attention = 1;
  262. htt_tlv_filter.packet = 1;
  263. htt_tlv_filter.packet_header = 1;
  264. htt_tlv_filter.ppdu_start = 0;
  265. htt_tlv_filter.ppdu_end = 0;
  266. htt_tlv_filter.ppdu_end_user_stats = 0;
  267. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  268. htt_tlv_filter.ppdu_end_status_done = 0;
  269. htt_tlv_filter.enable_fp = 1;
  270. htt_tlv_filter.enable_md = 0;
  271. htt_tlv_filter.enable_md = 0;
  272. htt_tlv_filter.enable_mo = 0;
  273. htt_tlv_filter.fp_mgmt_filter = 0;
  274. htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
  275. htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
  276. FILTER_DATA_MCAST |
  277. FILTER_DATA_DATA);
  278. htt_tlv_filter.mo_mgmt_filter = 0;
  279. htt_tlv_filter.mo_ctrl_filter = 0;
  280. htt_tlv_filter.mo_data_filter = 0;
  281. htt_tlv_filter.md_data_filter = 0;
  282. htt_tlv_filter.offset_valid = true;
  283. htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
  284. htt_tlv_filter.rx_header_offset =
  285. hal_rx_pkt_tlv_offset_get(soc->hal_soc);
  286. htt_tlv_filter.rx_mpdu_start_offset =
  287. hal_rx_mpdu_start_offset_get(soc->hal_soc);
  288. htt_tlv_filter.rx_mpdu_end_offset =
  289. hal_rx_mpdu_end_offset_get(soc->hal_soc);
  290. htt_tlv_filter.rx_msdu_start_offset =
  291. hal_rx_msdu_start_offset_get(soc->hal_soc);
  292. htt_tlv_filter.rx_msdu_end_offset =
  293. hal_rx_msdu_end_offset_get(soc->hal_soc);
  294. htt_tlv_filter.rx_attn_offset =
  295. hal_rx_attn_offset_get(soc->hal_soc);
  296. for (i = 0; i < MAX_PDEV_CNT; i++) {
  297. struct dp_pdev *pdev = soc->pdev_list[i];
  298. if (!pdev)
  299. continue;
  300. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  301. int mac_for_pdev =
  302. dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
  303. /*
  304. * Obtain lmac id from pdev to access the LMAC ring
  305. * in soc context
  306. */
  307. int lmac_id =
  308. dp_get_lmac_id_for_pdev_id(soc, mac_id,
  309. pdev->pdev_id);
  310. rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
  311. htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
  312. rx_mac_srng->hal_srng,
  313. RXDMA_BUF, RX_DATA_BUFFER_SIZE,
  314. &htt_tlv_filter);
  315. }
  316. }
  317. return status;
  318. }
  319. #endif
  320. #ifdef QCA_DP_ENABLE_TX_COMP_RING4
  321. static inline
  322. void dp_deinit_txcomp_ring4(struct dp_soc *soc)
  323. {
  324. if (soc) {
  325. wlan_minidump_remove(soc->tx_comp_ring[3].base_vaddr_unaligned,
  326. soc->tx_comp_ring[3].alloc_size,
  327. soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
  328. "Transmit_completion_ring");
  329. dp_srng_deinit(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE, 0);
  330. }
  331. }
  332. static inline
  333. QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
  334. {
  335. if (soc) {
  336. if (dp_srng_init(soc, &soc->tx_comp_ring[3],
  337. WBM2SW_RELEASE, WBM2SW_TXCOMP_RING4_NUM, 0)) {
  338. dp_err("%pK: dp_srng_init failed for rx_rel_ring",
  339. soc);
  340. return QDF_STATUS_E_FAILURE;
  341. }
  342. wlan_minidump_log(soc->tx_comp_ring[3].base_vaddr_unaligned,
  343. soc->tx_comp_ring[3].alloc_size,
  344. soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
  345. "Transmit_completion_ring");
  346. }
  347. return QDF_STATUS_SUCCESS;
  348. }
  349. static inline
  350. void dp_free_txcomp_ring4(struct dp_soc *soc)
  351. {
  352. if (soc)
  353. dp_srng_free(soc, &soc->tx_comp_ring[3]);
  354. }
  355. static inline
  356. QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
  357. uint32_t cached)
  358. {
  359. if (soc) {
  360. if (dp_srng_alloc(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE,
  361. tx_comp_ring_size, cached)) {
  362. dp_err("dp_srng_alloc failed for tx_comp_ring");
  363. return QDF_STATUS_E_FAILURE;
  364. }
  365. }
  366. return QDF_STATUS_SUCCESS;
  367. }
  368. #else
  369. static inline
  370. void dp_deinit_txcomp_ring4(struct dp_soc *soc)
  371. {
  372. }
  373. static inline
  374. QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
  375. {
  376. return QDF_STATUS_SUCCESS;
  377. }
  378. static inline
  379. void dp_free_txcomp_ring4(struct dp_soc *soc)
  380. {
  381. }
  382. static inline
  383. QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
  384. uint32_t cached)
  385. {
  386. return QDF_STATUS_SUCCESS;
  387. }
  388. #endif
/**
 * dp_soc_srng_deinit_li() - Deinit Li-specific SRNGs
 * @soc: DP soc handle
 *
 * Only TX completion ring 4 is Li-specific here.
 */
static void dp_soc_srng_deinit_li(struct dp_soc *soc)
{
	/* Tx Complete ring */
	dp_deinit_txcomp_ring4(soc);
}
/**
 * dp_soc_srng_free_li() - Free Li-specific SRNG memory
 * @soc: DP soc handle
 */
static void dp_soc_srng_free_li(struct dp_soc *soc)
{
	dp_free_txcomp_ring4(soc);
}
  398. static QDF_STATUS dp_soc_srng_alloc_li(struct dp_soc *soc)
  399. {
  400. uint32_t tx_comp_ring_size;
  401. uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
  402. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  403. soc_cfg_ctx = soc->wlan_cfg_ctx;
  404. tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
  405. /* Disable cached desc if NSS offload is enabled */
  406. if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
  407. cached = 0;
  408. if (dp_alloc_txcomp_ring4(soc, tx_comp_ring_size, cached))
  409. goto fail1;
  410. return QDF_STATUS_SUCCESS;
  411. fail1:
  412. dp_soc_srng_free_li(soc);
  413. return QDF_STATUS_E_NOMEM;
  414. }
  415. static QDF_STATUS dp_soc_srng_init_li(struct dp_soc *soc)
  416. {
  417. /* Tx comp ring 3 */
  418. if (dp_init_txcomp_ring4(soc))
  419. goto fail1;
  420. return QDF_STATUS_SUCCESS;
  421. fail1:
  422. /*
  423. * Cleanup will be done as part of soc_detach, which will
  424. * be called on pdev attach failure
  425. */
  426. dp_soc_srng_deinit_li(soc);
  427. return QDF_STATUS_E_FAILURE;
  428. }
/* Implicit RBM programming is not required on Li targets; no-op hook. */
static void dp_tx_implicit_rbm_set_li(struct dp_soc *soc,
				      uint8_t tx_ring_id,
				      uint8_t bm_id)
{
}
/* No Li-specific vdev parameters; accepts all params without action. */
static QDF_STATUS dp_txrx_set_vdev_param_li(struct dp_soc *soc,
					    struct dp_vdev *vdev,
					    enum cdp_vdev_param_type param,
					    cdp_config_param_type val)
{
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_rx_intrabss_handle_nawds_li() - NAWDS intra-BSS RX hook for Li
 * @soc: DP soc handle
 * @ta_peer: transmitting peer
 * @nbuf_copy: copy of the received buffer
 * @tid_stats: per-TID RX stats
 *
 * Always returns false on Li (no special NAWDS handling here).
 */
bool
dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			       qdf_nbuf_t nbuf_copy,
			       struct cdp_tid_rx_stats *tid_stats)
{
	return false;
}
/* RX word-mask subscription is not supported on Li; no-op hook. */
static void dp_rx_word_mask_subscribe_li(struct dp_soc *soc,
					 uint32_t *msg_word,
					 void *rx_filter)
{
}
  453. static struct dp_peer *dp_find_peer_by_destmac_li(struct dp_soc *soc,
  454. uint8_t *dest_mac,
  455. uint8_t vdev_id)
  456. {
  457. struct dp_peer *peer = NULL;
  458. struct dp_ast_entry *ast_entry = NULL;
  459. uint16_t peer_id;
  460. qdf_spin_lock_bh(&soc->ast_lock);
  461. ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, dest_mac, vdev_id);
  462. if (!ast_entry) {
  463. qdf_spin_unlock_bh(&soc->ast_lock);
  464. dp_err("NULL ast entry");
  465. return NULL;
  466. }
  467. peer_id = ast_entry->peer_id;
  468. qdf_spin_unlock_bh(&soc->ast_lock);
  469. if (peer_id == HTT_INVALID_PEER)
  470. return NULL;
  471. peer = dp_peer_get_ref_by_id(soc, peer_id,
  472. DP_MOD_ID_SAWF);
  473. return peer;
  474. }
/* Delegates RX hash key retrieval to the common helper. */
static void dp_get_rx_hash_key_li(struct dp_soc *soc,
				  struct cdp_lro_hash_config *lro_hash)
{
	dp_get_rx_hash_key_bytes(lro_hash);
}
/*
 * Li uses the vdev's default REO destination/hash; @setup_info and
 * @lmac_peer_id_msb are unused on this target.
 */
static void dp_peer_get_reo_hash_li(struct dp_vdev *vdev,
				    struct cdp_peer_setup_info *setup_info,
				    enum cdp_host_reo_dest_ring *reo_dest,
				    bool *hash_based,
				    uint8_t *lmac_peer_id_msb)
{
	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
}
/* Li has no target-specific REO remap; defer to the common config. */
static bool dp_reo_remap_config_li(struct dp_soc *soc,
				   uint32_t *remap0,
				   uint32_t *remap1,
				   uint32_t *remap2)
{
	return dp_reo_remap_config(soc, remap0, remap1, remap2);
}
/**
 * dp_initialize_arch_ops_li() - Populate the arch_ops table for Li targets
 * @arch_ops: arch ops table to fill
 *
 * Installs the Li implementations defined in this file (and the dp_li_tx /
 * dp_li_rx modules).  TX/RX fast-path hooks are only wired when host-mode
 * WiFi is enabled; PPE-DS hooks are explicitly NULL (not supported on Li).
 */
void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
{
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	/* TX/RX data-path hooks */
	arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_li;
	arch_ops->dp_rx_process = dp_rx_process_li;
	arch_ops->dp_tx_send_fast = dp_tx_send;
	arch_ops->tx_comp_get_params_from_hal_desc =
					dp_tx_comp_get_params_from_hal_desc_li;
	arch_ops->dp_tx_process_htt_completion =
					dp_tx_process_htt_completion_li;
	arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
					dp_wbm_get_rx_desc_from_hal_desc_li;
	arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_li;
	arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_li;
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_li;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_li;
	arch_ops->dp_tx_compute_hw_delay = dp_tx_compute_tx_delay_li;
	arch_ops->dp_rx_chain_msdus = dp_rx_chain_msdus_li;
#else
	arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_generic;
	arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_generic;
#endif
	arch_ops->txrx_get_context_size = dp_get_context_size_li;
#ifdef WIFI_MONITOR_SUPPORT
	arch_ops->txrx_get_mon_context_size = dp_mon_get_context_size_li;
#endif
	/* SOC/pdev/vdev lifecycle hooks */
	arch_ops->txrx_soc_attach = dp_soc_attach_li;
	arch_ops->txrx_soc_detach = dp_soc_detach_li;
	arch_ops->txrx_soc_init = dp_soc_init_li;
	arch_ops->txrx_soc_deinit = dp_soc_deinit_li;
	arch_ops->txrx_soc_srng_alloc = dp_soc_srng_alloc_li;
	arch_ops->txrx_soc_srng_init = dp_soc_srng_init_li;
	arch_ops->txrx_soc_srng_deinit = dp_soc_srng_deinit_li;
	arch_ops->txrx_soc_srng_free = dp_soc_srng_free_li;
	arch_ops->txrx_pdev_attach = dp_pdev_attach_li;
	arch_ops->txrx_pdev_detach = dp_pdev_detach_li;
	arch_ops->txrx_vdev_attach = dp_vdev_attach_li;
	arch_ops->txrx_vdev_detach = dp_vdev_detach_li;
	arch_ops->txrx_peer_map_attach = dp_peer_map_attach_li;
	arch_ops->txrx_peer_map_detach = dp_peer_map_detach_li;
	/* Misc/config hooks */
	arch_ops->get_rx_hash_key = dp_get_rx_hash_key_li;
	arch_ops->dp_rx_desc_cookie_2_va =
					dp_rx_desc_cookie_2_va_li;
	arch_ops->dp_rx_intrabss_handle_nawds = dp_rx_intrabss_handle_nawds_li;
	arch_ops->dp_rx_word_mask_subscribe = dp_rx_word_mask_subscribe_li;
	arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li;
	arch_ops->dp_rx_peer_metadata_peer_id_get =
					dp_rx_peer_metadata_peer_id_get_li;
	arch_ops->soc_cfg_attach = dp_soc_cfg_attach_li;
	arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_li;
	arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_li;
	arch_ops->txrx_print_peer_stats = dp_print_peer_txrx_stats_li;
	arch_ops->dp_peer_rx_reorder_queue_setup =
					dp_peer_rx_reorder_queue_setup_li;
	arch_ops->dp_find_peer_by_destmac = dp_find_peer_by_destmac_li;
	arch_ops->peer_get_reo_hash = dp_peer_get_reo_hash_li;
	arch_ops->reo_remap_config = dp_reo_remap_config_li;
	/* PPE-DS is not supported on Li targets */
	arch_ops->dp_txrx_ppeds_rings_status = NULL;
	arch_ops->txrx_soc_ppeds_start = NULL;
	arch_ops->txrx_soc_ppeds_stop = NULL;
	arch_ops->get_reo_qdesc_addr = dp_rx_get_reo_qdesc_addr_li;
}
  557. #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
  558. void dp_tx_comp_get_prefetched_params_from_hal_desc(
  559. struct dp_soc *soc,
  560. void *tx_comp_hal_desc,
  561. struct dp_tx_desc_s **r_tx_desc)
  562. {
  563. uint8_t pool_id;
  564. uint32_t tx_desc_id;
  565. tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
  566. pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
  567. DP_TX_DESC_ID_POOL_OS;
  568. /* Find Tx descriptor */
  569. *r_tx_desc = dp_tx_desc_find(soc, pool_id,
  570. (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
  571. DP_TX_DESC_ID_PAGE_OS,
  572. (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
  573. DP_TX_DESC_ID_OFFSET_OS);
  574. qdf_prefetch((uint8_t *)*r_tx_desc);
  575. }
  576. #endif