/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rings.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include "dp_li.h"
#include "dp_li_tx.h"
#include "dp_tx_desc.h"
#include "dp_li_rx.h"
#include "dp_peer.h"
#include <wlan_utility.h>
#include "dp_ipa.h"
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon_1.0.h>
#endif

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
        {.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
        /*
         * INVALID_WBM_RING_NUM implies re-use of an existing WBM2SW ring
         * as indicated by rbm id.
         */
        {1, INVALID_WBM_RING_NUM, HAL_LI_WBM_SW0_BM_ID, 0},
        {2, 2, HAL_LI_WBM_SW2_BM_ID, 0}
};
#else
static struct wlan_cfg_tcl_wbm_ring_num_map g_tcl_wbm_map_array[MAX_TCL_DATA_RINGS] = {
        {.tcl_ring_num = 0, .wbm_ring_num = 0, .wbm_rbm_id = HAL_LI_WBM_SW0_BM_ID, .for_ipa = 0},
        {1, 1, HAL_LI_WBM_SW1_BM_ID, 0},
        {2, 2, HAL_LI_WBM_SW2_BM_ID, 0},
        /*
         * Although WBM ring 4 is used, wbm_ring_num is set to 3 here to
         * match the tx_mask in dp_service_srngs. Please be careful while
         * using this table anywhere else.
         */
        {3, 3, HAL_LI_WBM_SW4_BM_ID, 0}
};
#endif

#ifdef IPA_WDI3_TX_TWO_PIPES
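/**
 * dp_soc_cfg_update_tcl_wbm_map_for_ipa() - Route IPA alt TX completions
 * @cfg_ctx: DP soc config context
 *
 * When IPA is enabled, point the IPA alternate TX ring entry of the
 * TCL->WBM map at WBM2SW ring 4 (HAL_LI_WBM_SW4_BM_ID).
 *
 * Return: none
 */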
static inline void
dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *cfg_ctx)
{
        if (!cfg_ctx->ipa_enabled)
                return;

        cfg_ctx->tcl_wbm_map_array[IPA_TX_ALT_RING_IDX].wbm_ring_num = 4;
        cfg_ctx->tcl_wbm_map_array[IPA_TX_ALT_RING_IDX].wbm_rbm_id =
                                                        HAL_LI_WBM_SW4_BM_ID;
}
#else
static inline void
dp_soc_cfg_update_tcl_wbm_map_for_ipa(struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
#endif
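
/**
 * dp_soc_cfg_attach_li() - Attach LI-specific soc configuration
 * @soc: DP soc handle
 *
 * Runs the common dp_soc_cfg_attach(), selects the WBM2SW error release
 * ring as the RX release ring, and installs the LI TCL->WBM ring map
 * (adjusted for IPA when applicable).
 *
 * Return: none
 */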
static void dp_soc_cfg_attach_li(struct dp_soc *soc)
{
        struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;

        dp_soc_cfg_attach(soc);

        wlan_cfg_set_rx_rel_ring_id(soc_cfg_ctx, WBM2SW_REL_ERR_RING_NUM);

        soc_cfg_ctx->tcl_wbm_map_array = g_tcl_wbm_map_array;
        dp_soc_cfg_update_tcl_wbm_map_for_ipa(soc_cfg_ctx);
}
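
/**
 * dp_get_context_size_li() - Size of the LI variant of a DP context
 * @context_type: context type (SOC/PDEV/VDEV/PEER)
 *
 * Return: size in bytes of the requested LI context, or 0 for an
 *         unknown type
 */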
qdf_size_t dp_get_context_size_li(enum dp_context_type context_type)
{
        switch (context_type) {
        case DP_CONTEXT_TYPE_SOC:
                return sizeof(struct dp_soc_li);
        case DP_CONTEXT_TYPE_PDEV:
                return sizeof(struct dp_pdev_li);
        case DP_CONTEXT_TYPE_VDEV:
                return sizeof(struct dp_vdev_li);
        case DP_CONTEXT_TYPE_PEER:
                return sizeof(struct dp_peer_li);
        default:
                return 0;
        }
}

static QDF_STATUS dp_soc_attach_li(struct dp_soc *soc,
                                   struct cdp_soc_attach_params *params)
{
        soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();

        return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_soc_detach_li(struct dp_soc *soc)
{
        return QDF_STATUS_SUCCESS;
}
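
/**
 * dp_soc_init_li() - Initialize the LI soc
 * @soc: DP soc handle
 * @htc_handle: HTC handle
 * @hif_handle: HIF handle
 *
 * Registers the soc with minidump, caches the HAL soc handle obtained
 * from HIF and runs the common dp_soc_init().
 *
 * Return: initialized soc handle on success, NULL if the HAL handle
 *         cannot be obtained
 */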
static void *dp_soc_init_li(struct dp_soc *soc, HTC_HANDLE htc_handle,
                            struct hif_opaque_softc *hif_handle)
{
        wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
                          WLAN_MD_DP_SOC, "dp_soc");

        soc->hif_handle = hif_handle;

        soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
        if (!soc->hal_soc)
                return NULL;

        return dp_soc_init(soc, htc_handle, hif_handle);
}

static QDF_STATUS dp_soc_deinit_li(struct dp_soc *soc)
{
        qdf_atomic_set(&soc->cmn_init_done, 0);

        dp_soc_deinit(soc);

        return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_pdev_attach_li(struct dp_pdev *pdev,
                                    struct cdp_pdev_attach_params *params)
{
        return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_pdev_detach_li(struct dp_pdev *pdev)
{
        return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_vdev_attach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
        return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_vdev_detach_li(struct dp_soc *soc, struct dp_vdev *vdev)
{
        return QDF_STATUS_SUCCESS;
}

#ifdef AST_OFFLOAD_ENABLE
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
        dp_soc_wds_detach(soc);
        dp_peer_ast_table_detach(soc);
        dp_peer_ast_hash_detach(soc);
        dp_peer_mec_hash_detach(soc);
}
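
/**
 * dp_peer_map_attach_li() - Set up peer-map bookkeeping for AST offload
 * @soc: DP soc handle
 *
 * Sets max_peer_id and attaches the AST table, AST hash, MEC hash and
 * WDS handling, unwinding the earlier attaches if a later one fails.
 *
 * Return: QDF_STATUS
 */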
static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
        QDF_STATUS status;

        soc->max_peer_id = soc->max_peers;

        status = dp_peer_ast_table_attach(soc);
        if (!QDF_IS_STATUS_SUCCESS(status))
                return status;

        status = dp_peer_ast_hash_attach(soc);
        if (!QDF_IS_STATUS_SUCCESS(status))
                goto ast_table_detach;

        status = dp_peer_mec_hash_attach(soc);
        if (!QDF_IS_STATUS_SUCCESS(status))
                goto hash_detach;

        dp_soc_wds_attach(soc);

        return QDF_STATUS_SUCCESS;

hash_detach:
        dp_peer_ast_hash_detach(soc);
ast_table_detach:
        dp_peer_ast_table_detach(soc);

        return status;
}
#else
static void dp_peer_map_detach_li(struct dp_soc *soc)
{
}

static QDF_STATUS dp_peer_map_attach_li(struct dp_soc *soc)
{
        soc->max_peer_id = soc->max_peers;

        return QDF_STATUS_SUCCESS;
}
#endif

static QDF_STATUS dp_peer_setup_li(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                                   uint8_t *peer_mac,
                                   struct cdp_peer_setup_info *setup_info)
{
        return dp_peer_setup_wifi3(soc_hdl, vdev_id, peer_mac, setup_info);
}

qdf_size_t dp_get_soc_context_size_li(void)
{
        return sizeof(struct dp_soc);
}

#ifdef NO_RX_PKT_HDR_TLV
/**
 * dp_rxdma_ring_sel_cfg_li() - Setup RXDMA ring config
 * @soc: Common DP soc handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
{
        int i;
        int mac_id;
        struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
        struct dp_srng *rx_mac_srng;
        QDF_STATUS status = QDF_STATUS_SUCCESS;
        uint32_t target_type = hal_get_target_type(soc->hal_soc);

        if (target_type == TARGET_TYPE_QCN9160)
                return status;

        htt_tlv_filter.mpdu_start = 1;
        htt_tlv_filter.msdu_start = 1;
        htt_tlv_filter.mpdu_end = 1;
        htt_tlv_filter.msdu_end = 1;
        htt_tlv_filter.attention = 1;
        htt_tlv_filter.packet = 1;
        htt_tlv_filter.packet_header = 0;

        htt_tlv_filter.ppdu_start = 0;
        htt_tlv_filter.ppdu_end = 0;
        htt_tlv_filter.ppdu_end_user_stats = 0;
        htt_tlv_filter.ppdu_end_user_stats_ext = 0;
        htt_tlv_filter.ppdu_end_status_done = 0;
        htt_tlv_filter.enable_fp = 1;
        htt_tlv_filter.enable_md = 0;
        htt_tlv_filter.enable_mo = 0;

        htt_tlv_filter.fp_mgmt_filter = 0;
        htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
        htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
                                         FILTER_DATA_MCAST |
                                         FILTER_DATA_DATA);
        htt_tlv_filter.mo_mgmt_filter = 0;
        htt_tlv_filter.mo_ctrl_filter = 0;
        htt_tlv_filter.mo_data_filter = 0;
        htt_tlv_filter.md_data_filter = 0;

        htt_tlv_filter.offset_valid = true;

        htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
        /* Not subscribing rx_pkt_header */
        htt_tlv_filter.rx_header_offset = 0;
        htt_tlv_filter.rx_mpdu_start_offset =
                                hal_rx_mpdu_start_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_mpdu_end_offset =
                                hal_rx_mpdu_end_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_msdu_start_offset =
                                hal_rx_msdu_start_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_msdu_end_offset =
                                hal_rx_msdu_end_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_attn_offset =
                                hal_rx_attn_offset_get(soc->hal_soc);

        for (i = 0; i < MAX_PDEV_CNT; i++) {
                struct dp_pdev *pdev = soc->pdev_list[i];

                if (!pdev)
                        continue;

                for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
                        int mac_for_pdev =
                                dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
                        /*
                         * Obtain lmac id from pdev to access the LMAC ring
                         * in soc context
                         */
                        int lmac_id =
                                dp_get_lmac_id_for_pdev_id(soc, mac_id,
                                                           pdev->pdev_id);

                        rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
                        htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
                                            rx_mac_srng->hal_srng,
                                            RXDMA_BUF, RX_DATA_BUFFER_SIZE,
                                            &htt_tlv_filter);
                }
        }

        return status;
}
#else
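/*
 * Variant used when the RX packet header TLV is subscribed: identical to
 * the NO_RX_PKT_HDR_TLV version above, except that packet_header is
 * enabled and its offset is programmed from HAL.
 */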
static QDF_STATUS
dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
{
        int i;
        int mac_id;
        struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
        struct dp_srng *rx_mac_srng;
        QDF_STATUS status = QDF_STATUS_SUCCESS;
        uint32_t target_type = hal_get_target_type(soc->hal_soc);

        if (target_type == TARGET_TYPE_QCN9160)
                return status;

        htt_tlv_filter.mpdu_start = 1;
        htt_tlv_filter.msdu_start = 1;
        htt_tlv_filter.mpdu_end = 1;
        htt_tlv_filter.msdu_end = 1;
        htt_tlv_filter.attention = 1;
        htt_tlv_filter.packet = 1;
        htt_tlv_filter.packet_header = 1;

        htt_tlv_filter.ppdu_start = 0;
        htt_tlv_filter.ppdu_end = 0;
        htt_tlv_filter.ppdu_end_user_stats = 0;
        htt_tlv_filter.ppdu_end_user_stats_ext = 0;
        htt_tlv_filter.ppdu_end_status_done = 0;
        htt_tlv_filter.enable_fp = 1;
        htt_tlv_filter.enable_md = 0;
        htt_tlv_filter.enable_mo = 0;

        htt_tlv_filter.fp_mgmt_filter = 0;
        htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
        htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
                                         FILTER_DATA_MCAST |
                                         FILTER_DATA_DATA);
        htt_tlv_filter.mo_mgmt_filter = 0;
        htt_tlv_filter.mo_ctrl_filter = 0;
        htt_tlv_filter.mo_data_filter = 0;
        htt_tlv_filter.md_data_filter = 0;

        htt_tlv_filter.offset_valid = true;

        htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
        htt_tlv_filter.rx_header_offset =
                                hal_rx_pkt_tlv_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_mpdu_start_offset =
                                hal_rx_mpdu_start_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_mpdu_end_offset =
                                hal_rx_mpdu_end_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_msdu_start_offset =
                                hal_rx_msdu_start_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_msdu_end_offset =
                                hal_rx_msdu_end_offset_get(soc->hal_soc);
        htt_tlv_filter.rx_attn_offset =
                                hal_rx_attn_offset_get(soc->hal_soc);

        for (i = 0; i < MAX_PDEV_CNT; i++) {
                struct dp_pdev *pdev = soc->pdev_list[i];

                if (!pdev)
                        continue;

                for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
                        int mac_for_pdev =
                                dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
                        /*
                         * Obtain lmac id from pdev to access the LMAC ring
                         * in soc context
                         */
                        int lmac_id =
                                dp_get_lmac_id_for_pdev_id(soc, mac_id,
                                                           pdev->pdev_id);

                        rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
                        htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
                                            rx_mac_srng->hal_srng,
                                            RXDMA_BUF, RX_DATA_BUFFER_SIZE,
                                            &htt_tlv_filter);
                }
        }

        return status;
}
#endif

static inline
QDF_STATUS dp_srng_init_li(struct dp_soc *soc, struct dp_srng *srng,
                           int ring_type, int ring_num, int mac_id)
{
        return dp_srng_init_idx(soc, srng, ring_type, ring_num, mac_id, 0);
}

#ifdef QCA_DP_ENABLE_TX_COMP_RING4
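/*
 * Helpers for the fourth TX completion ring (tx_comp_ring[3]), compiled
 * in only when QCA_DP_ENABLE_TX_COMP_RING4 is defined; the #else variants
 * below are no-ops.
 */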
static inline
void dp_deinit_txcomp_ring4(struct dp_soc *soc)
{
        if (soc) {
                wlan_minidump_remove(soc->tx_comp_ring[3].base_vaddr_unaligned,
                                     soc->tx_comp_ring[3].alloc_size,
                                     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
                                     "Transmit_completion_ring");
                dp_srng_deinit(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE, 0);
        }
}

static inline
QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
{
        if (soc) {
                if (dp_srng_init(soc, &soc->tx_comp_ring[3],
                                 WBM2SW_RELEASE, WBM2SW_TXCOMP_RING4_NUM, 0)) {
                        dp_err("%pK: dp_srng_init failed for tx_comp_ring",
                               soc);
                        return QDF_STATUS_E_FAILURE;
                }
                wlan_minidump_log(soc->tx_comp_ring[3].base_vaddr_unaligned,
                                  soc->tx_comp_ring[3].alloc_size,
                                  soc->ctrl_psoc, WLAN_MD_DP_SRNG_TX_COMP,
                                  "Transmit_completion_ring");
        }

        return QDF_STATUS_SUCCESS;
}

static inline
void dp_free_txcomp_ring4(struct dp_soc *soc)
{
        if (soc)
                dp_srng_free(soc, &soc->tx_comp_ring[3]);
}

static inline
QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
                                 uint32_t cached)
{
        if (soc) {
                if (dp_srng_alloc(soc, &soc->tx_comp_ring[3], WBM2SW_RELEASE,
                                  tx_comp_ring_size, cached)) {
                        dp_err("dp_srng_alloc failed for tx_comp_ring");
                        return QDF_STATUS_E_FAILURE;
                }
        }

        return QDF_STATUS_SUCCESS;
}
#else
static inline
void dp_deinit_txcomp_ring4(struct dp_soc *soc)
{
}

static inline
QDF_STATUS dp_init_txcomp_ring4(struct dp_soc *soc)
{
        return QDF_STATUS_SUCCESS;
}

static inline
void dp_free_txcomp_ring4(struct dp_soc *soc)
{
}

static inline
QDF_STATUS dp_alloc_txcomp_ring4(struct dp_soc *soc, uint32_t tx_comp_ring_size,
                                 uint32_t cached)
{
        return QDF_STATUS_SUCCESS;
}
#endif

static void dp_soc_srng_deinit_li(struct dp_soc *soc)
{
        /* Tx Complete ring */
        dp_deinit_txcomp_ring4(soc);
}

static void dp_soc_srng_free_li(struct dp_soc *soc)
{
        dp_free_txcomp_ring4(soc);
}
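
/**
 * dp_soc_srng_alloc_li() - Allocate LI-specific SRNGs
 * @soc: DP soc handle
 *
 * Allocates TX completion ring 4 (when enabled), disabling cached
 * descriptors if NSS offload is configured.
 *
 * Return: QDF_STATUS
 */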
static QDF_STATUS dp_soc_srng_alloc_li(struct dp_soc *soc)
{
        uint32_t tx_comp_ring_size;
        uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
        struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

        soc_cfg_ctx = soc->wlan_cfg_ctx;

        tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
        /* Disable cached desc if NSS offload is enabled */
        if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
                cached = 0;

        if (dp_alloc_txcomp_ring4(soc, tx_comp_ring_size, cached))
                goto fail1;

        return QDF_STATUS_SUCCESS;

fail1:
        dp_soc_srng_free_li(soc);

        return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_soc_srng_init_li(struct dp_soc *soc)
{
        /* Tx comp ring 3 */
        if (dp_init_txcomp_ring4(soc))
                goto fail1;

        return QDF_STATUS_SUCCESS;

fail1:
        /*
         * Cleanup will be done as part of soc_detach, which will
         * be called on pdev attach failure
         */
        dp_soc_srng_deinit_li(soc);

        return QDF_STATUS_E_FAILURE;
}

static void dp_tx_implicit_rbm_set_li(struct dp_soc *soc,
                                      uint8_t tx_ring_id,
                                      uint8_t bm_id)
{
}

static QDF_STATUS dp_txrx_set_vdev_param_li(struct dp_soc *soc,
                                            struct dp_vdev *vdev,
                                            enum cdp_vdev_param_type param,
                                            cdp_config_param_type val)
{
        return QDF_STATUS_SUCCESS;
}

bool
dp_rx_intrabss_handle_nawds_li(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
                               qdf_nbuf_t nbuf_copy,
                               struct cdp_tid_rx_stats *tid_stats,
                               uint8_t link_id)
{
        return false;
}

static void dp_rx_word_mask_subscribe_li(struct dp_soc *soc,
                                         uint32_t *msg_word,
                                         void *rx_filter)
{
}

static void dp_get_rx_hash_key_li(struct dp_soc *soc,
                                  struct cdp_lro_hash_config *lro_hash)
{
        dp_get_rx_hash_key_bytes(lro_hash);
}

static void dp_peer_get_reo_hash_li(struct dp_vdev *vdev,
                                    struct cdp_peer_setup_info *setup_info,
                                    enum cdp_host_reo_dest_ring *reo_dest,
                                    bool *hash_based,
                                    uint8_t *lmac_peer_id_msb)
{
        dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
}

static bool dp_reo_remap_config_li(struct dp_soc *soc,
                                   uint32_t *remap0,
                                   uint32_t *remap1,
                                   uint32_t *remap2)
{
        return dp_reo_remap_config(soc, remap0, remap1, remap2);
}

static uint8_t dp_soc_get_num_soc_li(struct dp_soc *soc)
{
        return 1;
}

static QDF_STATUS dp_txrx_get_vdev_mcast_param_li(struct dp_soc *soc,
                                                  struct dp_vdev *vdev,
                                                  cdp_config_param_type *val)
{
        return QDF_STATUS_SUCCESS;
}

static uint8_t dp_get_hw_link_id_li(struct dp_pdev *pdev)
{
        return 0;
}

static void dp_get_vdev_stats_for_unmap_peer_li(
                                struct dp_vdev *vdev,
                                struct dp_peer *peer,
                                struct cdp_vdev_stats **vdev_stats)
{
}

static struct
dp_soc *dp_get_soc_by_chip_id_li(struct dp_soc *soc,
                                 uint8_t chip_id)
{
        return soc;
}
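
/**
 * dp_initialize_arch_ops_li() - Populate arch_ops with LI handlers
 * @arch_ops: arch ops table to fill
 *
 * Installs the Lithium (LI) implementations of the TX/RX, soc, pdev,
 * vdev and peer operations used by the common DP layer.
 *
 * Return: none
 */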
void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
{
#ifndef QCA_HOST_MODE_WIFI_DISABLED
        arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_li;
        arch_ops->dp_rx_process = dp_rx_process_li;
        arch_ops->dp_tx_send_fast = dp_tx_send;
        arch_ops->tx_comp_get_params_from_hal_desc =
                dp_tx_comp_get_params_from_hal_desc_li;
        arch_ops->dp_tx_process_htt_completion =
                dp_tx_process_htt_completion_li;
        arch_ops->dp_wbm_get_rx_desc_from_hal_desc =
                dp_wbm_get_rx_desc_from_hal_desc_li;
        arch_ops->dp_tx_desc_pool_alloc = dp_tx_desc_pool_alloc_li;
        arch_ops->dp_tx_desc_pool_free = dp_tx_desc_pool_free_li;
        arch_ops->dp_tx_desc_pool_init = dp_tx_desc_pool_init_li;
        arch_ops->dp_tx_desc_pool_deinit = dp_tx_desc_pool_deinit_li;
        arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_li;
        arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_li;
        arch_ops->dp_tx_compute_hw_delay = dp_tx_compute_tx_delay_li;
        arch_ops->dp_rx_chain_msdus = dp_rx_chain_msdus_li;
        arch_ops->dp_rx_wbm_err_reap_desc = dp_rx_wbm_err_reap_desc_li;
        arch_ops->dp_rx_null_q_desc_handle = dp_rx_null_q_desc_handle_li;
#else
        arch_ops->dp_rx_desc_pool_init = dp_rx_desc_pool_init_generic;
        arch_ops->dp_rx_desc_pool_deinit = dp_rx_desc_pool_deinit_generic;
#endif
        arch_ops->txrx_get_context_size = dp_get_context_size_li;
#ifdef WIFI_MONITOR_SUPPORT
        arch_ops->txrx_get_mon_context_size = dp_mon_get_context_size_li;
#endif
        arch_ops->txrx_soc_attach = dp_soc_attach_li;
        arch_ops->txrx_soc_detach = dp_soc_detach_li;
        arch_ops->txrx_soc_init = dp_soc_init_li;
        arch_ops->txrx_soc_deinit = dp_soc_deinit_li;
        arch_ops->txrx_soc_srng_alloc = dp_soc_srng_alloc_li;
        arch_ops->txrx_soc_srng_init = dp_soc_srng_init_li;
        arch_ops->txrx_soc_srng_deinit = dp_soc_srng_deinit_li;
        arch_ops->txrx_soc_srng_free = dp_soc_srng_free_li;
        arch_ops->txrx_pdev_attach = dp_pdev_attach_li;
        arch_ops->txrx_pdev_detach = dp_pdev_detach_li;
        arch_ops->txrx_vdev_attach = dp_vdev_attach_li;
        arch_ops->txrx_vdev_detach = dp_vdev_detach_li;
        arch_ops->txrx_peer_map_attach = dp_peer_map_attach_li;
        arch_ops->txrx_peer_map_detach = dp_peer_map_detach_li;
        arch_ops->get_rx_hash_key = dp_get_rx_hash_key_li;
        arch_ops->dp_set_rx_fst = NULL;
        arch_ops->dp_get_rx_fst = NULL;
        arch_ops->dp_rx_fst_ref = NULL;
        arch_ops->dp_rx_fst_deref = NULL;
        arch_ops->txrx_peer_setup = dp_peer_setup_li;
        arch_ops->dp_rx_desc_cookie_2_va =
                dp_rx_desc_cookie_2_va_li;
        arch_ops->dp_rx_intrabss_mcast_handler =
                dp_rx_intrabss_handle_nawds_li;
        arch_ops->dp_rx_word_mask_subscribe = dp_rx_word_mask_subscribe_li;
        arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li;
        arch_ops->dp_rx_peer_metadata_peer_id_get =
                dp_rx_peer_metadata_peer_id_get_li;
        arch_ops->soc_cfg_attach = dp_soc_cfg_attach_li;
        arch_ops->tx_implicit_rbm_set = dp_tx_implicit_rbm_set_li;
        arch_ops->txrx_set_vdev_param = dp_txrx_set_vdev_param_li;
        arch_ops->txrx_print_peer_stats = dp_print_peer_txrx_stats_li;
        arch_ops->dp_peer_rx_reorder_queue_setup =
                dp_peer_rx_reorder_queue_setup_li;
        arch_ops->peer_get_reo_hash = dp_peer_get_reo_hash_li;
        arch_ops->reo_remap_config = dp_reo_remap_config_li;
        arch_ops->dp_get_soc_by_chip_id = dp_get_soc_by_chip_id_li;
        arch_ops->dp_soc_get_num_soc = dp_soc_get_num_soc_li;
        arch_ops->get_reo_qdesc_addr = dp_rx_get_reo_qdesc_addr_li;
        arch_ops->txrx_get_vdev_mcast_param = dp_txrx_get_vdev_mcast_param_li;
        arch_ops->get_hw_link_id = dp_get_hw_link_id_li;
        arch_ops->txrx_srng_init = dp_srng_init_li;
        arch_ops->dp_get_vdev_stats_for_unmap_peer =
                dp_get_vdev_stats_for_unmap_peer_li;
        arch_ops->dp_get_interface_stats = dp_txrx_get_vdev_stats;
#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
        arch_ops->dp_update_ring_hptp = dp_update_ring_hptp;
#endif
        arch_ops->dp_flush_tx_ring = dp_flush_tcl_ring;
}

#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
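/**
 * dp_tx_comp_get_prefetched_params_from_hal_desc() - Find and prefetch TX desc
 * @soc: DP soc handle
 * @tx_comp_hal_desc: HAL TX completion ring descriptor
 * @r_tx_desc: filled with the SW TX descriptor for this completion
 *
 * Decodes the pool/page/offset from the completion's descriptor ID, looks
 * up the SW TX descriptor and prefetches it into the cache.
 *
 * Return: none
 */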
void dp_tx_comp_get_prefetched_params_from_hal_desc(
                                struct dp_soc *soc,
                                void *tx_comp_hal_desc,
                                struct dp_tx_desc_s **r_tx_desc)
{
        uint8_t pool_id;
        uint32_t tx_desc_id;

        tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
        pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
                        DP_TX_DESC_ID_POOL_OS;

        /* Find Tx descriptor */
        *r_tx_desc = dp_tx_desc_find(soc, pool_id,
                                     (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
                                                DP_TX_DESC_ID_PAGE_OS,
                                     (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
                                                DP_TX_DESC_ID_OFFSET_OS);
        qdf_prefetch((uint8_t *)*r_tx_desc);
}
#endif