/*
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <wlan_utility.h>
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_be_api.h>
#include "dp_mlo.h"
#include <dp_be.h>
#include <dp_be_rx.h>
#include <wlan_cfg.h>
#include <wlan_mlo_mgr_cmn.h>
#include "dp_umac_reset.h"

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_umac_reset_update_partner_map() - Update UMAC reset partner map
 * @mlo_ctx: DP ML context handle
 * @chip_id: chip id
 * @set: flag indicating whether to set or clear the bit
 *
 * Return: void
 */
static void dp_umac_reset_update_partner_map(struct dp_mlo_ctxt *mlo_ctx,
					     int chip_id, bool set);
#endif

/**
 * dp_mlo_ctxt_attach_wifi3() - Attach DP MLO context
 * @ctrl_ctxt: CDP control context
 *
 * Return: DP MLO context handle on success, NULL on failure
 */
static struct cdp_mlo_ctxt *
dp_mlo_ctxt_attach_wifi3(struct cdp_ctrl_mlo_mgr *ctrl_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt =
		qdf_mem_malloc(sizeof(struct dp_mlo_ctxt));

	if (!mlo_ctxt) {
		dp_err("Failed to allocate DP MLO Context");
		return NULL;
	}

	mlo_ctxt->ctrl_ctxt = ctrl_ctxt;

	if (dp_mlo_peer_find_hash_attach_be
			(mlo_ctxt, DP_MAX_MLO_PEER) != QDF_STATUS_SUCCESS) {
		dp_err("Failed to allocate peer hash");
		qdf_mem_free(mlo_ctxt);
		return NULL;
	}

	qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv4,
			     (sizeof(mlo_ctxt->toeplitz_hash_ipv4[0]) *
			      LRO_IPV4_SEED_ARR_SZ));
	qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv6,
			     (sizeof(mlo_ctxt->toeplitz_hash_ipv6[0]) *
			      LRO_IPV6_SEED_ARR_SZ));

	qdf_spinlock_create(&mlo_ctxt->ml_soc_list_lock);
	qdf_spinlock_create(&mlo_ctxt->grp_umac_reset_ctx.grp_ctx_lock);
	dp_mlo_dev_ctxt_list_attach(mlo_ctxt);

	return dp_mlo_ctx_to_cdp(mlo_ctxt);
}

/**
 * dp_mlo_ctxt_detach_wifi3() - Detach DP MLO context
 * @cdp_ml_ctxt: pointer to CDP DP MLO context
 *
 * Return: void
 */
static void dp_mlo_ctxt_detach_wifi3(struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);

	if (!cdp_ml_ctxt)
		return;

	qdf_spinlock_destroy(&mlo_ctxt->grp_umac_reset_ctx.grp_ctx_lock);
	qdf_spinlock_destroy(&mlo_ctxt->ml_soc_list_lock);
	dp_mlo_dev_ctxt_list_detach(mlo_ctxt);
	dp_mlo_peer_find_hash_detach_be(mlo_ctxt);
	qdf_mem_free(mlo_ctxt);
}

/**
 * dp_mlo_set_soc_by_chip_id() - Add a DP SOC to the ML context SOC list
 * @ml_ctxt: DP ML context handle
 * @soc: DP SOC handle
 * @chip_id: MLO chip id
 *
 * Return: void
 */
static void dp_mlo_set_soc_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
				      struct dp_soc *soc,
				      uint8_t chip_id)
{
	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
	ml_ctxt->ml_soc_list[chip_id] = soc;

	/* The same API is called during soc_attach and soc_detach; the
	 * soc parameter is non-NULL or NULL accordingly.
	 */
	if (soc)
		ml_ctxt->ml_soc_cnt++;
	else
		ml_ctxt->ml_soc_cnt--;

	dp_umac_reset_update_partner_map(ml_ctxt, chip_id, !!soc);

	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
}

struct dp_soc *
dp_mlo_get_soc_ref_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
			      uint8_t chip_id)
{
	struct dp_soc *soc = NULL;

	if (!ml_ctxt) {
		dp_warn("MLO context not created, MLO not enabled");
		return NULL;
	}

	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
	soc = ml_ctxt->ml_soc_list[chip_id];

	if (!soc) {
		qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
		return NULL;
	}

	qdf_atomic_inc(&soc->ref_count);
	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);

	return soc;
}
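
/**
 * dp_partner_soc_rx_hw_cc_init() - Initialize HW cookie conversion on
 * @be_soc for the Rx descriptor pools of every partner SOC in the group
 * @mlo_ctxt: DP ML context handle
 * @be_soc: BE SOC whose HW is being programmed
 *
 * Return: QDF_STATUS
 */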
static QDF_STATUS dp_partner_soc_rx_hw_cc_init(struct dp_mlo_ctxt *mlo_ctxt,
					       struct dp_soc_be *be_soc)
{
	uint8_t i;
	struct dp_soc *partner_soc;
	struct dp_soc_be *be_partner_soc;
	uint8_t pool_id;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
		if (!partner_soc) {
			dp_err("partner_soc is NULL");
			continue;
		}

		be_partner_soc = dp_get_be_soc_from_dp_soc(partner_soc);

		for (pool_id = 0; pool_id < MAX_RXDESC_POOLS; pool_id++) {
			qdf_status =
				dp_hw_cookie_conversion_init
					(be_soc,
					 &be_partner_soc->rx_cc_ctx[pool_id]);
			if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
				dp_alert("MLO partner soc RX CC init failed");
				return qdf_status;
			}
		}
	}

	return qdf_status;
}
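
/**
 * dp_mlo_soc_drain_rx_buf() - Drain Rx buffers pending on the REO
 * destination, REO exception and Rx WBM release rings of a SOC, with the
 * Rx interrupt masks cleared for the duration of the drain
 * @soc: DP SOC handle
 * @arg: iterator argument (unused)
 * @chip_id: MLO chip id of @soc
 *
 * Return: void
 */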
static void dp_mlo_soc_drain_rx_buf(struct dp_soc *soc, void *arg, int chip_id)
{
	uint8_t i = 0;
	uint8_t cpu = 0;
	uint8_t rx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_err_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_wbm_rel_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t reo_status_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};

	/* Save the current interrupt masks and disable the interrupts */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		rx_ring_mask[i] = soc->intr_ctx[i].rx_ring_mask;
		rx_err_ring_mask[i] = soc->intr_ctx[i].rx_err_ring_mask;
		rx_wbm_rel_ring_mask[i] = soc->intr_ctx[i].rx_wbm_rel_ring_mask;
		reo_status_ring_mask[i] = soc->intr_ctx[i].reo_status_ring_mask;
		soc->intr_ctx[i].rx_ring_mask = 0;
		soc->intr_ctx[i].rx_err_ring_mask = 0;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
		soc->intr_ctx[i].reo_status_ring_mask = 0;
	}

	/* Make sure dp_service_srngs() is not running on any CPU */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		while (qdf_atomic_test_bit(cpu,
					   &soc->service_rings_running))
			;
	}

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		uint8_t ring = 0;
		uint32_t num_entries = 0;
		hal_ring_handle_t hal_ring_hdl = NULL;
		uint8_t rx_mask = wlan_cfg_get_rx_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_err_mask = wlan_cfg_get_rx_err_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_wbm_rel_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
						soc->wlan_cfg_ctx, i);

		if (rx_mask) {
			/* Iterate through each REO ring and process its
			 * buffers
			 */
			for (ring = 0; ring < soc->num_reo_dest_rings;
			     ring++) {
				if (!(rx_mask & (1 << ring)))
					continue;

				hal_ring_hdl =
					soc->reo_dest_ring[ring].hal_srng;
				num_entries = hal_srng_get_num_entries(
								soc->hal_soc,
								hal_ring_hdl);
				dp_rx_process_be(&soc->intr_ctx[i],
						 hal_ring_hdl,
						 ring,
						 num_entries);
			}
		}

		/* Process REO Exception ring */
		if (rx_err_mask) {
			hal_ring_hdl = soc->reo_exception_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
								soc->hal_soc,
								hal_ring_hdl);
			dp_rx_err_process(&soc->intr_ctx[i], soc,
					  hal_ring_hdl, num_entries);
		}

		/* Process Rx WBM release ring */
		if (rx_wbm_rel_mask) {
			hal_ring_hdl = soc->rx_rel_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
								soc->hal_soc,
								hal_ring_hdl);
			dp_rx_wbm_err_process(&soc->intr_ctx[i], soc,
					      hal_ring_hdl, num_entries);
		}
	}

	/* Restore the interrupt masks */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].rx_ring_mask = rx_ring_mask[i];
		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask[i];
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask[i];
		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask[i];
	}
}
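
/**
 * dp_mlo_soc_setup() - Register a DP SOC and its pdev links with the DP
 * MLO context
 * @soc_hdl: CDP SOC handle
 * @cdp_ml_ctxt: CDP handle of the DP MLO context
 *
 * Return: void
 */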
static void dp_mlo_soc_setup(struct cdp_soc_t *soc_hdl,
			     struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	uint8_t pdev_id;

	if (!cdp_ml_ctxt)
		return;

	be_soc->ml_ctxt = mlo_ctxt;

	for (pdev_id = 0; pdev_id < MAX_PDEV_CNT; pdev_id++) {
		if (soc->pdev_list[pdev_id])
			dp_mlo_update_link_to_pdev_map(soc,
						       soc->pdev_list[pdev_id]);
	}

	dp_mlo_set_soc_by_chip_id(mlo_ctxt, soc, be_soc->mlo_chip_id);
}
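
/**
 * dp_mlo_soc_teardown() - Deregister a DP SOC from the DP MLO context
 * @soc_hdl: CDP SOC handle
 * @cdp_ml_ctxt: CDP handle of the DP MLO context
 * @is_force_down: whether the teardown is forced
 *
 * Return: void
 */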
static void dp_mlo_soc_teardown(struct cdp_soc_t *soc_hdl,
				struct cdp_mlo_ctxt *cdp_ml_ctxt,
				bool is_force_down)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!cdp_ml_ctxt)
		return;

	/* During teardown, drain any Rx buffers still left in the rings */
	dp_mlo_iter_ptnr_soc(be_soc,
			     dp_mlo_soc_drain_rx_buf,
			     NULL);

	dp_mlo_set_soc_by_chip_id(mlo_ctxt, NULL, be_soc->mlo_chip_id);
	be_soc->ml_ctxt = NULL;
}
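
/**
 * dp_mlo_setup_complete() - Called once MLO setup is complete to initialize
 * Rx HW cookie conversion for every SOC in the group
 * @cdp_ml_ctxt: CDP handle of the DP MLO context
 *
 * Return: void
 */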
static void dp_mlo_setup_complete(struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	int i;
	struct dp_soc *soc;
	struct dp_soc_be *be_soc;
	QDF_STATUS qdf_status;

	if (!cdp_ml_ctxt)
		return;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
		if (!soc)
			continue;

		be_soc = dp_get_be_soc_from_dp_soc(soc);
		qdf_status = dp_partner_soc_rx_hw_cc_init(mlo_ctxt, be_soc);
		if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
			dp_alert("MLO partner SOC Rx desc CC init failed");
			qdf_assert_always(0);
		}
	}
}
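
/**
 * dp_mlo_update_delta_tsf2() - Save the delta_tsf2 value for the given pdev
 * @soc_hdl: CDP SOC handle
 * @pdev_id: pdev id
 * @delta_tsf2: delta value to be saved
 *
 * Return: void
 */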
static void dp_mlo_update_delta_tsf2(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id, uint64_t delta_tsf2)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;
	struct dp_pdev_be *be_pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL for pdev_id %u", pdev_id);
		return;
	}

	be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
	be_pdev->delta_tsf2 = delta_tsf2;
}
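
/**
 * dp_mlo_update_delta_tqm() - Save the delta_tqm value for the SOC
 * @soc_hdl: CDP SOC handle
 * @delta_tqm: delta value to be saved
 *
 * Return: void
 */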
static void dp_mlo_update_delta_tqm(struct cdp_soc_t *soc_hdl,
				    uint64_t delta_tqm)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	be_soc->delta_tqm = delta_tqm;
}
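
/**
 * dp_mlo_update_mlo_ts_offset() - Save the MLO timestamp offset for the SOC
 * @soc_hdl: CDP SOC handle
 * @offset: MLO timestamp offset to be saved
 *
 * Return: void
 */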
static void dp_mlo_update_mlo_ts_offset(struct cdp_soc_t *soc_hdl,
					uint64_t offset)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	be_soc->mlo_tstamp_offset = offset;
}

#ifdef CONFIG_MLO_SINGLE_DEV
/**
 * dp_aggregate_vdev_basic_stats() - aggregate vdev basic stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline
void dp_aggregate_vdev_basic_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	DP_UPDATE_BASIC_STATS(tgt_vdev_stats, src_vdev_stats);
}

/**
 * dp_aggregate_vdev_ingress_stats() - aggregate vdev ingress stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline
void dp_aggregate_vdev_ingress_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	/* Aggregate vdev ingress stats */
	DP_UPDATE_INGRESS_STATS(tgt_vdev_stats, src_vdev_stats);
}

/**
 * dp_aggregate_vdev_stats_for_unmapped_peers() - aggregate stats of peers
 * that have already been unmapped
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline
void dp_aggregate_vdev_stats_for_unmapped_peers(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	/* Aggregate unmapped peers stats */
	DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(tgt_vdev_stats,
						src_vdev_stats);
}

/**
 * dp_aggregate_all_vdev_stats() - aggregate vdev ingress and unmapped peer
 * stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline
void dp_aggregate_all_vdev_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	dp_aggregate_vdev_ingress_stats(tgt_vdev_stats, src_vdev_stats);
	dp_aggregate_vdev_stats_for_unmapped_peers(tgt_vdev_stats,
						   src_vdev_stats);
}

/**
 * dp_mlo_vdev_stats_aggr_bridge_vap() - aggregate bridge vdev stats
 * @be_vdev: DP vdev handle
 * @bridge_vdev: DP vdev handle for the bridge vdev
 * @arg: buffer for target vdev stats
 *
 * Return: void
 */
static
void dp_mlo_vdev_stats_aggr_bridge_vap(struct dp_vdev_be *be_vdev,
				       struct dp_vdev *bridge_vdev,
				       void *arg)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;
	struct dp_vdev_be *bridge_be_vdev = NULL;

	bridge_be_vdev = dp_get_be_vdev_from_dp_vdev(bridge_vdev);
	if (!bridge_be_vdev)
		return;

	dp_aggregate_all_vdev_stats(tgt_vdev_stats, &bridge_vdev->stats);
	dp_aggregate_all_vdev_stats(tgt_vdev_stats,
				    &bridge_be_vdev->mlo_stats);
	dp_vdev_iterate_peer(bridge_vdev, dp_update_vdev_stats, tgt_vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);
}

/**
 * dp_aggregate_interface_stats_based_on_peer_type() - aggregate stats at
 * the vdev level based on the type of peer connected to the vdev
 * @vdev: DP vdev handle
 * @vdev_stats: target vdev stats pointer
 * @peer_type: type of peer - MLO link or legacy peer
 *
 * Return: void
 */
static
void dp_aggregate_interface_stats_based_on_peer_type(
					struct dp_vdev *vdev,
					struct cdp_vdev_stats *vdev_stats,
					enum dp_peer_type peer_type)
{
	struct cdp_vdev_stats *tgt_vdev_stats = NULL;
	struct dp_vdev_be *be_vdev = NULL;
	struct dp_soc_be *be_soc = NULL;

	if (!vdev || !vdev->pdev)
		return;

	tgt_vdev_stats = vdev_stats;
	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!be_vdev)
		return;

	if (peer_type == DP_PEER_TYPE_LEGACY) {
		dp_aggregate_all_vdev_stats(tgt_vdev_stats,
					    &vdev->stats);
	} else {
		if (be_vdev->mcast_primary) {
			dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
					      dp_mlo_vdev_stats_aggr_bridge_vap,
					      (void *)vdev_stats,
					      DP_MOD_ID_GENERIC_STATS,
					      DP_BRIDGE_VDEV_ITER,
					      DP_VDEV_ITERATE_SKIP_SELF);
		}
		dp_aggregate_vdev_ingress_stats(tgt_vdev_stats,
						&vdev->stats);
		dp_aggregate_vdev_stats_for_unmapped_peers(
						tgt_vdev_stats,
						&be_vdev->mlo_stats);
	}

	/* Aggregate associated peer stats */
	dp_vdev_iterate_specific_peer_type(vdev,
					   dp_update_vdev_stats,
					   vdev_stats,
					   DP_MOD_ID_GENERIC_STATS,
					   peer_type);
}

/**
 * dp_aggregate_interface_stats() - aggregate stats at the vdev level
 * @vdev: DP vdev handle
 * @vdev_stats: target vdev stats pointer
 *
 * Return: void
 */
static
void dp_aggregate_interface_stats(struct dp_vdev *vdev,
				  struct cdp_vdev_stats *vdev_stats)
{
	struct dp_vdev_be *be_vdev = NULL;
	struct dp_soc_be *be_soc = NULL;

	if (!vdev || !vdev->pdev)
		return;

	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!be_vdev)
		return;

	if (be_vdev->mcast_primary) {
		dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
				      dp_mlo_vdev_stats_aggr_bridge_vap,
				      (void *)vdev_stats,
				      DP_MOD_ID_GENERIC_STATS,
				      DP_BRIDGE_VDEV_ITER,
				      DP_VDEV_ITERATE_SKIP_SELF);
	}

	dp_aggregate_all_vdev_stats(vdev_stats, &be_vdev->mlo_stats);
	dp_aggregate_all_vdev_stats(vdev_stats, &vdev->stats);
	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);
	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
}

/**
 * dp_mlo_aggr_ptnr_iface_stats() - aggregate MLO partner vdev stats
 * @be_vdev: vdev handle
 * @ptnr_vdev: partner vdev handle
 * @arg: target buffer for aggregation
 *
 * Return: void
 */
static
void dp_mlo_aggr_ptnr_iface_stats(struct dp_vdev_be *be_vdev,
				  struct dp_vdev *ptnr_vdev,
				  void *arg)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;

	dp_aggregate_interface_stats(ptnr_vdev, tgt_vdev_stats);
}

/**
 * dp_mlo_aggr_ptnr_iface_stats_mlo_links() - aggregate MLO partner vdev
 * stats based on peer type
 * @be_vdev: vdev handle
 * @ptnr_vdev: partner vdev handle
 * @arg: target buffer for aggregation
 *
 * Return: void
 */
static
void dp_mlo_aggr_ptnr_iface_stats_mlo_links(
					struct dp_vdev_be *be_vdev,
					struct dp_vdev *ptnr_vdev,
					void *arg)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;

	dp_aggregate_interface_stats_based_on_peer_type(ptnr_vdev,
							tgt_vdev_stats,
							DP_PEER_TYPE_MLO_LINK);
}

/**
 * dp_aggregate_sta_interface_stats() - for STA mode, aggregate vdev stats
 * from all link peers
 * @soc: SOC handle
 * @vdev: vdev handle
 * @buf: target buffer for aggregation
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_aggregate_sta_interface_stats(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 void *buf)
{
	struct dp_peer *vap_bss_peer = NULL;
	struct dp_peer *mld_peer = NULL;
	struct dp_peer *link_peer = NULL;
	struct dp_mld_link_peers link_peers_info;
	uint8_t i = 0;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
						  DP_MOD_ID_GENERIC_STATS);
	if (!vap_bss_peer)
		return QDF_STATUS_E_FAILURE;

	mld_peer = DP_GET_MLD_PEER_FROM_PEER(vap_bss_peer);

	if (!mld_peer) {
		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_GENERIC_STATS);
		return QDF_STATUS_E_FAILURE;
	}

	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, &link_peers_info,
					    DP_MOD_ID_GENERIC_STATS);

	for (i = 0; i < link_peers_info.num_links; i++) {
		link_peer = link_peers_info.link_peers[i];
		dp_update_vdev_stats(soc, link_peer, buf);
		dp_aggregate_vdev_ingress_stats(
				(struct cdp_vdev_stats *)buf,
				&link_peer->vdev->stats);
		dp_aggregate_vdev_basic_stats(
				(struct cdp_vdev_stats *)buf,
				&link_peer->vdev->stats);
	}

	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_GENERIC_STATS);
	dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_GENERIC_STATS);

	return ret;
}
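
/**
 * dp_mlo_get_mld_vdev_stats() - Aggregate stats for an MLD vdev, including
 * partner link vdevs and the detached-link stats held in the MLO dev context
 * @soc_hdl: CDP SOC handle
 * @vdev_id: vdev id of the MLD vdev
 * @buf: target buffer (struct cdp_vdev_stats) for aggregation
 * @link_vdev_only: when true, aggregate only this link vdev
 *
 * Return: QDF_STATUS
 */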
static QDF_STATUS dp_mlo_get_mld_vdev_stats(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id, void *buf,
					    bool link_vdev_only)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_GENERIC_STATS);
	struct dp_vdev_be *vdev_be = NULL;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	vdev_be = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!vdev_be || !vdev_be->mlo_dev_ctxt) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
		return QDF_STATUS_E_FAILURE;
	}

	if (vdev->opmode == wlan_op_mode_sta) {
		ret = dp_aggregate_sta_interface_stats(soc, vdev, buf);
		goto complete;
	}

	if (DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap &&
	    vdev->opmode == wlan_op_mode_ap) {
		dp_aggregate_interface_stats_based_on_peer_type(
						vdev, buf,
						DP_PEER_TYPE_MLO_LINK);
		if (link_vdev_only)
			goto complete;

		/* Aggregate stats from partner vdevs */
		dp_mlo_iter_ptnr_vdev(be_soc, vdev_be,
				      dp_mlo_aggr_ptnr_iface_stats_mlo_links,
				      buf,
				      DP_MOD_ID_GENERIC_STATS,
				      DP_LINK_VDEV_ITER,
				      DP_VDEV_ITERATE_SKIP_SELF);
	} else {
		dp_aggregate_interface_stats(vdev, buf);

		if (link_vdev_only)
			goto complete;

		/* Aggregate stats from partner vdevs */
		dp_mlo_iter_ptnr_vdev(be_soc, vdev_be,
				      dp_mlo_aggr_ptnr_iface_stats, buf,
				      DP_MOD_ID_GENERIC_STATS,
				      DP_LINK_VDEV_ITER,
				      DP_VDEV_ITERATE_SKIP_SELF);
	}

	/* Aggregate vdev stats from the MLO ctx for detached MLO links */
	dp_update_mlo_ctxt_stats(buf, &vdev_be->mlo_dev_ctxt->stats);

complete:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);

	return ret;
}
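
/**
 * dp_get_interface_stats_be() - Get vdev stats; in hybrid non-bond MLD AP
 * mode only legacy peers are folded in
 * @soc_hdl: CDP SOC handle
 * @vdev_id: vdev id
 * @buf: target buffer (struct cdp_vdev_stats) for aggregation
 * @is_aggregate: aggregate flag (not consulted in this path)
 *
 * Return: QDF_STATUS
 */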
QDF_STATUS
dp_get_interface_stats_be(struct cdp_soc_t *soc_hdl,
			  uint8_t vdev_id,
			  void *buf,
			  bool is_aggregate)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_GENERIC_STATS);
	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	if (DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap &&
	    vdev->opmode == wlan_op_mode_ap) {
		dp_aggregate_interface_stats_based_on_peer_type(
						vdev, buf,
						DP_PEER_TYPE_LEGACY);
	} else {
		dp_aggregate_interface_stats(vdev, buf);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);

	return QDF_STATUS_SUCCESS;
}
#endif

static struct cdp_mlo_ops dp_mlo_ops = {
	.mlo_soc_setup = dp_mlo_soc_setup,
	.mlo_soc_teardown = dp_mlo_soc_teardown,
	.mlo_setup_complete = dp_mlo_setup_complete,
	.mlo_update_delta_tsf2 = dp_mlo_update_delta_tsf2,
	.mlo_update_delta_tqm = dp_mlo_update_delta_tqm,
	.mlo_update_mlo_ts_offset = dp_mlo_update_mlo_ts_offset,
	.mlo_ctxt_attach = dp_mlo_ctxt_attach_wifi3,
	.mlo_ctxt_detach = dp_mlo_ctxt_detach_wifi3,
#ifdef CONFIG_MLO_SINGLE_DEV
	.mlo_get_mld_vdev_stats = dp_mlo_get_mld_vdev_stats,
#endif
};

void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!params->mlo_enabled) {
		dp_warn("MLO not enabled on SOC");
		return;
	}

	be_soc->mlo_chip_id = params->mlo_chip_id;
	be_soc->ml_ctxt = cdp_mlo_ctx_to_dp(params->ml_context);
	be_soc->mlo_enabled = 1;
	soc->cdp_soc.ops->mlo_ops = &dp_mlo_ops;
}
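
/*
 * Usage sketch (illustrative only, a minimal view under assumptions rather
 * than a verbatim call flow): the MLO manager is expected to create one DP
 * MLO context per MLO group through the cdp_mlo_ops table registered above
 * (.mlo_ctxt_attach / .mlo_ctxt_detach) and hand that context to each
 * participating SOC at attach time. The mlo_ops, ctrl_ctxt, params and
 * chip_id variables below are hypothetical placeholders.
 *
 *	struct cdp_mlo_ctxt *ml_ctxt;
 *
 *	ml_ctxt = mlo_ops->mlo_ctxt_attach(ctrl_ctxt);
 *
 *	params.mlo_enabled = true;
 *	params.mlo_chip_id = chip_id;
 *	params.ml_context = ml_ctxt;
 *	// dp_soc_mlo_fill_params() consumes these during SOC attach and
 *	// registers dp_mlo_ops on the SOC
 *
 *	mlo_ops->mlo_ctxt_detach(ml_ctxt); // after all SOCs are torn down
 */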
void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	uint8_t link_id;

	if (!be_soc->mlo_enabled)
		return;

	if (!ml_ctxt)
		return;

	link_id = be_pdev->mlo_link_id;

	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC) {
		if (!ml_ctxt->link_to_pdev_map[link_id])
			ml_ctxt->link_to_pdev_map[link_id] = be_pdev;
		else
			dp_alert("Attempt to update existing map for link %u",
				 link_id);
	}
}

void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	uint8_t link_id;

	if (!be_soc->mlo_enabled)
		return;

	if (!ml_ctxt)
		return;

	link_id = be_pdev->mlo_link_id;
	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
		ml_ctxt->link_to_pdev_map[link_id] = NULL;
}

static struct dp_pdev_be *
dp_mlo_get_be_pdev_from_link_id(struct dp_mlo_ctxt *ml_ctxt, uint8_t link_id)
{
	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
		return ml_ctxt->link_to_pdev_map[link_id];

	return NULL;
}

void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(pdev->soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);

	if (!be_soc->mlo_enabled) {
		dp_info("MLO not enabled on SOC");
		return;
	}

	be_pdev->mlo_link_id = params->mlo_link_id;
}
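
/**
 * dp_mlo_partner_chips_map() - Map an ML peer into the peer-id-to-object
 * tables of all partner SOCs so the ML peer id resolves on every chip in
 * the group
 * @soc: DP SOC handle of the chip the peer is attached to
 * @peer: DP peer handle
 * @peer_id: ML peer id
 *
 * Return: void
 */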
void dp_mlo_partner_chips_map(struct dp_soc *soc,
			      struct dp_peer *peer,
			      uint16_t peer_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = NULL;
	bool is_ml_peer_id =
		HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
	uint8_t chip_id;
	struct dp_soc *temp_soc;

	/* For a non-ML peer, don't map on the partner chips */
	if (!is_ml_peer_id)
		return;

	mlo_ctxt = be_soc->ml_ctxt;
	if (!mlo_ctxt)
		return;

	qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);

	for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
		temp_soc = mlo_ctxt->ml_soc_list[chip_id];
		if (!temp_soc)
			continue;

		/* Skip if this is the current soc */
		if (temp_soc == soc)
			continue;

		dp_peer_find_id_to_obj_add(temp_soc, peer, peer_id);
	}

	qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
}

qdf_export_symbol(dp_mlo_partner_chips_map);

void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
				uint16_t peer_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	bool is_ml_peer_id =
		HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
	uint8_t chip_id;
	struct dp_soc *temp_soc;

	if (!is_ml_peer_id)
		return;

	if (!mlo_ctxt)
		return;

	qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);

	for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
		temp_soc = mlo_ctxt->ml_soc_list[chip_id];
		if (!temp_soc)
			continue;

		/* Skip if this is the current soc */
		if (temp_soc == soc)
			continue;

		dp_peer_find_id_to_obj_remove(temp_soc, peer_id);
	}

	qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
}

qdf_export_symbol(dp_mlo_partner_chips_unmap);

uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return be_soc->mlo_chip_id;
}

qdf_export_symbol(dp_mlo_get_chip_id);

struct dp_peer *
dp_mlo_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				      uint8_t *peer_mac_addr,
				      int mac_addr_is_aligned,
				      uint8_t vdev_id,
				      uint8_t chip_id,
				      enum dp_mod_id mod_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *link_peer_soc = NULL;
	struct dp_peer *peer = NULL;

	if (!mlo_ctxt)
		return NULL;

	link_peer_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
	if (!link_peer_soc)
		return NULL;

	peer = dp_peer_find_hash_find(link_peer_soc, peer_mac_addr,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
	qdf_atomic_dec(&link_peer_soc->ref_count);

	return peer;
}

qdf_export_symbol(dp_mlo_link_peer_hash_find_by_chip_id);
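
/**
 * dp_mlo_get_rx_hash_key() - Fetch the Toeplitz Rx hash key; when MLO is
 * enabled, the group-common key from the ML context is used so partner
 * chips hash flows identically
 * @soc: DP SOC handle
 * @lro_hash: LRO hash config to be filled
 *
 * Return: void
 */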
void dp_mlo_get_rx_hash_key(struct dp_soc *soc,
			    struct cdp_lro_hash_config *lro_hash)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !ml_ctxt)
		return dp_get_rx_hash_key_bytes(lro_hash);

	qdf_mem_copy(lro_hash->toeplitz_hash_ipv4, ml_ctxt->toeplitz_hash_ipv4,
		     (sizeof(lro_hash->toeplitz_hash_ipv4[0]) *
		      LRO_IPV4_SEED_ARR_SZ));
	qdf_mem_copy(lro_hash->toeplitz_hash_ipv6, ml_ctxt->toeplitz_hash_ipv6,
		     (sizeof(lro_hash->toeplitz_hash_ipv6[0]) *
		      LRO_IPV6_SEED_ARR_SZ));
}

struct dp_soc *
dp_rx_replenish_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *replenish_soc;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	if (be_soc->mlo_chip_id == chip_id)
		return soc;

	replenish_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
	if (qdf_unlikely(!replenish_soc)) {
		dp_alert("replenish SOC is NULL");
		qdf_assert_always(0);
	}

	return replenish_soc;
}

uint8_t dp_soc_get_num_soc_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return 1;

	return mlo_ctxt->ml_soc_cnt;
}

struct dp_soc *
dp_soc_get_by_idle_bm_id(struct dp_soc *soc, uint8_t idle_bm_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *partner_soc = NULL;
	uint8_t chip_id;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);

		if (!partner_soc)
			continue;

		if (partner_soc->idle_link_bm_id == idle_bm_id)
			return partner_soc;
	}

	return NULL;
}

#ifdef WLAN_MLO_MULTI_CHIP
static void dp_print_mlo_partner_list(struct dp_vdev_be *be_vdev,
				      struct dp_vdev *partner_vdev,
				      void *arg)
{
	struct dp_vdev_be *partner_vdev_be = NULL;
	struct dp_soc_be *partner_soc_be = NULL;

	partner_vdev_be = dp_get_be_vdev_from_dp_vdev(partner_vdev);
	partner_soc_be = dp_get_be_soc_from_dp_soc(partner_vdev->pdev->soc);

	DP_PRINT_STATS("is_bridge_vap = %s, mcast_primary = %s, vdev_id = %d, pdev_id = %d, chip_id = %d",
		       partner_vdev->is_bridge_vdev ? "true" : "false",
		       partner_vdev_be->mcast_primary ? "true" : "false",
		       partner_vdev->vdev_id,
		       partner_vdev->pdev->pdev_id,
		       partner_soc_be->mlo_chip_id);
}
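
/**
 * dp_mlo_iter_ptnr_vdev() - Iterate over the partner link and/or bridge
 * vdevs of an MLO vdev across all chips in the group and invoke @func on
 * each of them
 * @be_soc: BE SOC handle of the local chip
 * @be_vdev: BE vdev whose partners are to be walked
 * @func: callback invoked for each partner vdev
 * @arg: opaque context passed to @func
 * @mod_id: module id used while holding vdev references
 * @type: iteration type (DP_LINK_VDEV_ITER, DP_BRIDGE_VDEV_ITER or
 *	  DP_ALL_VDEV_ITER)
 * @include_self_vdev: whether to invoke @func on the self vdev too
 *
 * Return: void
 */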
void dp_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
			   struct dp_vdev_be *be_vdev,
			   dp_ptnr_vdev_iter_func func,
			   void *arg,
			   enum dp_mod_id mod_id,
			   uint8_t type,
			   bool include_self_vdev)
{
	int i = 0;
	int j = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
	struct dp_vdev *self_vdev = &be_vdev->vdev;

	if (type < DP_LINK_VDEV_ITER || type > DP_ALL_VDEV_ITER) {
		dp_err("invalid iterate type");
		return;
	}

	if (!be_vdev->mlo_dev_ctxt) {
		if (!include_self_vdev)
			return;

		(*func)(be_vdev, self_vdev, arg);

		/* Without an MLO dev context there are no partner or
		 * bridge vdevs to walk, so don't enter the loops below.
		 */
		return;
	}

	for (i = 0; (i < WLAN_MAX_MLO_CHIPS) &&
	     IS_LINK_VDEV_ITER_REQUIRED(type); i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *ptnr_vdev;

			ptnr_vdev = dp_vdev_get_ref_by_id(
					ptnr_soc,
					be_vdev->mlo_dev_ctxt->vdev_list[i][j],
					mod_id);
			if (!ptnr_vdev)
				continue;

			if ((ptnr_vdev == self_vdev) && (!include_self_vdev)) {
				dp_vdev_unref_delete(ptnr_vdev->pdev->soc,
						     ptnr_vdev,
						     mod_id);
				continue;
			}

			(*func)(be_vdev, ptnr_vdev, arg);
			dp_vdev_unref_delete(ptnr_vdev->pdev->soc,
					     ptnr_vdev,
					     mod_id);
		}
	}

	for (i = 0; (i < WLAN_MAX_MLO_CHIPS) &&
	     IS_BRIDGE_VDEV_ITER_REQUIRED(type); i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *bridge_vdev;

			bridge_vdev = dp_vdev_get_ref_by_id(
				ptnr_soc,
				be_vdev->mlo_dev_ctxt->bridge_vdev[i][j],
				mod_id);
			if (!bridge_vdev)
				continue;

			if ((bridge_vdev == self_vdev) &&
			    (!include_self_vdev)) {
				dp_vdev_unref_delete(
						bridge_vdev->pdev->soc,
						bridge_vdev,
						mod_id);
				continue;
			}

			(*func)(be_vdev, bridge_vdev, arg);
			dp_vdev_unref_delete(bridge_vdev->pdev->soc,
					     bridge_vdev,
					     mod_id);
		}
	}
}

qdf_export_symbol(dp_mlo_iter_ptnr_vdev);

void dp_mlo_debug_print_ptnr_info(struct dp_vdev *vdev)
{
	struct dp_vdev_be *be_vdev = NULL;
	struct dp_soc_be *be_soc = NULL;

	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	DP_PRINT_STATS("self vdev is_bridge_vap = %s, mcast_primary = %s, vdev_id = %d, pdev_id = %d, chip_id = %d",
		       vdev->is_bridge_vdev ? "true" : "false",
		       be_vdev->mcast_primary ? "true" : "false",
		       vdev->vdev_id,
		       vdev->pdev->pdev_id,
		       dp_mlo_get_chip_id(vdev->pdev->soc));

	dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
			      dp_print_mlo_partner_list,
			      NULL, DP_MOD_ID_GENERIC_STATS,
			      DP_ALL_VDEV_ITER,
			      DP_VDEV_ITERATE_SKIP_SELF);
}
#endif

#ifdef WLAN_MCAST_MLO
struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
					      struct dp_vdev_be *be_vdev,
					      enum dp_mod_id mod_id)
{
	int i = 0;
	int j = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
	struct dp_vdev *vdev = &be_vdev->vdev;

	if (!be_vdev->mlo_dev_ctxt)
		return NULL;

	if (be_vdev->mcast_primary) {
		if (dp_vdev_get_ref((struct dp_soc *)be_soc, vdev, mod_id) !=
					QDF_STATUS_SUCCESS)
			return NULL;

		return vdev;
	}

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *ptnr_vdev = NULL;
			struct dp_vdev_be *be_ptnr_vdev = NULL;

			ptnr_vdev = dp_vdev_get_ref_by_id(
					ptnr_soc,
					be_vdev->mlo_dev_ctxt->vdev_list[i][j],
					mod_id);
			if (!ptnr_vdev)
				continue;

			be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
			if (be_ptnr_vdev->mcast_primary)
				return ptnr_vdev;

			dp_vdev_unref_delete(be_ptnr_vdev->vdev.pdev->soc,
					     &be_ptnr_vdev->vdev,
					     mod_id);
		}
	}

	return NULL;
}

qdf_export_symbol(dp_mlo_get_mcast_primary_vdev);
#endif

/**
 * dp_mlo_iter_ptnr_soc() - iterate through the MLO SOC list and call the
 * callback for each SOC
 * @be_soc: dp_soc_be pointer
 * @func: function to be called for each SOC
 * @arg: context to be passed to the callback
 *
 * Return: true if MLO is enabled, false otherwise
 */
bool dp_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc, dp_ptnr_soc_iter_func func,
			  void *arg)
{
	int i = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !be_soc->ml_ctxt)
		return false;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;

		(*func)(ptnr_soc, arg, i);
	}

	return true;
}

qdf_export_symbol(dp_mlo_iter_ptnr_soc);
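
/**
 * dp_mlo_get_mlo_ts_offset() - Fetch the MLO timestamp offset of the SOC
 * that owns the given pdev
 * @be_pdev: BE pdev handle
 *
 * Return: MLO timestamp offset
 */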
static inline uint64_t dp_mlo_get_mlo_ts_offset(struct dp_pdev_be *be_pdev)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev;
	struct dp_soc_be *be_soc;
	uint32_t mlo_offset;

	pdev = &be_pdev->pdev;
	soc = pdev->soc;
	be_soc = dp_get_be_soc_from_dp_soc(soc);

	mlo_offset = be_soc->mlo_tstamp_offset;

	return mlo_offset;
}
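
/**
 * dp_mlo_get_delta_tsf2_wrt_mlo_offset() - Get the delta between the MLO
 * timestamp offset and the pdev delta_tsf2 for the given hw link
 * @soc: DP SOC handle
 * @hw_link_id: HW link id of the pdev
 *
 * Return: mlo_offset - delta_tsf2, or 0 if the ML context or pdev is absent
 */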
int32_t dp_mlo_get_delta_tsf2_wrt_mlo_offset(struct dp_soc *soc,
					     uint8_t hw_link_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	struct dp_pdev_be *be_pdev;
	int32_t delta_tsf2_mlo_offset;
	int32_t mlo_offset, delta_tsf2;

	if (!ml_ctxt)
		return 0;

	be_pdev = dp_mlo_get_be_pdev_from_link_id(ml_ctxt, hw_link_id);
	if (!be_pdev)
		return 0;

	mlo_offset = dp_mlo_get_mlo_ts_offset(be_pdev);
	delta_tsf2 = be_pdev->delta_tsf2;

	delta_tsf2_mlo_offset = mlo_offset - delta_tsf2;

	return delta_tsf2_mlo_offset;
}
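
/**
 * dp_mlo_get_delta_tqm_wrt_mlo_offset() - Get the delta between the MLO
 * timestamp offset and delta_tqm of the SOC
 * @soc: DP SOC handle
 *
 * Return: mlo_offset - delta_tqm
 */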
int32_t dp_mlo_get_delta_tqm_wrt_mlo_offset(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	int32_t delta_tqm_mlo_offset;
	int32_t mlo_offset, delta_tqm;

	mlo_offset = be_soc->mlo_tstamp_offset;
	delta_tqm = be_soc->delta_tqm;

	delta_tqm_mlo_offset = mlo_offset - delta_tqm;

	return delta_tqm_mlo_offset;
}

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_umac_reset_update_partner_map() - Update UMAC reset partner map
 * @mlo_ctx: DP ML context handle
 * @chip_id: chip id
 * @set: flag indicating whether to set or clear the bit
 *
 * Return: void
 */
static void dp_umac_reset_update_partner_map(struct dp_mlo_ctxt *mlo_ctx,
					     int chip_id, bool set)
{
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx =
		&mlo_ctx->grp_umac_reset_ctx;

	if (set)
		qdf_atomic_set_bit(chip_id, &grp_umac_reset_ctx->partner_map);
	else
		qdf_atomic_clear_bit(chip_id,
				     &grp_umac_reset_ctx->partner_map);
}
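
/**
 * dp_umac_reset_notify_asserted_soc() - Clear an asserted SOC from the UMAC
 * reset partner map so the group no longer waits on it
 * @soc: DP SOC handle
 *
 * Return: QDF_STATUS
 */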
QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc)
{
	struct dp_mlo_ctxt *mlo_ctx;
	struct dp_soc_be *be_soc;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (!be_soc) {
		dp_umac_reset_err("null be_soc");
		return QDF_STATUS_E_NULL_VALUE;
	}

	mlo_ctx = be_soc->ml_ctxt;
	if (!mlo_ctx) {
		/* This API can be called for a non-MLO SOC as well. Hence,
		 * return the status as success when mlo_ctx is NULL.
		 */
		return QDF_STATUS_SUCCESS;
	}

	dp_umac_reset_update_partner_map(mlo_ctx, be_soc->mlo_chip_id, false);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_umac_reset_complete_umac_recovery() - Complete UMAC reset session
 * @soc: dp soc handle
 *
 * Return: void
 */
void dp_umac_reset_complete_umac_recovery(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_alert("Umac reset was handled on soc %pK", soc);
		return;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	grp_umac_reset_ctx->umac_reset_in_progress = false;
	grp_umac_reset_ctx->is_target_recovery = false;
	grp_umac_reset_ctx->response_map = 0;
	grp_umac_reset_ctx->request_map = 0;
	grp_umac_reset_ctx->initiator_chip_id = 0;

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	dp_umac_reset_alert("Umac reset was handled on mlo group ctxt %pK",
			    mlo_ctx);
}

/**
 * dp_umac_reset_initiate_umac_recovery() - Initiate UMAC reset session
 * @soc: dp soc handle
 * @umac_reset_ctx: UMAC reset context
 * @rx_event: Rx event received
 * @is_target_recovery: flag to indicate if it is triggered for target
 *			recovery
 *
 * Return: status
 */
QDF_STATUS dp_umac_reset_initiate_umac_recovery(struct dp_soc *soc,
				struct dp_soc_umac_reset_ctx *umac_reset_ctx,
				enum umac_reset_rx_event rx_event,
				bool is_target_recovery)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!mlo_ctx)
		return dp_umac_reset_validate_n_update_state_machine_on_rx(
				umac_reset_ctx, rx_event,
				UMAC_RESET_STATE_WAIT_FOR_TRIGGER,
				UMAC_RESET_STATE_DO_TRIGGER_RECEIVED);

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	if (grp_umac_reset_ctx->umac_reset_in_progress) {
		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return QDF_STATUS_E_INVAL;
	}

	status = dp_umac_reset_validate_n_update_state_machine_on_rx(
				umac_reset_ctx, rx_event,
				UMAC_RESET_STATE_WAIT_FOR_TRIGGER,
				UMAC_RESET_STATE_DO_TRIGGER_RECEIVED);
	if (status != QDF_STATUS_SUCCESS) {
		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return status;
	}

	grp_umac_reset_ctx->umac_reset_in_progress = true;
	grp_umac_reset_ctx->is_target_recovery = is_target_recovery;

	/* We don't wait for the 'Umac trigger' message from all socs */
	grp_umac_reset_ctx->request_map = grp_umac_reset_ctx->partner_map;
	grp_umac_reset_ctx->response_map = grp_umac_reset_ctx->partner_map;
	grp_umac_reset_ctx->initiator_chip_id = dp_mlo_get_chip_id(soc);
	grp_umac_reset_ctx->umac_reset_count++;

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_umac_reset_handle_action_cb() - Function to call action callback
 * @soc: dp soc handle
 * @umac_reset_ctx: UMAC reset context
 * @action: Action to call the callback for
 *
 * Return: QDF_STATUS status
 */
QDF_STATUS
dp_umac_reset_handle_action_cb(struct dp_soc *soc,
			       struct dp_soc_umac_reset_ctx *umac_reset_ctx,
			       enum umac_reset_action action)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_debug("MLO context is Null");
		goto handle;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	qdf_atomic_set_bit(dp_mlo_get_chip_id(soc),
			   &grp_umac_reset_ctx->request_map);

	dp_umac_reset_debug("partner_map %u request_map %u",
			    grp_umac_reset_ctx->partner_map,
			    grp_umac_reset_ctx->request_map);

	/* This logic is needed for synchronization between mlo socs */
	if ((grp_umac_reset_ctx->partner_map & grp_umac_reset_ctx->request_map)
			!= grp_umac_reset_ctx->partner_map) {
		struct hif_softc *hif_sc = HIF_GET_SOFTC(soc->hif_handle);
		struct hif_umac_reset_ctx *hif_umac_reset_ctx;

		if (!hif_sc) {
			hif_err("scn is null");
			qdf_assert_always(0);
			return QDF_STATUS_E_FAILURE;
		}

		hif_umac_reset_ctx = &hif_sc->umac_reset_ctx;

		/* Mark the action as pending */
		umac_reset_ctx->pending_action = action;
		/* Reschedule the tasklet and exit */
		tasklet_hi_schedule(&hif_umac_reset_ctx->intr_tq);
		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

		return QDF_STATUS_SUCCESS;
	}

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
	umac_reset_ctx->pending_action = UMAC_RESET_ACTION_NONE;

handle:
	if (!umac_reset_ctx->rx_actions.cb[action]) {
		dp_umac_reset_err("rx callback is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	return umac_reset_ctx->rx_actions.cb[action](soc);
}

/**
 * dp_umac_reset_post_tx_cmd() - Iterate partner socs and post Tx command
 * @umac_reset_ctx: UMAC reset context
 * @tx_cmd: Tx command to be posted
 *
 * Return: QDF status of operation
 */
QDF_STATUS
dp_umac_reset_post_tx_cmd(struct dp_soc_umac_reset_ctx *umac_reset_ctx,
			  enum umac_reset_tx_cmd tx_cmd)
{
	struct dp_soc *soc = container_of(umac_reset_ctx, struct dp_soc,
					  umac_reset_ctx);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_post_tx_cmd_via_shmem(soc, &tx_cmd, 0);
		return QDF_STATUS_SUCCESS;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	qdf_atomic_set_bit(dp_mlo_get_chip_id(soc),
			   &grp_umac_reset_ctx->response_map);

	/* This logic is needed for synchronization between mlo socs */
	if ((grp_umac_reset_ctx->partner_map &
	     grp_umac_reset_ctx->response_map)
			!= grp_umac_reset_ctx->partner_map) {
		dp_umac_reset_debug(
			"Response(s) pending : expected map %u current map %u",
			grp_umac_reset_ctx->partner_map,
			grp_umac_reset_ctx->response_map);

		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return QDF_STATUS_SUCCESS;
	}

	dp_umac_reset_debug(
		"All responses received: expected map %u current map %u",
		grp_umac_reset_ctx->partner_map,
		grp_umac_reset_ctx->response_map);

	grp_umac_reset_ctx->response_map = 0;
	grp_umac_reset_ctx->request_map = 0;
	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	dp_mlo_iter_ptnr_soc(be_soc, &dp_umac_reset_post_tx_cmd_via_shmem,
			     &tx_cmd);

	if (tx_cmd == UMAC_RESET_TX_CMD_POST_RESET_COMPLETE_DONE)
		dp_umac_reset_complete_umac_recovery(soc);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_umac_reset_initiator_check() - Check if SOC is the UMAC reset initiator
 * @soc: dp soc handle
 *
 * Return: true if the soc is the initiator, false otherwise
 */
bool dp_umac_reset_initiator_check(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return true;

	return (mlo_ctx->grp_umac_reset_ctx.initiator_chip_id ==
				dp_mlo_get_chip_id(soc));
}

/**
 * dp_umac_reset_target_recovery_check() - Check if this is for target
 * recovery
 * @soc: dp soc handle
 *
 * Return: true if the session is for target recovery, false otherwise
 */
bool dp_umac_reset_target_recovery_check(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return false;

	return mlo_ctx->grp_umac_reset_ctx.is_target_recovery;
}

/**
 * dp_umac_reset_is_soc_ignored() - Check if this soc is to be ignored
 * @soc: dp soc handle
 *
 * Return: true if the soc is ignored, false otherwise
 */
bool dp_umac_reset_is_soc_ignored(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return false;

	return !qdf_atomic_test_bit(dp_mlo_get_chip_id(soc),
				    &mlo_ctx->grp_umac_reset_ctx.partner_map);
}
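
/**
 * dp_mlo_umac_reset_stats_print() - Print the MLO group UMAC reset stats
 * @soc: DP SOC handle
 *
 * Return: QDF_STATUS
 */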
QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc)
{
	struct dp_mlo_ctxt *mlo_ctx;
	struct dp_soc_be *be_soc;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (!be_soc) {
		dp_umac_reset_err("null be_soc");
		return QDF_STATUS_E_NULL_VALUE;
	}

	mlo_ctx = be_soc->ml_ctxt;
	if (!mlo_ctx) {
		/* This API can be called for a non-MLO SOC as well. Hence,
		 * return the status as success when mlo_ctx is NULL.
		 */
		return QDF_STATUS_SUCCESS;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;

	DP_UMAC_RESET_PRINT_STATS("MLO UMAC RESET stats\n"
		"\t\tPartner map :%x\n"
		"\t\tRequest map :%x\n"
		"\t\tResponse map :%x\n"
		"\t\tIs target recovery :%d\n"
		"\t\tIs Umac reset in progress :%d\n"
		"\t\tNumber of UMAC resets triggered :%d\n"
		"\t\tInitiator chip ID :%d\n",
		grp_umac_reset_ctx->partner_map,
		grp_umac_reset_ctx->request_map,
		grp_umac_reset_ctx->response_map,
		grp_umac_reset_ctx->is_target_recovery,
		grp_umac_reset_ctx->umac_reset_in_progress,
		grp_umac_reset_ctx->umac_reset_count,
		grp_umac_reset_ctx->initiator_chip_id);

	return QDF_STATUS_SUCCESS;
}
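
/**
 * dp_get_umac_reset_in_progress_state() - Get the UMAC reset progress state,
 * consulting the MLO group context when one exists and also reporting resets
 * that completed within the configured buffer window
 * @psoc: CDP SOC handle
 *
 * Return: enum cdp_umac_reset_state
 */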
enum cdp_umac_reset_state
dp_get_umac_reset_in_progress_state(struct cdp_soc_t *psoc)
{
	struct dp_soc_umac_reset_ctx *umac_reset_ctx;
	struct dp_soc *soc = (struct dp_soc *)psoc;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
	struct dp_soc_be *be_soc = NULL;
	struct dp_mlo_ctxt *mlo_ctx = NULL;
	enum cdp_umac_reset_state umac_reset_is_inprogress;

	if (!soc) {
		dp_umac_reset_err("DP SOC is null");
		return CDP_UMAC_RESET_INVALID_STATE;
	}

	umac_reset_ctx = &soc->umac_reset_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (be_soc)
		mlo_ctx = be_soc->ml_ctxt;

	if (mlo_ctx) {
		grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
		umac_reset_is_inprogress =
			grp_umac_reset_ctx->umac_reset_in_progress;
	} else {
		umac_reset_is_inprogress = (umac_reset_ctx->current_state !=
					    UMAC_RESET_STATE_WAIT_FOR_TRIGGER);
	}

	if (umac_reset_is_inprogress)
		return CDP_UMAC_RESET_IN_PROGRESS;

	/* Check if the umac reset was in progress during the buffer
	 * window.
	 */
	umac_reset_is_inprogress =
		((qdf_get_log_timestamp_usecs() -
		  umac_reset_ctx->ts.post_reset_complete_done) <=
		 (wlan_cfg_get_umac_reset_buffer_window_ms(soc->wlan_cfg_ctx) *
		  1000));

	return (umac_reset_is_inprogress ?
			CDP_UMAC_RESET_IN_PROGRESS_DURING_BUFFER_WINDOW :
			CDP_UMAC_RESET_NOT_IN_PROGRESS);
}
#endif
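
/**
 * dp_get_soc_by_chip_id_be() - Get the DP SOC for an MLO chip id; the local
 * SOC is returned when MLO is disabled or when @chip_id is the local chip
 * @soc: DP SOC handle of the local chip
 * @chip_id: MLO chip id to look up
 *
 * Return: DP SOC handle (a reference is held when a partner SOC is returned)
 */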
struct dp_soc *
dp_get_soc_by_chip_id_be(struct dp_soc *soc, uint8_t chip_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *partner_soc;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	if (be_soc->mlo_chip_id == chip_id)
		return soc;

	partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
	return partner_soc;
}