dp_mlo.c

/*
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <wlan_utility.h>
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_be_api.h>
#include "dp_mlo.h"
#include <dp_be.h>
#include <dp_be_rx.h>
#include <wlan_cfg.h>
#include <wlan_mlo_mgr_cmn.h>
#include "dp_umac_reset.h"

#define dp_aggregate_vdev_stats_for_unmapped_peers(_tgtobj, _srcobj) \
	DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj)

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_umac_reset_update_partner_map() - Update Umac reset partner map
 * @mlo_ctx: mlo soc context
 * @chip_id: chip id
 * @set: flag indicating whether to set or clear the bit
 *
 * Return: void
 */
static void dp_umac_reset_update_partner_map(struct dp_mlo_ctxt *mlo_ctx,
					     int chip_id, bool set);
#endif

/**
 * dp_mlo_ctxt_attach_wifi3() - Attach DP MLO context
 * @ctrl_ctxt: CDP control context
 *
 * Return: DP MLO context handle on success, NULL on failure
 */
static struct cdp_mlo_ctxt *
dp_mlo_ctxt_attach_wifi3(struct cdp_ctrl_mlo_mgr *ctrl_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt =
		qdf_mem_malloc(sizeof(struct dp_mlo_ctxt));

	if (!mlo_ctxt) {
		dp_err("Failed to allocate DP MLO Context");
		return NULL;
	}

	mlo_ctxt->ctrl_ctxt = ctrl_ctxt;

	if (dp_mlo_peer_find_hash_attach_be
			(mlo_ctxt, DP_MAX_MLO_PEER) != QDF_STATUS_SUCCESS) {
		dp_err("Failed to allocate peer hash");
		qdf_mem_free(mlo_ctxt);
		return NULL;
	}

	qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv4,
			     (sizeof(mlo_ctxt->toeplitz_hash_ipv4[0]) *
			      LRO_IPV4_SEED_ARR_SZ));
	qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv6,
			     (sizeof(mlo_ctxt->toeplitz_hash_ipv6[0]) *
			      LRO_IPV6_SEED_ARR_SZ));

	qdf_spinlock_create(&mlo_ctxt->ml_soc_list_lock);
	qdf_spinlock_create(&mlo_ctxt->grp_umac_reset_ctx.grp_ctx_lock);
	dp_mlo_dev_ctxt_list_attach(mlo_ctxt);

	return dp_mlo_ctx_to_cdp(mlo_ctxt);
}

/**
 * dp_mlo_ctxt_detach_wifi3() - Detach DP MLO context
 * @cdp_ml_ctxt: pointer to CDP DP MLO context
 *
 * Return: void
 */
static void dp_mlo_ctxt_detach_wifi3(struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);

	if (!cdp_ml_ctxt)
		return;

	qdf_spinlock_destroy(&mlo_ctxt->grp_umac_reset_ctx.grp_ctx_lock);
	qdf_spinlock_destroy(&mlo_ctxt->ml_soc_list_lock);
	dp_mlo_dev_ctxt_list_detach(mlo_ctxt);
	dp_mlo_peer_find_hash_detach_be(mlo_ctxt);
	qdf_mem_free(mlo_ctxt);
}

/**
 * dp_mlo_set_soc_by_chip_id() - Add DP soc to ML context soc list
 * @ml_ctxt: DP ML context handle
 * @soc: DP soc handle
 * @chip_id: MLO chip id
 *
 * Return: void
 */
static void dp_mlo_set_soc_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
				      struct dp_soc *soc,
				      uint8_t chip_id)
{
	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
	ml_ctxt->ml_soc_list[chip_id] = soc;

	/* This API is called from both soc_attach (with a non-NULL soc)
	 * and soc_detach (with a NULL soc); update the soc count
	 * accordingly.
	 */
	if (soc)
		ml_ctxt->ml_soc_cnt++;
	else
		ml_ctxt->ml_soc_cnt--;

	dp_umac_reset_update_partner_map(ml_ctxt, chip_id, !!soc);
	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
}

struct dp_soc *
dp_mlo_get_soc_ref_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
			      uint8_t chip_id)
{
	struct dp_soc *soc = NULL;

	if (!ml_ctxt) {
		dp_warn("MLO context not created, MLO not enabled");
		return NULL;
	}

	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
	soc = ml_ctxt->ml_soc_list[chip_id];

	if (!soc) {
		qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
		return NULL;
	}

	qdf_atomic_inc(&soc->ref_count);
	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);

	return soc;
}
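
/**
 * dp_partner_soc_rx_hw_cc_init() - Initialize HW cookie conversion for the
 * Rx descriptor pools of all partner SOCs
 * @mlo_ctxt: DP MLO context handle
 * @be_soc: DP BE soc on which the conversion contexts are initialized
 *
 * Return: QDF_STATUS
 */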
static QDF_STATUS dp_partner_soc_rx_hw_cc_init(struct dp_mlo_ctxt *mlo_ctxt,
					       struct dp_soc_be *be_soc)
{
	uint8_t i;
	struct dp_soc *partner_soc;
	struct dp_soc_be *be_partner_soc;
	uint8_t pool_id;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
		if (!partner_soc) {
			dp_err("partner_soc is NULL");
			continue;
		}

		be_partner_soc = dp_get_be_soc_from_dp_soc(partner_soc);

		for (pool_id = 0; pool_id < MAX_RXDESC_POOLS; pool_id++) {
			qdf_status =
				dp_hw_cookie_conversion_init
					(be_soc,
					 &be_partner_soc->rx_cc_ctx[pool_id]);
			if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
				dp_alert("MLO partner soc RX CC init failed");
				return qdf_status;
			}
		}
	}

	return qdf_status;
}
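
/**
 * dp_mlo_soc_drain_rx_buf() - Drain pending Rx buffers on a soc
 * @soc: DP soc handle
 * @arg: opaque iterator argument (unused)
 * @chip_id: MLO chip id of @soc
 *
 * Disable the Rx-related interrupts, wait for any in-flight
 * dp_service_srngs() to finish, process whatever is left on the REO
 * destination, REO exception and WBM Rx release rings, and then restore
 * the interrupt masks.
 *
 * Return: void
 */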
static void dp_mlo_soc_drain_rx_buf(struct dp_soc *soc, void *arg, int chip_id)
{
	uint8_t i = 0;
	uint8_t cpu = 0;
	uint8_t rx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_err_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_wbm_rel_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t reo_status_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};

	/* Save the current interrupt masks and disable the interrupts */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		rx_ring_mask[i] = soc->intr_ctx[i].rx_ring_mask;
		rx_err_ring_mask[i] = soc->intr_ctx[i].rx_err_ring_mask;
		rx_wbm_rel_ring_mask[i] = soc->intr_ctx[i].rx_wbm_rel_ring_mask;
		reo_status_ring_mask[i] = soc->intr_ctx[i].reo_status_ring_mask;
		soc->intr_ctx[i].rx_ring_mask = 0;
		soc->intr_ctx[i].rx_err_ring_mask = 0;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
		soc->intr_ctx[i].reo_status_ring_mask = 0;
	}

	/* Make sure dp_service_srngs() is not running on any CPU */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		while (qdf_atomic_test_bit(cpu,
					   &soc->service_rings_running))
			;
	}

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		uint8_t ring = 0;
		uint32_t num_entries = 0;
		hal_ring_handle_t hal_ring_hdl = NULL;
		uint8_t rx_mask = wlan_cfg_get_rx_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_err_mask = wlan_cfg_get_rx_err_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_wbm_rel_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
						soc->wlan_cfg_ctx, i);

		if (rx_mask) {
			/* Iterate through each REO ring and process the
			 * buffers
			 */
			for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
				if (!(rx_mask & (1 << ring)))
					continue;

				hal_ring_hdl =
					soc->reo_dest_ring[ring].hal_srng;
				num_entries = hal_srng_get_num_entries(
							soc->hal_soc,
							hal_ring_hdl);
				dp_rx_process_be(&soc->intr_ctx[i],
						 hal_ring_hdl,
						 ring,
						 num_entries);
			}
		}

		/* Process REO Exception ring */
		if (rx_err_mask) {
			hal_ring_hdl = soc->reo_exception_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
						soc->hal_soc,
						hal_ring_hdl);
			dp_rx_err_process(&soc->intr_ctx[i], soc,
					  hal_ring_hdl, num_entries);
		}

		/* Process Rx WBM release ring */
		if (rx_wbm_rel_mask) {
			hal_ring_hdl = soc->rx_rel_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
						soc->hal_soc,
						hal_ring_hdl);
			dp_rx_wbm_err_process(&soc->intr_ctx[i], soc,
					      hal_ring_hdl, num_entries);
		}
	}

	/* Restore the interrupt masks */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].rx_ring_mask = rx_ring_mask[i];
		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask[i];
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask[i];
		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask[i];
	}
}
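
/**
 * dp_mlo_soc_setup() - Register a DP soc with the MLO context
 * @soc_hdl: CDP soc handle
 * @cdp_ml_ctxt: CDP MLO context handle
 *
 * Return: void
 */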
static void dp_mlo_soc_setup(struct cdp_soc_t *soc_hdl,
			     struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	uint8_t pdev_id;

	if (!cdp_ml_ctxt)
		return;

	be_soc->ml_ctxt = mlo_ctxt;

	for (pdev_id = 0; pdev_id < MAX_PDEV_CNT; pdev_id++) {
		if (soc->pdev_list[pdev_id])
			dp_mlo_update_link_to_pdev_map(soc,
						       soc->pdev_list[pdev_id]);
	}

	dp_mlo_set_soc_by_chip_id(mlo_ctxt, soc, be_soc->mlo_chip_id);
}
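
/**
 * dp_mlo_soc_teardown() - Deregister a DP soc from the MLO context
 * @soc_hdl: CDP soc handle
 * @cdp_ml_ctxt: CDP MLO context handle
 * @is_force_down: flag indicating a forced teardown
 *
 * Return: void
 */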
static void dp_mlo_soc_teardown(struct cdp_soc_t *soc_hdl,
				struct cdp_mlo_ctxt *cdp_ml_ctxt,
				bool is_force_down)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!cdp_ml_ctxt)
		return;

	/* During the teardown, drain the Rx buffers if any exist in the
	 * rings
	 */
	dp_mlo_iter_ptnr_soc(be_soc,
			     dp_mlo_soc_drain_rx_buf,
			     NULL);

	dp_mlo_set_soc_by_chip_id(mlo_ctxt, NULL, be_soc->mlo_chip_id);
	be_soc->ml_ctxt = NULL;
}
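
/**
 * dp_mlo_setup_complete() - Initialize Rx HW cookie conversion for all
 * partner SOCs once MLO setup is complete
 * @cdp_ml_ctxt: CDP MLO context handle
 *
 * Return: void
 */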
static void dp_mlo_setup_complete(struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	int i;
	struct dp_soc *soc;
	struct dp_soc_be *be_soc;
	QDF_STATUS qdf_status;

	if (!cdp_ml_ctxt)
		return;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
		if (!soc)
			continue;

		be_soc = dp_get_be_soc_from_dp_soc(soc);
		qdf_status = dp_partner_soc_rx_hw_cc_init(mlo_ctxt, be_soc);

		if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
			dp_alert("MLO partner SOC Rx desc CC init failed");
			qdf_assert_always(0);
		}
	}
}
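
/**
 * dp_mlo_update_delta_tsf2() - Update delta_tsf2 for a pdev
 * @soc_hdl: CDP soc handle
 * @pdev_id: pdev id
 * @delta_tsf2: new delta_tsf2 value
 *
 * Return: void
 */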
static void dp_mlo_update_delta_tsf2(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id, uint64_t delta_tsf2)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;
	struct dp_pdev_be *be_pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						  pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL for pdev_id %u", pdev_id);
		return;
	}

	be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);

	be_pdev->delta_tsf2 = delta_tsf2;
}
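
/**
 * dp_mlo_update_delta_tqm() - Update delta_tqm for a soc
 * @soc_hdl: CDP soc handle
 * @delta_tqm: new delta_tqm value
 *
 * Return: void
 */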
static void dp_mlo_update_delta_tqm(struct cdp_soc_t *soc_hdl,
				    uint64_t delta_tqm)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	be_soc->delta_tqm = delta_tqm;
}
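
/**
 * dp_mlo_update_mlo_ts_offset() - Update the MLO timestamp offset for a soc
 * @soc_hdl: CDP soc handle
 * @offset: new MLO timestamp offset
 *
 * Return: void
 */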
static void dp_mlo_update_mlo_ts_offset(struct cdp_soc_t *soc_hdl,
					uint64_t offset)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	be_soc->mlo_tstamp_offset = offset;
}

#ifdef CONFIG_MLO_SINGLE_DEV
/**
 * dp_aggregate_vdev_basic_stats() - aggregate vdev basic stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline
void dp_aggregate_vdev_basic_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct dp_vdev_stats *src_vdev_stats)
{
	DP_UPDATE_BASIC_STATS(tgt_vdev_stats, src_vdev_stats);
}

/**
 * dp_aggregate_vdev_ingress_stats() - aggregate vdev ingress stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 * @xmit_type: xmit type of packet - MLD/Link
 *
 * Return: void
 */
static inline
void dp_aggregate_vdev_ingress_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct dp_vdev_stats *src_vdev_stats,
			enum dp_pkt_xmit_type xmit_type)
{
	/* Aggregate vdev ingress stats */
	DP_UPDATE_LINK_VDEV_INGRESS_STATS(tgt_vdev_stats, src_vdev_stats,
					  xmit_type);
}

/**
 * dp_aggregate_all_vdev_stats() - aggregate vdev ingress and unmapped peer
 * stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 * @xmit_type: xmit type of packet - MLD/Link
 *
 * Return: void
 */
static inline
void dp_aggregate_all_vdev_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct dp_vdev_stats *src_vdev_stats,
			enum dp_pkt_xmit_type xmit_type)
{
	dp_aggregate_vdev_ingress_stats(tgt_vdev_stats, src_vdev_stats,
					xmit_type);
	dp_aggregate_vdev_stats_for_unmapped_peers(tgt_vdev_stats,
						   src_vdev_stats);
}

/**
 * dp_mlo_vdev_stats_aggr_bridge_vap() - aggregate bridge vdev stats
 * @be_vdev: DP BE vdev handle
 * @bridge_vdev: DP vdev handle for the bridge vdev
 * @arg: buffer for target vdev stats
 * @xmit_type: xmit type of packet - MLD/Link
 *
 * Return: void
 */
static
void dp_mlo_vdev_stats_aggr_bridge_vap(struct dp_vdev_be *be_vdev,
				       struct dp_vdev *bridge_vdev,
				       void *arg,
				       enum dp_pkt_xmit_type xmit_type)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;
	struct dp_vdev_be *bridge_be_vdev = NULL;

	bridge_be_vdev = dp_get_be_vdev_from_dp_vdev(bridge_vdev);
	if (!bridge_be_vdev)
		return;

	dp_aggregate_all_vdev_stats(tgt_vdev_stats, &bridge_vdev->stats,
				    xmit_type);
	dp_aggregate_vdev_stats_for_unmapped_peers(tgt_vdev_stats,
						   (&bridge_be_vdev->mlo_stats));
	dp_vdev_iterate_peer(bridge_vdev, dp_update_vdev_stats, tgt_vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);
}

/**
 * dp_mlo_vdev_stats_aggr_bridge_vap_unified() - aggregate bridge vdev stats
 * for unified mode, where all MLO and legacy packets are submitted to the vdev
 * @be_vdev: DP BE vdev handle
 * @bridge_vdev: DP vdev handle for the bridge vdev
 * @arg: buffer for target vdev stats
 *
 * Return: void
 */
static
void dp_mlo_vdev_stats_aggr_bridge_vap_unified(struct dp_vdev_be *be_vdev,
					       struct dp_vdev *bridge_vdev,
					       void *arg)
{
	dp_mlo_vdev_stats_aggr_bridge_vap(be_vdev, bridge_vdev, arg,
					  DP_XMIT_TOTAL);
}

/**
 * dp_mlo_vdev_stats_aggr_bridge_vap_mld() - aggregate bridge vdev stats for
 * MLD mode, where all MLO packets are submitted to the MLD
 * @be_vdev: DP BE vdev handle
 * @bridge_vdev: DP vdev handle for the bridge vdev
 * @arg: buffer for target vdev stats
 *
 * Return: void
 */
static
void dp_mlo_vdev_stats_aggr_bridge_vap_mld(struct dp_vdev_be *be_vdev,
					   struct dp_vdev *bridge_vdev,
					   void *arg)
{
	dp_mlo_vdev_stats_aggr_bridge_vap(be_vdev, bridge_vdev, arg,
					  DP_XMIT_MLD);
}

/**
 * dp_aggregate_interface_stats_based_on_peer_type() - aggregate stats at
 * VDEV level based on the type of peer connected to the vdev
 * @vdev: DP VDEV handle
 * @vdev_stats: target vdev stats pointer
 * @peer_type: type of peer - MLO Link or Legacy peer
 *
 * Return: void
 */
static
void dp_aggregate_interface_stats_based_on_peer_type(
					struct dp_vdev *vdev,
					struct cdp_vdev_stats *vdev_stats,
					enum dp_peer_type peer_type)
{
	struct cdp_vdev_stats *tgt_vdev_stats = NULL;
	struct dp_vdev_be *be_vdev = NULL;
	struct dp_soc_be *be_soc = NULL;

	if (!vdev || !vdev->pdev)
		return;

	tgt_vdev_stats = vdev_stats;
	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!be_vdev)
		return;

	if (peer_type == DP_PEER_TYPE_LEGACY) {
		dp_aggregate_all_vdev_stats(tgt_vdev_stats,
					    &vdev->stats, DP_XMIT_LINK);
	} else {
		if (be_vdev->mcast_primary) {
			dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
					      dp_mlo_vdev_stats_aggr_bridge_vap_mld,
					      (void *)vdev_stats,
					      DP_MOD_ID_GENERIC_STATS,
					      DP_BRIDGE_VDEV_ITER,
					      DP_VDEV_ITERATE_SKIP_SELF);
		}
		dp_aggregate_vdev_ingress_stats(tgt_vdev_stats,
						&vdev->stats, DP_XMIT_MLD);
		dp_aggregate_vdev_stats_for_unmapped_peers(
						tgt_vdev_stats,
						(&be_vdev->mlo_stats));
	}

	/* Aggregate associated peer stats */
	dp_vdev_iterate_specific_peer_type(vdev,
					   dp_update_vdev_stats,
					   vdev_stats,
					   DP_MOD_ID_GENERIC_STATS,
					   peer_type);
}

/**
 * dp_aggregate_interface_stats() - aggregate stats at VDEV level
 * @vdev: DP VDEV handle
 * @vdev_stats: target vdev stats pointer
 *
 * Return: void
 */
static
void dp_aggregate_interface_stats(struct dp_vdev *vdev,
				  struct cdp_vdev_stats *vdev_stats)
{
	struct dp_vdev_be *be_vdev = NULL;
	struct dp_soc_be *be_soc = NULL;

	if (!vdev || !vdev->pdev)
		return;

	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!be_vdev)
		return;

	if (be_vdev->mcast_primary) {
		dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
				      dp_mlo_vdev_stats_aggr_bridge_vap_unified,
				      (void *)vdev_stats, DP_MOD_ID_GENERIC_STATS,
				      DP_BRIDGE_VDEV_ITER,
				      DP_VDEV_ITERATE_SKIP_SELF);
	}

	dp_aggregate_vdev_stats_for_unmapped_peers(vdev_stats,
						   (&be_vdev->mlo_stats));
	dp_aggregate_all_vdev_stats(vdev_stats, &vdev->stats,
				    DP_XMIT_TOTAL);

	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);

	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
}

/**
 * dp_mlo_aggr_ptnr_iface_stats() - aggregate MLO partner vdev stats
 * @be_vdev: vdev handle
 * @ptnr_vdev: partner vdev handle
 * @arg: target buffer for aggregation
 *
 * Return: void
 */
static
void dp_mlo_aggr_ptnr_iface_stats(struct dp_vdev_be *be_vdev,
				  struct dp_vdev *ptnr_vdev,
				  void *arg)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;

	dp_aggregate_interface_stats(ptnr_vdev, tgt_vdev_stats);
}

/**
 * dp_mlo_aggr_ptnr_iface_stats_mlo_links() - aggregate MLO partner vdev
 * stats based on peer type
 * @be_vdev: vdev handle
 * @ptnr_vdev: partner vdev handle
 * @arg: target buffer for aggregation
 *
 * Return: void
 */
static
void dp_mlo_aggr_ptnr_iface_stats_mlo_links(
					struct dp_vdev_be *be_vdev,
					struct dp_vdev *ptnr_vdev,
					void *arg)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;

	dp_aggregate_interface_stats_based_on_peer_type(ptnr_vdev,
							tgt_vdev_stats,
							DP_PEER_TYPE_MLO_LINK);
}

/**
 * dp_aggregate_sta_interface_stats() - for STA mode, aggregate vdev stats
 * from all link peers
 * @soc: soc handle
 * @vdev: vdev handle
 * @buf: target buffer for aggregation
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_aggregate_sta_interface_stats(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 void *buf)
{
	struct dp_peer *vap_bss_peer = NULL;
	struct dp_peer *mld_peer = NULL;
	struct dp_peer *link_peer = NULL;
	struct dp_mld_link_peers link_peers_info;
	uint8_t i = 0;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
						  DP_MOD_ID_GENERIC_STATS);
	if (!vap_bss_peer)
		return QDF_STATUS_E_FAILURE;

	mld_peer = DP_GET_MLD_PEER_FROM_PEER(vap_bss_peer);

	if (!mld_peer) {
		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_GENERIC_STATS);
		return QDF_STATUS_E_FAILURE;
	}

	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, &link_peers_info,
					    DP_MOD_ID_GENERIC_STATS);

	for (i = 0; i < link_peers_info.num_links; i++) {
		link_peer = link_peers_info.link_peers[i];
		dp_update_vdev_stats(soc, link_peer, buf);
		dp_aggregate_vdev_ingress_stats((struct cdp_vdev_stats *)buf,
						&link_peer->vdev->stats,
						DP_XMIT_TOTAL);
		dp_aggregate_vdev_basic_stats(
					(struct cdp_vdev_stats *)buf,
					&link_peer->vdev->stats);
	}

	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_GENERIC_STATS);
	dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_GENERIC_STATS);
	return ret;
}
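
/**
 * dp_mlo_get_mld_vdev_stats() - aggregate stats for the MLD vdev
 * @soc_hdl: CDP soc handle
 * @vdev_id: vdev id of the requested vdev
 * @buf: target buffer (struct cdp_vdev_stats) for aggregation
 * @link_vdev_only: if true, skip aggregation from partner vdevs
 *
 * Return: QDF_STATUS
 */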
static QDF_STATUS dp_mlo_get_mld_vdev_stats(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id, void *buf,
					    bool link_vdev_only)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_GENERIC_STATS);
	struct dp_vdev_be *vdev_be = NULL;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	vdev_be = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!vdev_be || !vdev_be->mlo_dev_ctxt) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
		return QDF_STATUS_E_FAILURE;
	}

	if (vdev->opmode == wlan_op_mode_sta) {
		ret = dp_aggregate_sta_interface_stats(soc, vdev, buf);
		goto complete;
	}

	if (DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap &&
	    vdev->opmode == wlan_op_mode_ap) {
		dp_aggregate_interface_stats_based_on_peer_type(
						vdev, buf,
						DP_PEER_TYPE_MLO_LINK);
		if (link_vdev_only)
			goto complete;

		/* Aggregate stats from partner vdevs */
		dp_mlo_iter_ptnr_vdev(be_soc, vdev_be,
				      dp_mlo_aggr_ptnr_iface_stats_mlo_links,
				      buf,
				      DP_MOD_ID_GENERIC_STATS,
				      DP_LINK_VDEV_ITER,
				      DP_VDEV_ITERATE_SKIP_SELF);

		/* Aggregate vdev stats from MLO ctx for detached MLO Links */
		dp_update_mlo_link_vdev_ctxt_stats(buf,
						   &vdev_be->mlo_dev_ctxt->stats,
						   DP_XMIT_MLD);
	} else {
		dp_aggregate_interface_stats(vdev, buf);

		if (link_vdev_only)
			goto complete;

		/* Aggregate stats from partner vdevs */
		dp_mlo_iter_ptnr_vdev(be_soc, vdev_be,
				      dp_mlo_aggr_ptnr_iface_stats, buf,
				      DP_MOD_ID_GENERIC_STATS,
				      DP_LINK_VDEV_ITER,
				      DP_VDEV_ITERATE_SKIP_SELF);

		/* Aggregate vdev stats from MLO ctx for detached MLO Links */
		dp_update_mlo_link_vdev_ctxt_stats(buf,
						   &vdev_be->mlo_dev_ctxt->stats,
						   DP_XMIT_TOTAL);
	}

complete:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
	return ret;
}
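
/**
 * dp_get_interface_stats_be() - get vdev stats for the BE architecture
 * @soc_hdl: CDP soc handle
 * @vdev_id: vdev id of the requested vdev
 * @buf: target buffer (struct cdp_vdev_stats) for aggregation
 * @is_aggregate: unused here; stats are always aggregated at vdev level
 *
 * Return: QDF_STATUS
 */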
QDF_STATUS
dp_get_interface_stats_be(struct cdp_soc_t *soc_hdl,
			  uint8_t vdev_id,
			  void *buf,
			  bool is_aggregate)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_GENERIC_STATS);
	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	if (DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap &&
	    vdev->opmode == wlan_op_mode_ap) {
		dp_aggregate_interface_stats_based_on_peer_type(
						vdev, buf,
						DP_PEER_TYPE_LEGACY);
	} else {
		dp_aggregate_interface_stats(vdev, buf);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
	return QDF_STATUS_SUCCESS;
}
#endif

static struct cdp_mlo_ops dp_mlo_ops = {
	.mlo_soc_setup = dp_mlo_soc_setup,
	.mlo_soc_teardown = dp_mlo_soc_teardown,
	.mlo_setup_complete = dp_mlo_setup_complete,
	.mlo_update_delta_tsf2 = dp_mlo_update_delta_tsf2,
	.mlo_update_delta_tqm = dp_mlo_update_delta_tqm,
	.mlo_update_mlo_ts_offset = dp_mlo_update_mlo_ts_offset,
	.mlo_ctxt_attach = dp_mlo_ctxt_attach_wifi3,
	.mlo_ctxt_detach = dp_mlo_ctxt_detach_wifi3,
#ifdef CONFIG_MLO_SINGLE_DEV
	.mlo_get_mld_vdev_stats = dp_mlo_get_mld_vdev_stats,
#endif
};

void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!params->mlo_enabled) {
		dp_warn("MLO not enabled on SOC");
		return;
	}

	be_soc->mlo_chip_id = params->mlo_chip_id;
	be_soc->ml_ctxt = cdp_mlo_ctx_to_dp(params->ml_context);
	be_soc->mlo_enabled = 1;
	soc->cdp_soc.ops->mlo_ops = &dp_mlo_ops;
}

void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	uint8_t link_id;

	if (!be_soc->mlo_enabled)
		return;

	if (!ml_ctxt)
		return;

	link_id = be_pdev->mlo_link_id;

	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC) {
		if (!ml_ctxt->link_to_pdev_map[link_id])
			ml_ctxt->link_to_pdev_map[link_id] = be_pdev;
		else
			dp_alert("Attempt to update existing map for link %u",
				 link_id);
	}
}

void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	uint8_t link_id;

	if (!be_soc->mlo_enabled)
		return;

	if (!ml_ctxt)
		return;

	link_id = be_pdev->mlo_link_id;
	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
		ml_ctxt->link_to_pdev_map[link_id] = NULL;
}
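
/**
 * dp_mlo_get_be_pdev_from_link_id() - Get the BE pdev mapped to an MLO
 * link id
 * @ml_ctxt: DP ML context handle
 * @link_id: MLO link id
 *
 * Return: BE pdev on success, NULL if the link id is out of range or unmapped
 */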
static struct dp_pdev_be *
dp_mlo_get_be_pdev_from_link_id(struct dp_mlo_ctxt *ml_ctxt, uint8_t link_id)
{
	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
		return ml_ctxt->link_to_pdev_map[link_id];
	return NULL;
}

void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(pdev->soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);

	if (!be_soc->mlo_enabled) {
		dp_info("MLO not enabled on SOC");
		return;
	}

	be_pdev->mlo_link_id = params->mlo_link_id;
}

void dp_mlo_partner_chips_map(struct dp_soc *soc,
			      struct dp_peer *peer,
			      uint16_t peer_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = NULL;
	bool is_ml_peer_id =
		HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
	uint8_t chip_id;
	struct dp_soc *temp_soc;

	/* For non-ML peers, don't map on the partner chips */
	if (!is_ml_peer_id)
		return;

	mlo_ctxt = be_soc->ml_ctxt;
	if (!mlo_ctxt)
		return;

	qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);

	for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
		temp_soc = mlo_ctxt->ml_soc_list[chip_id];
		if (!temp_soc)
			continue;

		/* Skip if this is the current soc */
		if (temp_soc == soc)
			continue;

		dp_peer_find_id_to_obj_add(temp_soc, peer, peer_id);
	}
	qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
}

qdf_export_symbol(dp_mlo_partner_chips_map);

void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
				uint16_t peer_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	bool is_ml_peer_id =
		HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
	uint8_t chip_id;
	struct dp_soc *temp_soc;

	if (!is_ml_peer_id)
		return;

	if (!mlo_ctxt)
		return;

	qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);
	for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
		temp_soc = mlo_ctxt->ml_soc_list[chip_id];
		if (!temp_soc)
			continue;

		/* Skip if this is the current soc */
		if (temp_soc == soc)
			continue;

		dp_peer_find_id_to_obj_remove(temp_soc, peer_id);
	}
	qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
}

qdf_export_symbol(dp_mlo_partner_chips_unmap);

uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return be_soc->mlo_chip_id;
}

qdf_export_symbol(dp_mlo_get_chip_id);

struct dp_peer *
dp_mlo_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				      uint8_t *peer_mac_addr,
				      int mac_addr_is_aligned,
				      uint8_t vdev_id,
				      uint8_t chip_id,
				      enum dp_mod_id mod_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *link_peer_soc = NULL;
	struct dp_peer *peer = NULL;

	if (!mlo_ctxt)
		return NULL;

	link_peer_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);

	if (!link_peer_soc)
		return NULL;

	peer = dp_peer_find_hash_find(link_peer_soc, peer_mac_addr,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
	qdf_atomic_dec(&link_peer_soc->ref_count);
	return peer;
}

qdf_export_symbol(dp_mlo_link_peer_hash_find_by_chip_id);

void dp_mlo_get_rx_hash_key(struct dp_soc *soc,
			    struct cdp_lro_hash_config *lro_hash)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !ml_ctxt)
		return dp_get_rx_hash_key_bytes(lro_hash);

	qdf_mem_copy(lro_hash->toeplitz_hash_ipv4, ml_ctxt->toeplitz_hash_ipv4,
		     (sizeof(lro_hash->toeplitz_hash_ipv4[0]) *
		      LRO_IPV4_SEED_ARR_SZ));
	qdf_mem_copy(lro_hash->toeplitz_hash_ipv6, ml_ctxt->toeplitz_hash_ipv6,
		     (sizeof(lro_hash->toeplitz_hash_ipv6[0]) *
		      LRO_IPV6_SEED_ARR_SZ));
}

struct dp_soc *
dp_rx_replenish_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *replenish_soc;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	if (be_soc->mlo_chip_id == chip_id)
		return soc;

	replenish_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
	if (qdf_unlikely(!replenish_soc)) {
		dp_alert("replenish SOC is NULL");
		qdf_assert_always(0);
	}

	return replenish_soc;
}

uint8_t dp_soc_get_num_soc_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return 1;

	return mlo_ctxt->ml_soc_cnt;
}

struct dp_soc *
dp_soc_get_by_idle_bm_id(struct dp_soc *soc, uint8_t idle_bm_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *partner_soc = NULL;
	uint8_t chip_id;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);

		if (!partner_soc)
			continue;

		if (partner_soc->idle_link_bm_id == idle_bm_id)
			return partner_soc;
	}

	return NULL;
}

#ifdef WLAN_MLO_MULTI_CHIP
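/**
 * dp_print_mlo_partner_list() - Print MLO partner vdev details
 * @be_vdev: DP BE vdev handle of the vdev whose partners are iterated
 * @partner_vdev: partner vdev handle
 * @arg: opaque iterator argument (unused)
 *
 * Return: void
 */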
static void dp_print_mlo_partner_list(struct dp_vdev_be *be_vdev,
				      struct dp_vdev *partner_vdev,
				      void *arg)
{
	struct dp_vdev_be *partner_vdev_be = NULL;
	struct dp_soc_be *partner_soc_be = NULL;

	partner_vdev_be = dp_get_be_vdev_from_dp_vdev(partner_vdev);
	partner_soc_be = dp_get_be_soc_from_dp_soc(partner_vdev->pdev->soc);

	DP_PRINT_STATS("is_bridge_vap = %s, mcast_primary = %s, vdev_id = %d, pdev_id = %d, chip_id = %d",
		       partner_vdev->is_bridge_vdev ? "true" : "false",
		       partner_vdev_be->mcast_primary ? "true" : "false",
		       partner_vdev->vdev_id,
		       partner_vdev->pdev->pdev_id,
		       partner_soc_be->mlo_chip_id);
}

void dp_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
			   struct dp_vdev_be *be_vdev,
			   dp_ptnr_vdev_iter_func func,
			   void *arg,
			   enum dp_mod_id mod_id,
			   uint8_t type,
			   bool include_self_vdev)
{
	int i = 0;
	int j = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
	struct dp_vdev *self_vdev = &be_vdev->vdev;

	if (type < DP_LINK_VDEV_ITER || type > DP_ALL_VDEV_ITER) {
		dp_err("invalid iterate type");
		return;
	}

	/* Without an MLO dev context there are no partner vdevs to walk;
	 * visit only the self vdev (if requested) and return.
	 */
	if (!be_vdev->mlo_dev_ctxt) {
		if (include_self_vdev)
			(*func)(be_vdev, self_vdev, arg);
		return;
	}

	for (i = 0; (i < WLAN_MAX_MLO_CHIPS) &&
	     IS_LINK_VDEV_ITER_REQUIRED(type); i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *ptnr_vdev;

			ptnr_vdev = dp_vdev_get_ref_by_id(
					ptnr_soc,
					be_vdev->mlo_dev_ctxt->vdev_list[i][j],
					mod_id);
			if (!ptnr_vdev)
				continue;

			if ((ptnr_vdev == self_vdev) && (!include_self_vdev)) {
				dp_vdev_unref_delete(ptnr_vdev->pdev->soc,
						     ptnr_vdev,
						     mod_id);
				continue;
			}

			(*func)(be_vdev, ptnr_vdev, arg);
			dp_vdev_unref_delete(ptnr_vdev->pdev->soc,
					     ptnr_vdev,
					     mod_id);
		}
	}

	for (i = 0; (i < WLAN_MAX_MLO_CHIPS) &&
	     IS_BRIDGE_VDEV_ITER_REQUIRED(type); i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *bridge_vdev;

			bridge_vdev = dp_vdev_get_ref_by_id(
					ptnr_soc,
					be_vdev->mlo_dev_ctxt->bridge_vdev[i][j],
					mod_id);
			if (!bridge_vdev)
				continue;

			if ((bridge_vdev == self_vdev) &&
			    (!include_self_vdev)) {
				dp_vdev_unref_delete(
						bridge_vdev->pdev->soc,
						bridge_vdev,
						mod_id);
				continue;
			}

			(*func)(be_vdev, bridge_vdev, arg);
			dp_vdev_unref_delete(bridge_vdev->pdev->soc,
					     bridge_vdev,
					     mod_id);
		}
	}
}

qdf_export_symbol(dp_mlo_iter_ptnr_vdev);

void dp_mlo_debug_print_ptnr_info(struct dp_vdev *vdev)
{
	struct dp_vdev_be *be_vdev = NULL;
	struct dp_soc_be *be_soc = NULL;

	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	DP_PRINT_STATS("self vdev is_bridge_vap = %s, mcast_primary = %s, vdev_id = %d, pdev_id = %d, chip_id = %d",
		       vdev->is_bridge_vdev ? "true" : "false",
		       be_vdev->mcast_primary ? "true" : "false",
		       vdev->vdev_id,
		       vdev->pdev->pdev_id,
		       dp_mlo_get_chip_id(vdev->pdev->soc));

	dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
			      dp_print_mlo_partner_list,
			      NULL, DP_MOD_ID_GENERIC_STATS,
			      DP_ALL_VDEV_ITER,
			      DP_VDEV_ITERATE_SKIP_SELF);
}
#endif

#ifdef WLAN_MCAST_MLO
struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
					      struct dp_vdev_be *be_vdev,
					      enum dp_mod_id mod_id)
{
	int i = 0;
	int j = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
	struct dp_vdev *vdev = (struct dp_vdev *)be_vdev;

	if (!be_vdev->mlo_dev_ctxt)
		return NULL;

	if (be_vdev->mcast_primary) {
		if (dp_vdev_get_ref((struct dp_soc *)be_soc, vdev, mod_id) !=
					QDF_STATUS_SUCCESS)
			return NULL;

		return vdev;
	}

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *ptnr_vdev = NULL;
			struct dp_vdev_be *be_ptnr_vdev = NULL;

			ptnr_vdev = dp_vdev_get_ref_by_id(
					ptnr_soc,
					be_vdev->mlo_dev_ctxt->vdev_list[i][j],
					mod_id);
			if (!ptnr_vdev)
				continue;

			be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);

			/* Return the mcast primary vdev with the vdev
			 * reference still held; the caller releases it.
			 */
			if (be_ptnr_vdev->mcast_primary)
				return ptnr_vdev;

			dp_vdev_unref_delete(be_ptnr_vdev->vdev.pdev->soc,
					     &be_ptnr_vdev->vdev,
					     mod_id);
		}
	}

	return NULL;
}

qdf_export_symbol(dp_mlo_get_mcast_primary_vdev);
#endif

/**
 * dp_mlo_iter_ptnr_soc() - iterate through the MLO soc list and call the
 * callback for each soc
 * @be_soc: dp_soc_be pointer
 * @func: function to be called for each soc
 * @arg: context to be passed to the callback
 *
 * Return: true if MLO is enabled, false if MLO is disabled
 */
bool dp_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc, dp_ptnr_soc_iter_func func,
			  void *arg)
{
	int i = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !be_soc->ml_ctxt)
		return false;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;

		(*func)(ptnr_soc, arg, i);
	}

	return true;
}

qdf_export_symbol(dp_mlo_iter_ptnr_soc);
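
/**
 * dp_mlo_get_mlo_ts_offset() - Get the MLO timestamp offset of the soc
 * that owns the given BE pdev
 * @be_pdev: DP BE pdev handle
 *
 * Return: MLO timestamp offset
 */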
static inline uint64_t dp_mlo_get_mlo_ts_offset(struct dp_pdev_be *be_pdev)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev;
	struct dp_soc_be *be_soc;
	uint32_t mlo_offset;

	pdev = &be_pdev->pdev;
	soc = pdev->soc;
	be_soc = dp_get_be_soc_from_dp_soc(soc);

	mlo_offset = be_soc->mlo_tstamp_offset;

	return mlo_offset;
}

int32_t dp_mlo_get_delta_tsf2_wrt_mlo_offset(struct dp_soc *soc,
					     uint8_t hw_link_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	struct dp_pdev_be *be_pdev;
	int32_t delta_tsf2_mlo_offset;
	int32_t mlo_offset, delta_tsf2;

	if (!ml_ctxt)
		return 0;

	be_pdev = dp_mlo_get_be_pdev_from_link_id(ml_ctxt, hw_link_id);
	if (!be_pdev)
		return 0;

	mlo_offset = dp_mlo_get_mlo_ts_offset(be_pdev);
	delta_tsf2 = be_pdev->delta_tsf2;

	delta_tsf2_mlo_offset = mlo_offset - delta_tsf2;

	return delta_tsf2_mlo_offset;
}

int32_t dp_mlo_get_delta_tqm_wrt_mlo_offset(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	int32_t delta_tqm_mlo_offset;
	int32_t mlo_offset, delta_tqm;

	mlo_offset = be_soc->mlo_tstamp_offset;
	delta_tqm = be_soc->delta_tqm;

	delta_tqm_mlo_offset = mlo_offset - delta_tqm;

	return delta_tqm_mlo_offset;
}

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_umac_reset_update_partner_map() - Update Umac reset partner map
 * @mlo_ctx: DP ML context handle
 * @chip_id: chip id
 * @set: flag indicating whether to set or clear the bit
 *
 * Return: void
 */
static void dp_umac_reset_update_partner_map(struct dp_mlo_ctxt *mlo_ctx,
					     int chip_id, bool set)
{
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx =
		&mlo_ctx->grp_umac_reset_ctx;

	if (set)
		qdf_atomic_set_bit(chip_id, &grp_umac_reset_ctx->partner_map);
	else
		qdf_atomic_clear_bit(chip_id, &grp_umac_reset_ctx->partner_map);
}

QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc)
{
	struct dp_mlo_ctxt *mlo_ctx;
	struct dp_soc_be *be_soc;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (!be_soc) {
		dp_umac_reset_err("null be_soc");
		return QDF_STATUS_E_NULL_VALUE;
	}

	mlo_ctx = be_soc->ml_ctxt;
	if (!mlo_ctx) {
		/* This API can be called for non-MLO SOC as well. Hence,
		 * return the status as success when mlo_ctx is NULL.
		 */
		return QDF_STATUS_SUCCESS;
	}

	dp_umac_reset_update_partner_map(mlo_ctx, be_soc->mlo_chip_id, false);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_umac_reset_complete_umac_recovery() - Complete Umac reset session
 * @soc: dp soc handle
 *
 * Return: void
 */
void dp_umac_reset_complete_umac_recovery(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_alert("Umac reset was handled on soc %pK", soc);
		return;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	grp_umac_reset_ctx->umac_reset_in_progress = false;
	grp_umac_reset_ctx->is_target_recovery = false;
	grp_umac_reset_ctx->response_map = 0;
	grp_umac_reset_ctx->request_map = 0;
	grp_umac_reset_ctx->initiator_chip_id = 0;

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	dp_umac_reset_alert("Umac reset was handled on mlo group ctxt %pK",
			    mlo_ctx);
}

/**
 * dp_umac_reset_initiate_umac_recovery() - Initiate Umac reset session
 * @soc: dp soc handle
 * @umac_reset_ctx: Umac reset context
 * @rx_event: Rx event received
 * @is_target_recovery: Flag to indicate if it is triggered for target recovery
 *
 * Return: status
 */
QDF_STATUS dp_umac_reset_initiate_umac_recovery(struct dp_soc *soc,
				struct dp_soc_umac_reset_ctx *umac_reset_ctx,
				enum umac_reset_rx_event rx_event,
				bool is_target_recovery)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!mlo_ctx)
		return dp_umac_reset_validate_n_update_state_machine_on_rx(
					umac_reset_ctx, rx_event,
					UMAC_RESET_STATE_WAIT_FOR_TRIGGER,
					UMAC_RESET_STATE_DO_TRIGGER_RECEIVED);

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	if (grp_umac_reset_ctx->umac_reset_in_progress) {
		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return QDF_STATUS_E_INVAL;
	}

	status = dp_umac_reset_validate_n_update_state_machine_on_rx(
					umac_reset_ctx, rx_event,
					UMAC_RESET_STATE_WAIT_FOR_TRIGGER,
					UMAC_RESET_STATE_DO_TRIGGER_RECEIVED);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return status;
	}

	grp_umac_reset_ctx->umac_reset_in_progress = true;
	grp_umac_reset_ctx->is_target_recovery = is_target_recovery;

	/* We don't wait for the 'Umac trigger' message from all socs */
	grp_umac_reset_ctx->request_map = grp_umac_reset_ctx->partner_map;
	grp_umac_reset_ctx->response_map = grp_umac_reset_ctx->partner_map;
	grp_umac_reset_ctx->initiator_chip_id = dp_mlo_get_chip_id(soc);
	grp_umac_reset_ctx->umac_reset_count++;

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_umac_reset_handle_action_cb() - Function to call action callback
 * @soc: dp soc handle
 * @umac_reset_ctx: Umac reset context
 * @action: Action to call the callback for
 *
 * Return: QDF_STATUS status
 */
QDF_STATUS
dp_umac_reset_handle_action_cb(struct dp_soc *soc,
			       struct dp_soc_umac_reset_ctx *umac_reset_ctx,
			       enum umac_reset_action action)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_debug("MLO context is Null");
		goto handle;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	qdf_atomic_set_bit(dp_mlo_get_chip_id(soc),
			   &grp_umac_reset_ctx->request_map);

	dp_umac_reset_debug("partner_map %u request_map %u",
			    grp_umac_reset_ctx->partner_map,
			    grp_umac_reset_ctx->request_map);

	/* This logic is needed for synchronization between mlo socs */
	if ((grp_umac_reset_ctx->partner_map & grp_umac_reset_ctx->request_map)
			!= grp_umac_reset_ctx->partner_map) {
		struct hif_softc *hif_sc = HIF_GET_SOFTC(soc->hif_handle);
		struct hif_umac_reset_ctx *hif_umac_reset_ctx;

		if (!hif_sc) {
			hif_err("scn is null");
			qdf_assert_always(0);
			/* Release the group lock before bailing out */
			qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
			return QDF_STATUS_E_FAILURE;
		}

		hif_umac_reset_ctx = &hif_sc->umac_reset_ctx;

		/* Mark the action as pending */
		umac_reset_ctx->pending_action = action;

		/* Reschedule the tasklet and exit */
		tasklet_hi_schedule(&hif_umac_reset_ctx->intr_tq);
		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

		return QDF_STATUS_SUCCESS;
	}

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
	umac_reset_ctx->pending_action = UMAC_RESET_ACTION_NONE;

handle:
	if (!umac_reset_ctx->rx_actions.cb[action]) {
		dp_umac_reset_err("rx callback is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	return umac_reset_ctx->rx_actions.cb[action](soc);
}

/**
 * dp_umac_reset_post_tx_cmd() - Iterate partner socs and post Tx command
 * @umac_reset_ctx: UMAC reset context
 * @tx_cmd: Tx command to be posted
 *
 * Return: QDF status of operation
 */
QDF_STATUS
dp_umac_reset_post_tx_cmd(struct dp_soc_umac_reset_ctx *umac_reset_ctx,
			  enum umac_reset_tx_cmd tx_cmd)
{
	struct dp_soc *soc = container_of(umac_reset_ctx, struct dp_soc,
					  umac_reset_ctx);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_post_tx_cmd_via_shmem(soc, &tx_cmd, 0);
		return QDF_STATUS_SUCCESS;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	qdf_atomic_set_bit(dp_mlo_get_chip_id(soc),
			   &grp_umac_reset_ctx->response_map);

	/* This logic is needed for synchronization between mlo socs */
	if ((grp_umac_reset_ctx->partner_map & grp_umac_reset_ctx->response_map)
			!= grp_umac_reset_ctx->partner_map) {
		dp_umac_reset_debug(
			"Response(s) pending : expected map %u current map %u",
			grp_umac_reset_ctx->partner_map,
			grp_umac_reset_ctx->response_map);

		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return QDF_STATUS_SUCCESS;
	}

	dp_umac_reset_debug(
		"All responses received: expected map %u current map %u",
		grp_umac_reset_ctx->partner_map,
		grp_umac_reset_ctx->response_map);

	grp_umac_reset_ctx->response_map = 0;
	grp_umac_reset_ctx->request_map = 0;
	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	dp_mlo_iter_ptnr_soc(be_soc, &dp_umac_reset_post_tx_cmd_via_shmem,
			     &tx_cmd);

	if (tx_cmd == UMAC_RESET_TX_CMD_POST_RESET_COMPLETE_DONE)
		dp_umac_reset_complete_umac_recovery(soc);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_umac_reset_initiator_check() - Check if soc is the Umac reset initiator
 * @soc: dp soc handle
 *
 * Return: true if the soc is initiator or false otherwise
 */
bool dp_umac_reset_initiator_check(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return true;

	return (mlo_ctx->grp_umac_reset_ctx.initiator_chip_id ==
				dp_mlo_get_chip_id(soc));
}

/**
 * dp_umac_reset_target_recovery_check() - Check if this is for target recovery
 * @soc: dp soc handle
 *
 * Return: true if the session is for target recovery or false otherwise
 */
bool dp_umac_reset_target_recovery_check(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return false;

	return mlo_ctx->grp_umac_reset_ctx.is_target_recovery;
}

/**
 * dp_umac_reset_is_soc_ignored() - Check if this soc is to be ignored
 * @soc: dp soc handle
 *
 * Return: true if the soc is ignored or false otherwise
 */
bool dp_umac_reset_is_soc_ignored(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return false;

	return !qdf_atomic_test_bit(dp_mlo_get_chip_id(soc),
				    &mlo_ctx->grp_umac_reset_ctx.partner_map);
}

QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc)
{
	struct dp_mlo_ctxt *mlo_ctx;
	struct dp_soc_be *be_soc;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (!be_soc) {
		dp_umac_reset_err("null be_soc");
		return QDF_STATUS_E_NULL_VALUE;
	}

	mlo_ctx = be_soc->ml_ctxt;
	if (!mlo_ctx) {
		/* This API can be called for non-MLO SOC as well. Hence,
		 * return the status as success when mlo_ctx is NULL.
		 */
		return QDF_STATUS_SUCCESS;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;

	DP_UMAC_RESET_PRINT_STATS("MLO UMAC RESET stats\n"
		"\t\tPartner map :%x\n"
		"\t\tRequest map :%x\n"
		"\t\tResponse map :%x\n"
		"\t\tIs target recovery :%d\n"
		"\t\tIs Umac reset in progress :%d\n"
		"\t\tNumber of UMAC resets triggered :%d\n"
		"\t\tInitiator chip ID :%d\n",
		grp_umac_reset_ctx->partner_map,
		grp_umac_reset_ctx->request_map,
		grp_umac_reset_ctx->response_map,
		grp_umac_reset_ctx->is_target_recovery,
		grp_umac_reset_ctx->umac_reset_in_progress,
		grp_umac_reset_ctx->umac_reset_count,
		grp_umac_reset_ctx->initiator_chip_id);

	return QDF_STATUS_SUCCESS;
}

enum cdp_umac_reset_state
dp_get_umac_reset_in_progress_state(struct cdp_soc_t *psoc)
{
	struct dp_soc_umac_reset_ctx *umac_reset_ctx;
	struct dp_soc *soc = (struct dp_soc *)psoc;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
	struct dp_soc_be *be_soc = NULL;
	struct dp_mlo_ctxt *mlo_ctx = NULL;
	enum cdp_umac_reset_state umac_reset_is_inprogress;

	if (!soc) {
		dp_umac_reset_err("DP SOC is null");
		return CDP_UMAC_RESET_INVALID_STATE;
	}

	umac_reset_ctx = &soc->umac_reset_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (be_soc)
		mlo_ctx = be_soc->ml_ctxt;

	if (mlo_ctx) {
		grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
		umac_reset_is_inprogress =
			grp_umac_reset_ctx->umac_reset_in_progress;
	} else {
		umac_reset_is_inprogress = (umac_reset_ctx->current_state !=
					    UMAC_RESET_STATE_WAIT_FOR_TRIGGER);
	}

	if (umac_reset_is_inprogress)
		return CDP_UMAC_RESET_IN_PROGRESS;

	/* Check if the umac reset was in progress during the buffer
	 * window.
	 */
	umac_reset_is_inprogress =
		((qdf_get_log_timestamp_usecs() -
		  umac_reset_ctx->ts.post_reset_complete_done) <=
		 (wlan_cfg_get_umac_reset_buffer_window_ms(soc->wlan_cfg_ctx) *
		  1000));

	return (umac_reset_is_inprogress ?
			CDP_UMAC_RESET_IN_PROGRESS_DURING_BUFFER_WINDOW :
			CDP_UMAC_RESET_NOT_IN_PROGRESS);
}

/**
 * dp_get_global_tx_desc_cleanup_flag() - Get the global Tx descriptor
 * cleanup-needed flag
 * @soc: dp soc handle
 *
 * Return: true if cleanup is needed, false otherwise
 */
bool dp_get_global_tx_desc_cleanup_flag(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
	bool flag;

	if (!mlo_ctx)
		return true;

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	flag = grp_umac_reset_ctx->tx_desc_pool_cleaned;
	if (!flag)
		grp_umac_reset_ctx->tx_desc_pool_cleaned = true;

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	return !flag;
}

/**
 * dp_reset_global_tx_desc_cleanup_flag() - Reset the global Tx descriptor
 * cleanup-needed flag
 * @soc: dp soc handle
 *
 * Return: None
 */
void dp_reset_global_tx_desc_cleanup_flag(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx)
		return;

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	grp_umac_reset_ctx->tx_desc_pool_cleaned = false;
}
#endif
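
/**
 * dp_get_soc_by_chip_id_be() - Get the DP soc for a given MLO chip id
 * @soc: DP soc handle of the calling soc
 * @chip_id: MLO chip id
 *
 * Return: the calling soc if MLO is disabled or @chip_id is the local chip;
 * otherwise the partner soc (with a reference taken), or NULL
 */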
struct dp_soc *
dp_get_soc_by_chip_id_be(struct dp_soc *soc, uint8_t chip_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *partner_soc;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	if (be_soc->mlo_chip_id == chip_id)
		return soc;

	partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);

	return partner_soc;
}