/*
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <wlan_utility.h>
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_be_api.h>
#include "dp_mlo.h"
#include <dp_be.h>
#include <dp_be_rx.h>
#include <wlan_cfg.h>
#include <wlan_mlo_mgr_cmn.h>
#include "dp_umac_reset.h"

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_umac_reset_update_partner_map() - Update Umac reset partner map
 * @mlo_ctx: mlo soc context
 * @chip_id: chip id
 * @set: flag indicating whether to set or clear the bit
 *
 * Return: void
 */
static void dp_umac_reset_update_partner_map(struct dp_mlo_ctxt *mlo_ctx,
					     int chip_id, bool set);
#endif

/**
 * dp_mlo_ctxt_attach_wifi3() - Attach DP MLO context
 * @ctrl_ctxt: CDP control context
 *
 * Return: DP MLO context handle on success, NULL on failure
 */
static struct cdp_mlo_ctxt *
dp_mlo_ctxt_attach_wifi3(struct cdp_ctrl_mlo_mgr *ctrl_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt =
		qdf_mem_malloc(sizeof(struct dp_mlo_ctxt));

	if (!mlo_ctxt) {
		dp_err("Failed to allocate DP MLO Context");
		return NULL;
	}

	mlo_ctxt->ctrl_ctxt = ctrl_ctxt;

	if (dp_mlo_peer_find_hash_attach_be
			(mlo_ctxt, DP_MAX_MLO_PEER) != QDF_STATUS_SUCCESS) {
		dp_err("Failed to allocate peer hash");
		qdf_mem_free(mlo_ctxt);
		return NULL;
	}

	qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv4,
			     (sizeof(mlo_ctxt->toeplitz_hash_ipv4[0]) *
			      LRO_IPV4_SEED_ARR_SZ));
	qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv6,
			     (sizeof(mlo_ctxt->toeplitz_hash_ipv6[0]) *
			      LRO_IPV6_SEED_ARR_SZ));

	qdf_spinlock_create(&mlo_ctxt->ml_soc_list_lock);
	qdf_spinlock_create(&mlo_ctxt->grp_umac_reset_ctx.grp_ctx_lock);
	return dp_mlo_ctx_to_cdp(mlo_ctxt);
}

/**
 * dp_mlo_ctxt_detach_wifi3() - Detach DP MLO context
 * @cdp_ml_ctxt: pointer to CDP DP MLO context
 *
 * Return: void
 */
static void dp_mlo_ctxt_detach_wifi3(struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);

	if (!cdp_ml_ctxt)
		return;

	qdf_spinlock_destroy(&mlo_ctxt->grp_umac_reset_ctx.grp_ctx_lock);
	qdf_spinlock_destroy(&mlo_ctxt->ml_soc_list_lock);
	dp_mlo_peer_find_hash_detach_be(mlo_ctxt);
	qdf_mem_free(mlo_ctxt);
}

/**
 * dp_mlo_set_soc_by_chip_id() - Add DP soc to ML context soc list
 * @ml_ctxt: DP ML context handle
 * @soc: DP soc handle
 * @chip_id: MLO chip id
 *
 * Return: void
 */
static void dp_mlo_set_soc_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
				      struct dp_soc *soc,
				      uint8_t chip_id)
{
	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
	ml_ctxt->ml_soc_list[chip_id] = soc;

	/* The same API is called during soc_attach and soc_detach;
	 * the soc parameter is non-NULL or NULL accordingly.
	 */
	if (soc)
		ml_ctxt->ml_soc_cnt++;
	else
		ml_ctxt->ml_soc_cnt--;

	dp_umac_reset_update_partner_map(ml_ctxt, chip_id, !!soc);
	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
}
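
/**
 * dp_mlo_get_soc_ref_by_chip_id() - Get a referenced DP soc from the ML
 * context soc list
 * @ml_ctxt: DP ML context handle
 * @chip_id: MLO chip id
 *
 * On success the soc's ref_count is incremented; the caller is expected to
 * release the reference with qdf_atomic_dec(&soc->ref_count) once done, as
 * dp_link_peer_hash_find_by_chip_id() does further below.
 *
 * Return: referenced DP soc handle, or NULL if MLO is disabled or no soc is
 * registered for @chip_id
 */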
struct dp_soc*
dp_mlo_get_soc_ref_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
			      uint8_t chip_id)
{
	struct dp_soc *soc = NULL;

	if (!ml_ctxt) {
		dp_warn("MLO context not created, MLO not enabled");
		return NULL;
	}

	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
	soc = ml_ctxt->ml_soc_list[chip_id];

	if (!soc) {
		qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
		return NULL;
	}

	qdf_atomic_inc(&soc->ref_count);
	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);

	return soc;
}
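
/**
 * dp_partner_soc_rx_hw_cc_init() - Initialize this soc's Rx HW cookie
 * conversion against the Rx descriptor pools of every partner soc
 * @mlo_ctxt: DP ML context handle
 * @be_soc: BE soc whose cookie conversion contexts are being initialized
 *
 * Return: QDF_STATUS
 */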
static QDF_STATUS dp_partner_soc_rx_hw_cc_init(struct dp_mlo_ctxt *mlo_ctxt,
					       struct dp_soc_be *be_soc)
{
	uint8_t i;
	struct dp_soc *partner_soc;
	struct dp_soc_be *be_partner_soc;
	uint8_t pool_id;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
		if (!partner_soc) {
			dp_err("partner_soc is NULL");
			continue;
		}

		be_partner_soc = dp_get_be_soc_from_dp_soc(partner_soc);

		for (pool_id = 0; pool_id < MAX_RXDESC_POOLS; pool_id++) {
			qdf_status =
				dp_hw_cookie_conversion_init
					(be_soc,
					 &be_partner_soc->rx_cc_ctx[pool_id]);
			if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
				dp_alert("MLO partner soc RX CC init failed");
				return qdf_status;
			}
		}
	}

	return qdf_status;
}
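
/**
 * dp_mlo_soc_drain_rx_buf() - Drain any pending Rx buffers on a soc
 * @soc: DP soc handle
 * @arg: unused iterator argument
 * @chip_id: MLO chip id of @soc
 *
 * Saves and clears the Rx interrupt masks, waits for dp_service_srngs()
 * to finish on all CPUs, processes the REO destination, REO exception and
 * Rx WBM release rings, and finally restores the interrupt masks.
 *
 * Return: void
 */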
static void dp_mlo_soc_drain_rx_buf(struct dp_soc *soc, void *arg, int chip_id)
{
	uint8_t i = 0;
	uint8_t cpu = 0;
	uint8_t rx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_err_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_wbm_rel_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t reo_status_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};

	/* Save the current interrupt mask and disable the interrupts */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		rx_ring_mask[i] = soc->intr_ctx[i].rx_ring_mask;
		rx_err_ring_mask[i] = soc->intr_ctx[i].rx_err_ring_mask;
		rx_wbm_rel_ring_mask[i] = soc->intr_ctx[i].rx_wbm_rel_ring_mask;
		reo_status_ring_mask[i] = soc->intr_ctx[i].reo_status_ring_mask;

		soc->intr_ctx[i].rx_ring_mask = 0;
		soc->intr_ctx[i].rx_err_ring_mask = 0;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
		soc->intr_ctx[i].reo_status_ring_mask = 0;
	}

	/* Make sure dp_service_srngs() is not running on any CPU */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		while (qdf_atomic_test_bit(cpu,
					   &soc->service_rings_running))
			;
	}

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		uint8_t ring = 0;
		uint32_t num_entries = 0;
		hal_ring_handle_t hal_ring_hdl = NULL;
		uint8_t rx_mask = wlan_cfg_get_rx_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_err_mask = wlan_cfg_get_rx_err_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_wbm_rel_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
						soc->wlan_cfg_ctx, i);

		if (rx_mask) {
			/* Iterate through each REO ring and process buffers */
			for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
				if (!(rx_mask & (1 << ring)))
					continue;

				hal_ring_hdl =
					soc->reo_dest_ring[ring].hal_srng;
				num_entries = hal_srng_get_num_entries(
								soc->hal_soc,
								hal_ring_hdl);
				dp_rx_process_be(&soc->intr_ctx[i],
						 hal_ring_hdl,
						 ring,
						 num_entries);
			}
		}

		/* Process REO Exception ring */
		if (rx_err_mask) {
			hal_ring_hdl = soc->reo_exception_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
							soc->hal_soc,
							hal_ring_hdl);
			dp_rx_err_process(&soc->intr_ctx[i], soc,
					  hal_ring_hdl, num_entries);
		}

		/* Process Rx WBM release ring */
		if (rx_wbm_rel_mask) {
			hal_ring_hdl = soc->rx_rel_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
							soc->hal_soc,
							hal_ring_hdl);
			dp_rx_wbm_err_process(&soc->intr_ctx[i], soc,
					      hal_ring_hdl, num_entries);
		}
	}

	/* Restore the interrupt mask */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].rx_ring_mask = rx_ring_mask[i];
		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask[i];
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask[i];
		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask[i];
	}
}
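
/**
 * dp_mlo_soc_setup() - Add a DP soc to the MLO group
 * @soc_hdl: CDP soc handle
 * @cdp_ml_ctxt: CDP DP MLO context handle
 *
 * Maps each attached pdev's MLO link to this soc and records the soc in
 * the ML context soc list by chip id.
 *
 * Return: void
 */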
static void dp_mlo_soc_setup(struct cdp_soc_t *soc_hdl,
			     struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	uint8_t pdev_id;

	if (!cdp_ml_ctxt)
		return;

	be_soc->ml_ctxt = mlo_ctxt;

	for (pdev_id = 0; pdev_id < MAX_PDEV_CNT; pdev_id++) {
		if (soc->pdev_list[pdev_id])
			dp_mlo_update_link_to_pdev_map(soc,
						       soc->pdev_list[pdev_id]);
	}

	dp_mlo_set_soc_by_chip_id(mlo_ctxt, soc, be_soc->mlo_chip_id);
}
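
/**
 * dp_mlo_soc_teardown() - Remove a DP soc from the MLO group
 * @soc_hdl: CDP soc handle
 * @cdp_ml_ctxt: CDP DP MLO context handle
 * @is_force_down: whether the teardown is forced
 *
 * Return: void
 */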
static void dp_mlo_soc_teardown(struct cdp_soc_t *soc_hdl,
				struct cdp_mlo_ctxt *cdp_ml_ctxt,
				bool is_force_down)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!cdp_ml_ctxt)
		return;

	/* During the teardown, drain any Rx buffers left in the rings */
	dp_mlo_iter_ptnr_soc(be_soc,
			     dp_mlo_soc_drain_rx_buf,
			     NULL);

	dp_mlo_set_soc_by_chip_id(mlo_ctxt, NULL, be_soc->mlo_chip_id);
	be_soc->ml_ctxt = NULL;
}
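
/**
 * dp_mlo_add_ptnr_vdev() - Record vdev1 as a partner (or bridge) of vdev2
 * @vdev1: vdev to be added
 * @vdev2: vdev whose partner/bridge vdev list is updated
 * @soc: DP soc of vdev1
 * @pdev_id: pdev id of vdev1
 *
 * The entry is keyed by vdev1's MLO chip id and pdev id; an existing valid
 * entry is left untouched.
 *
 * Return: QDF_STATUS
 */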
static QDF_STATUS dp_mlo_add_ptnr_vdev(struct dp_vdev *vdev1,
				       struct dp_vdev *vdev2,
				       struct dp_soc *soc, uint8_t pdev_id)
{
	struct dp_soc_be *soc_be = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *vdev2_be = dp_get_be_vdev_from_dp_vdev(vdev2);

	/* return when a valid entry already exists */
	if (vdev1->is_bridge_vdev) {
		if (vdev2_be->bridge_vdev_list[soc_be->mlo_chip_id][pdev_id] !=
		    CDP_INVALID_VDEV_ID)
			return QDF_STATUS_SUCCESS;

		vdev2_be->bridge_vdev_list[soc_be->mlo_chip_id][pdev_id] =
			vdev1->vdev_id;
	} else {
		if (vdev2_be->partner_vdev_list[soc_be->mlo_chip_id][pdev_id] !=
		    CDP_INVALID_VDEV_ID)
			return QDF_STATUS_SUCCESS;

		vdev2_be->partner_vdev_list[soc_be->mlo_chip_id][pdev_id] =
			vdev1->vdev_id;
	}

	mlo_debug("Add vdev%d to vdev%d list, mlo_chip_id = %d pdev_id = %d\n",
		  vdev1->vdev_id, vdev2->vdev_id, soc_be->mlo_chip_id, pdev_id);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_update_mlo_ptnr_list(struct cdp_soc_t *soc_hdl,
				   int8_t partner_vdev_ids[], uint8_t num_vdevs,
				   uint8_t self_vdev_id)
{
	int i, j;
	struct dp_soc *self_soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *self_vdev;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(self_soc);
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;

	if (!dp_mlo)
		return QDF_STATUS_E_FAILURE;

	self_vdev = dp_vdev_get_ref_by_id(self_soc, self_vdev_id, DP_MOD_ID_RX);
	if (!self_vdev)
		return QDF_STATUS_E_FAILURE;

	/* Go through the input vdev id list; for each partner vdev found:
	 * - add the current vdev's id to the partner vdev's list (keyed by
	 *   pdev_id) and increase the reference
	 * - add the partner vdev to the self list and increase the reference
	 */
	for (i = 0; i < num_vdevs; i++) {
		if (partner_vdev_ids[i] == CDP_INVALID_VDEV_ID)
			continue;

		for (j = 0; j < WLAN_MAX_MLO_CHIPS; j++) {
			struct dp_soc *soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, j);

			if (soc) {
				struct dp_vdev *vdev;

				vdev = dp_vdev_get_ref_by_id(soc,
					partner_vdev_ids[i], DP_MOD_ID_RX);
				if (vdev) {
					if (vdev == self_vdev) {
						dp_vdev_unref_delete(soc,
							vdev, DP_MOD_ID_RX);
						/* dp_soc_unref_delete(soc); */
						continue;
					}
					if (qdf_is_macaddr_equal(
						(struct qdf_mac_addr *)self_vdev->mld_mac_addr.raw,
						(struct qdf_mac_addr *)vdev->mld_mac_addr.raw)) {
						if (dp_mlo_add_ptnr_vdev(self_vdev,
							vdev, self_soc,
							self_vdev->pdev->pdev_id) !=
							QDF_STATUS_SUCCESS) {
							dp_err("Unable to add self to partner vdev's list");
							dp_vdev_unref_delete(soc,
								vdev, DP_MOD_ID_RX);
							/* TODO - release soc ref here */
							/* dp_soc_unref_delete(soc); */
							ret = QDF_STATUS_E_FAILURE;
							goto exit;
						}
						/* add to self list */
						if (dp_mlo_add_ptnr_vdev(vdev,
							self_vdev, soc,
							vdev->pdev->pdev_id) !=
							QDF_STATUS_SUCCESS) {
							dp_err("Unable to add vdev to self vdev's list");
							dp_vdev_unref_delete(self_soc,
								vdev, DP_MOD_ID_RX);
							/* TODO - release soc ref here */
							/* dp_soc_unref_delete(soc); */
							ret = QDF_STATUS_E_FAILURE;
							goto exit;
						}
					}
					dp_vdev_unref_delete(soc, vdev,
							     DP_MOD_ID_RX);
				} /* vdev */
				/* TODO - release soc ref here */
				/* dp_soc_unref_delete(soc); */
			} /* soc */
		} /* for */
	} /* for */

exit:
	dp_vdev_unref_delete(self_soc, self_vdev, DP_MOD_ID_RX);
	return ret;
}

void dp_clr_mlo_ptnr_list(struct dp_soc *soc, struct dp_vdev *vdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *vdev_be = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
	uint8_t soc_id = be_soc->mlo_chip_id;
	uint8_t pdev_id = vdev->pdev->pdev_id;
	int i, j;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *pr_vdev;
			struct dp_soc *pr_soc;
			struct dp_soc_be *pr_soc_be;
			struct dp_pdev *pr_pdev;
			struct dp_vdev_be *pr_vdev_be;

			if (vdev_be->partner_vdev_list[i][j] ==
			    CDP_INVALID_VDEV_ID)
				continue;

			pr_soc = dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);
			if (!pr_soc)
				continue;

			pr_soc_be = dp_get_be_soc_from_dp_soc(pr_soc);
			pr_vdev = dp_vdev_get_ref_by_id(pr_soc,
					vdev_be->partner_vdev_list[i][j],
					DP_MOD_ID_RX);
			if (!pr_vdev)
				continue;

			/* remove self vdev from partner list */
			pr_vdev_be = dp_get_be_vdev_from_dp_vdev(pr_vdev);
			if (vdev->is_bridge_vdev)
				pr_vdev_be->bridge_vdev_list[soc_id][pdev_id] =
					CDP_INVALID_VDEV_ID;
			else
				pr_vdev_be->partner_vdev_list[soc_id][pdev_id] =
					CDP_INVALID_VDEV_ID;

			/* remove partner vdev from self list */
			pr_pdev = pr_vdev->pdev;
			vdev_be->partner_vdev_list[pr_soc_be->mlo_chip_id][pr_pdev->pdev_id] =
				CDP_INVALID_VDEV_ID;

			dp_vdev_unref_delete(pr_soc, pr_vdev, DP_MOD_ID_RX);
		}
	}

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *pr_vdev = NULL;
			struct dp_soc *pr_soc = NULL;
			struct dp_soc_be *pr_soc_be = NULL;
			struct dp_pdev *pr_pdev = NULL;
			struct dp_vdev_be *pr_vdev_be = NULL;

			if (vdev_be->bridge_vdev_list[i][j] ==
			    CDP_INVALID_VDEV_ID)
				continue;

			pr_soc = dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);
			if (!pr_soc)
				continue;

			pr_soc_be = dp_get_be_soc_from_dp_soc(pr_soc);
			pr_vdev = dp_vdev_get_ref_by_id(
					pr_soc,
					vdev_be->bridge_vdev_list[i][j],
					DP_MOD_ID_RX);
			if (!pr_vdev)
				continue;

			/* remove self vdev from partner list */
			pr_vdev_be = dp_get_be_vdev_from_dp_vdev(pr_vdev);
			if (vdev->is_bridge_vdev)
				pr_vdev_be->bridge_vdev_list[soc_id][pdev_id] =
					CDP_INVALID_VDEV_ID;
			else
				pr_vdev_be->partner_vdev_list[soc_id][pdev_id] =
					CDP_INVALID_VDEV_ID;

			/* remove partner vdev from self list */
			pr_pdev = pr_vdev->pdev;
			vdev_be->bridge_vdev_list[pr_soc_be->mlo_chip_id][pr_pdev->pdev_id] =
				CDP_INVALID_VDEV_ID;

			dp_vdev_unref_delete(pr_soc, pr_vdev, DP_MOD_ID_RX);
		}
	}
}

static QDF_STATUS
dp_clear_mlo_ptnr_list(struct cdp_soc_t *soc_hdl, uint8_t self_vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;

	vdev = dp_vdev_get_ref_by_id(soc, self_vdev_id, DP_MOD_ID_RX);
	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	dp_clr_mlo_ptnr_list(soc, vdev);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
	return QDF_STATUS_SUCCESS;
}
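
/**
 * dp_mlo_setup_complete() - Complete MLO group setup
 * @cdp_ml_ctxt: CDP DP MLO context handle
 *
 * For every soc registered in the ML context, initialize its Rx HW cookie
 * conversion against every partner soc's Rx descriptor pools.
 *
 * Return: void
 */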
static void dp_mlo_setup_complete(struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	int i;
	struct dp_soc *soc;
	struct dp_soc_be *be_soc;
	QDF_STATUS qdf_status;

	if (!cdp_ml_ctxt)
		return;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
		if (!soc)
			continue;

		be_soc = dp_get_be_soc_from_dp_soc(soc);
		qdf_status = dp_partner_soc_rx_hw_cc_init(mlo_ctxt, be_soc);
		if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
			dp_alert("MLO partner SOC Rx desc CC init failed");
			qdf_assert_always(0);
		}
	}
}

static void dp_mlo_update_delta_tsf2(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id, uint64_t delta_tsf2)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;
	struct dp_pdev_be *be_pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						  pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL for pdev_id %u", pdev_id);
		return;
	}

	be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);

	be_pdev->delta_tsf2 = delta_tsf2;
}

static void dp_mlo_update_delta_tqm(struct cdp_soc_t *soc_hdl,
				    uint64_t delta_tqm)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	be_soc->delta_tqm = delta_tqm;
}

static void dp_mlo_update_mlo_ts_offset(struct cdp_soc_t *soc_hdl,
					uint64_t offset)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	be_soc->mlo_tstamp_offset = offset;
}

#ifdef CONFIG_MLO_SINGLE_DEV
/**
 * dp_aggregate_vdev_basic_stats() - aggregate vdev basic stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline
void dp_aggregate_vdev_basic_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	DP_UPDATE_BASIC_STATS(tgt_vdev_stats, src_vdev_stats);
}

/**
 * dp_aggregate_vdev_ingress_stats() - aggregate vdev ingress stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline
void dp_aggregate_vdev_ingress_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	/* Aggregate vdev ingress stats */
	DP_UPDATE_INGRESS_STATS(tgt_vdev_stats, src_vdev_stats);
}

/**
 * dp_aggregate_vdev_stats_for_unmapped_peers() - aggregate unmapped peer stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline
void dp_aggregate_vdev_stats_for_unmapped_peers(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	/* Aggregate unmapped peers stats */
	DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(tgt_vdev_stats, src_vdev_stats);
}

/**
 * dp_aggregate_all_vdev_stats() - aggregate vdev ingress and unmapped peer
 * stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline
void dp_aggregate_all_vdev_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	dp_aggregate_vdev_ingress_stats(tgt_vdev_stats, src_vdev_stats);
	dp_aggregate_vdev_stats_for_unmapped_peers(tgt_vdev_stats,
						   src_vdev_stats);
}

/**
 * dp_mlo_vdev_stats_aggr_bridge_vap() - aggregate bridge vdev stats
 * @be_vdev: DP vdev BE handle
 * @bridge_vdev: DP vdev handle for the bridge vdev
 * @arg: buffer for target vdev stats
 *
 * Return: void
 */
static
void dp_mlo_vdev_stats_aggr_bridge_vap(struct dp_vdev_be *be_vdev,
				       struct dp_vdev *bridge_vdev,
				       void *arg)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;
	struct dp_vdev_be *bridge_be_vdev = NULL;

	bridge_be_vdev = dp_get_be_vdev_from_dp_vdev(bridge_vdev);
	if (!bridge_be_vdev)
		return;

	dp_aggregate_all_vdev_stats(tgt_vdev_stats, &bridge_vdev->stats);
	dp_aggregate_all_vdev_stats(tgt_vdev_stats, &bridge_be_vdev->mlo_stats);
	dp_vdev_iterate_peer(bridge_vdev, dp_update_vdev_stats, tgt_vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);
}

/**
 * dp_aggregate_interface_stats_based_on_peer_type() - aggregate stats at
 * VDEV level based on the type of peer connected to the vdev
 * @vdev: DP VDEV handle
 * @vdev_stats: target vdev stats pointer
 * @peer_type: type of peer - MLO link or legacy peer
 *
 * Return: void
 */
static
void dp_aggregate_interface_stats_based_on_peer_type(
					struct dp_vdev *vdev,
					struct cdp_vdev_stats *vdev_stats,
					enum dp_peer_type peer_type)
{
	struct cdp_vdev_stats *tgt_vdev_stats = NULL;
	struct dp_vdev_be *be_vdev = NULL;
	struct dp_soc_be *be_soc = NULL;

	if (!vdev || !vdev->pdev)
		return;

	tgt_vdev_stats = vdev_stats;
	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!be_vdev)
		return;

	if (peer_type == DP_PEER_TYPE_LEGACY) {
		dp_aggregate_all_vdev_stats(tgt_vdev_stats,
					    &vdev->stats);
	} else {
		if (be_vdev->mcast_primary) {
			dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
					      dp_mlo_vdev_stats_aggr_bridge_vap,
					      (void *)vdev_stats,
					      DP_MOD_ID_GENERIC_STATS,
					      DP_BRIDGE_VDEV_ITER);
		}
		dp_aggregate_vdev_ingress_stats(tgt_vdev_stats,
						&vdev->stats);
		dp_aggregate_vdev_stats_for_unmapped_peers(
						tgt_vdev_stats,
						&be_vdev->mlo_stats);
	}

	/* Aggregate associated peer stats */
	dp_vdev_iterate_specific_peer_type(vdev,
					   dp_update_vdev_stats,
					   vdev_stats,
					   DP_MOD_ID_GENERIC_STATS,
					   peer_type);
}

/**
 * dp_aggregate_interface_stats() - aggregate stats at VDEV level
 * @vdev: DP VDEV handle
 * @vdev_stats: target vdev stats pointer
 *
 * Return: void
 */
static
void dp_aggregate_interface_stats(struct dp_vdev *vdev,
				  struct cdp_vdev_stats *vdev_stats)
{
	struct dp_vdev_be *be_vdev = NULL;
	struct dp_soc_be *be_soc = NULL;

	if (!vdev || !vdev->pdev)
		return;

	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!be_vdev)
		return;

	if (be_vdev->mcast_primary) {
		dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
				      dp_mlo_vdev_stats_aggr_bridge_vap,
				      (void *)vdev_stats,
				      DP_MOD_ID_GENERIC_STATS,
				      DP_BRIDGE_VDEV_ITER);
	}

	dp_aggregate_all_vdev_stats(vdev_stats, &be_vdev->mlo_stats);
	dp_aggregate_all_vdev_stats(vdev_stats, &vdev->stats);
	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);
	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
}

/**
 * dp_mlo_aggr_ptnr_iface_stats() - aggregate MLO partner vdev stats
 * @be_vdev: vdev handle
 * @ptnr_vdev: partner vdev handle
 * @arg: target buffer for aggregation
 *
 * Return: void
 */
static
void dp_mlo_aggr_ptnr_iface_stats(struct dp_vdev_be *be_vdev,
				  struct dp_vdev *ptnr_vdev,
				  void *arg)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;

	dp_aggregate_interface_stats(ptnr_vdev, tgt_vdev_stats);
}

/**
 * dp_mlo_aggr_ptnr_iface_stats_mlo_links() - aggregate MLO partner vdev stats
 * based on peer type
 * @be_vdev: vdev handle
 * @ptnr_vdev: partner vdev handle
 * @arg: target buffer for aggregation
 *
 * Return: void
 */
static
void dp_mlo_aggr_ptnr_iface_stats_mlo_links(
					struct dp_vdev_be *be_vdev,
					struct dp_vdev *ptnr_vdev,
					void *arg)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;

	dp_aggregate_interface_stats_based_on_peer_type(ptnr_vdev,
							tgt_vdev_stats,
							DP_PEER_TYPE_MLO_LINK);
}

/**
 * dp_aggregate_sta_interface_stats() - for STA mode, aggregate vdev stats
 * from all link peers
 * @soc: soc handle
 * @vdev: vdev handle
 * @buf: target buffer for aggregation
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_aggregate_sta_interface_stats(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 void *buf)
{
	struct dp_peer *vap_bss_peer = NULL;
	struct dp_peer *mld_peer = NULL;
	struct dp_peer *link_peer = NULL;
	struct dp_mld_link_peers link_peers_info;
	uint8_t i = 0;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
						  DP_MOD_ID_GENERIC_STATS);
	if (!vap_bss_peer)
		return QDF_STATUS_E_FAILURE;

	mld_peer = DP_GET_MLD_PEER_FROM_PEER(vap_bss_peer);

	if (!mld_peer) {
		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_GENERIC_STATS);
		return QDF_STATUS_E_FAILURE;
	}

	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, &link_peers_info,
					    DP_MOD_ID_GENERIC_STATS);

	for (i = 0; i < link_peers_info.num_links; i++) {
		link_peer = link_peers_info.link_peers[i];
		dp_update_vdev_stats(soc, link_peer, buf);
		dp_aggregate_vdev_ingress_stats((struct cdp_vdev_stats *)buf,
						&link_peer->vdev->stats);
		dp_aggregate_vdev_basic_stats(
					(struct cdp_vdev_stats *)buf,
					&link_peer->vdev->stats);
	}

	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_GENERIC_STATS);
	dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_GENERIC_STATS);
	return ret;
}

static QDF_STATUS dp_mlo_get_mld_vdev_stats(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id, void *buf,
					    bool link_vdev_only)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_GENERIC_STATS);
	struct dp_vdev_be *vdev_be = NULL;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	vdev_be = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!vdev_be) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
		return QDF_STATUS_E_FAILURE;
	}

	if (vdev->opmode == wlan_op_mode_sta) {
		ret = dp_aggregate_sta_interface_stats(soc, vdev, buf);
		goto complete;
	}

	if (DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap &&
	    vdev->opmode == wlan_op_mode_ap) {
		dp_aggregate_interface_stats_based_on_peer_type(
						vdev, buf,
						DP_PEER_TYPE_MLO_LINK);
		if (link_vdev_only)
			goto complete;

		/* Aggregate stats from partner vdevs */
		dp_mlo_iter_ptnr_vdev(be_soc, vdev_be,
				      dp_mlo_aggr_ptnr_iface_stats_mlo_links,
				      buf,
				      DP_MOD_ID_GENERIC_STATS,
				      DP_LINK_VDEV_ITER);
	} else {
		dp_aggregate_interface_stats(vdev, buf);

		if (link_vdev_only)
			goto complete;

		/* Aggregate stats from partner vdevs */
		dp_mlo_iter_ptnr_vdev(be_soc, vdev_be,
				      dp_mlo_aggr_ptnr_iface_stats, buf,
				      DP_MOD_ID_GENERIC_STATS,
				      DP_LINK_VDEV_ITER);
	}

complete:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
	return ret;
}

QDF_STATUS
dp_get_interface_stats_be(struct cdp_soc_t *soc_hdl,
			  uint8_t vdev_id,
			  void *buf,
			  bool is_aggregate)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_GENERIC_STATS);
	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	if (DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap &&
	    vdev->opmode == wlan_op_mode_ap) {
		dp_aggregate_interface_stats_based_on_peer_type(
						vdev, buf,
						DP_PEER_TYPE_LEGACY);
	} else {
		dp_aggregate_interface_stats(vdev, buf);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
	return QDF_STATUS_SUCCESS;
}
#endif

static struct cdp_mlo_ops dp_mlo_ops = {
	.mlo_soc_setup = dp_mlo_soc_setup,
	.mlo_soc_teardown = dp_mlo_soc_teardown,
	.update_mlo_ptnr_list = dp_update_mlo_ptnr_list,
	.clear_mlo_ptnr_list = dp_clear_mlo_ptnr_list,
	.mlo_setup_complete = dp_mlo_setup_complete,
	.mlo_update_delta_tsf2 = dp_mlo_update_delta_tsf2,
	.mlo_update_delta_tqm = dp_mlo_update_delta_tqm,
	.mlo_update_mlo_ts_offset = dp_mlo_update_mlo_ts_offset,
	.mlo_ctxt_attach = dp_mlo_ctxt_attach_wifi3,
	.mlo_ctxt_detach = dp_mlo_ctxt_detach_wifi3,
#ifdef CONFIG_MLO_SINGLE_DEV
	.mlo_get_mld_vdev_stats = dp_mlo_get_mld_vdev_stats,
#endif
};

void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!params->mlo_enabled) {
		dp_warn("MLO not enabled on SOC");
		return;
	}

	be_soc->mlo_chip_id = params->mlo_chip_id;
	be_soc->ml_ctxt = cdp_mlo_ctx_to_dp(params->ml_context);
	be_soc->mlo_enabled = 1;
	soc->cdp_soc.ops->mlo_ops = &dp_mlo_ops;
}

void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	uint8_t link_id;

	if (!be_soc->mlo_enabled)
		return;

	if (!ml_ctxt)
		return;

	link_id = be_pdev->mlo_link_id;

	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC) {
		if (!ml_ctxt->link_to_pdev_map[link_id])
			ml_ctxt->link_to_pdev_map[link_id] = be_pdev;
		else
			dp_alert("Attempt to update existing map for link %u",
				 link_id);
	}
}

void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	uint8_t link_id;

	if (!be_soc->mlo_enabled)
		return;

	if (!ml_ctxt)
		return;

	link_id = be_pdev->mlo_link_id;
	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
		ml_ctxt->link_to_pdev_map[link_id] = NULL;
}

static struct dp_pdev_be *
dp_mlo_get_be_pdev_from_link_id(struct dp_mlo_ctxt *ml_ctxt, uint8_t link_id)
{
	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
		return ml_ctxt->link_to_pdev_map[link_id];
	return NULL;
}

void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(pdev->soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);

	if (!be_soc->mlo_enabled) {
		dp_info("MLO not enabled on SOC");
		return;
	}

	be_pdev->mlo_link_id = params->mlo_link_id;
}
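
/**
 * dp_mlo_partner_chips_map() - Map an ML peer's peer id on all partner socs
 * @soc: DP soc handle of the chip the peer was created on
 * @peer: DP peer handle
 * @peer_id: peer id to map
 *
 * For ML peers only: adds @peer to the peer-id-to-object map of every other
 * soc registered in the MLO group.
 *
 * Return: void
 */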
void dp_mlo_partner_chips_map(struct dp_soc *soc,
			      struct dp_peer *peer,
			      uint16_t peer_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = NULL;
	bool is_ml_peer_id =
		HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
	uint8_t chip_id;
	struct dp_soc *temp_soc;

	/* for non-ML peers, don't map on partner chips */
	if (!is_ml_peer_id)
		return;

	mlo_ctxt = be_soc->ml_ctxt;
	if (!mlo_ctxt)
		return;

	qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);
	for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
		temp_soc = mlo_ctxt->ml_soc_list[chip_id];

		if (!temp_soc)
			continue;

		/* skip if this is the current soc */
		if (temp_soc == soc)
			continue;

		dp_peer_find_id_to_obj_add(temp_soc, peer, peer_id);
	}
	qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
}

qdf_export_symbol(dp_mlo_partner_chips_map);

void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
				uint16_t peer_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	bool is_ml_peer_id =
		HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
	uint8_t chip_id;
	struct dp_soc *temp_soc;

	if (!is_ml_peer_id)
		return;

	if (!mlo_ctxt)
		return;

	qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);
	for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
		temp_soc = mlo_ctxt->ml_soc_list[chip_id];

		if (!temp_soc)
			continue;

		/* skip if this is the current soc */
		if (temp_soc == soc)
			continue;

		dp_peer_find_id_to_obj_remove(temp_soc, peer_id);
	}
	qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
}

qdf_export_symbol(dp_mlo_partner_chips_unmap);

uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return be_soc->mlo_chip_id;
}

qdf_export_symbol(dp_mlo_get_chip_id);

struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *link_peer_soc = NULL;
	struct dp_peer *peer = NULL;

	if (!mlo_ctxt)
		return NULL;

	link_peer_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);

	if (!link_peer_soc)
		return NULL;

	peer = dp_peer_find_hash_find(link_peer_soc, peer_mac_addr,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
	qdf_atomic_dec(&link_peer_soc->ref_count);
	return peer;
}

qdf_export_symbol(dp_link_peer_hash_find_by_chip_id);

void dp_mlo_get_rx_hash_key(struct dp_soc *soc,
			    struct cdp_lro_hash_config *lro_hash)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !ml_ctxt)
		return dp_get_rx_hash_key_bytes(lro_hash);

	qdf_mem_copy(lro_hash->toeplitz_hash_ipv4, ml_ctxt->toeplitz_hash_ipv4,
		     (sizeof(lro_hash->toeplitz_hash_ipv4[0]) *
		      LRO_IPV4_SEED_ARR_SZ));
	qdf_mem_copy(lro_hash->toeplitz_hash_ipv6, ml_ctxt->toeplitz_hash_ipv6,
		     (sizeof(lro_hash->toeplitz_hash_ipv6[0]) *
		      LRO_IPV6_SEED_ARR_SZ));
}

struct dp_soc *
dp_rx_replenish_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *replenish_soc;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	if (be_soc->mlo_chip_id == chip_id)
		return soc;

	replenish_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
	if (qdf_unlikely(!replenish_soc)) {
		dp_alert("replenish SOC is NULL");
		qdf_assert_always(0);
	}

	return replenish_soc;
}

uint8_t dp_soc_get_num_soc_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return 1;

	return mlo_ctxt->ml_soc_cnt;
}

struct dp_soc *
dp_soc_get_by_idle_bm_id(struct dp_soc *soc, uint8_t idle_bm_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *partner_soc = NULL;
	uint8_t chip_id;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);

		if (!partner_soc)
			continue;

		if (partner_soc->idle_link_bm_id == idle_bm_id)
			return partner_soc;
	}

	return NULL;
}

#ifdef WLAN_MLO_MULTI_CHIP
static void dp_print_mlo_partner_list(struct dp_vdev_be *be_vdev,
				      struct dp_vdev *partner_vdev,
				      void *arg)
{
	struct dp_vdev_be *partner_vdev_be = NULL;
	struct dp_soc_be *partner_soc_be = NULL;

	partner_vdev_be = dp_get_be_vdev_from_dp_vdev(partner_vdev);
	partner_soc_be = dp_get_be_soc_from_dp_soc(partner_vdev->pdev->soc);

	DP_PRINT_STATS("is_bridge_vap = %s, mcast_primary = %s, vdev_id = %d, pdev_id = %d, chip_id = %d",
		       partner_vdev->is_bridge_vdev ? "true" : "false",
		       partner_vdev_be->mcast_primary ? "true" : "false",
		       partner_vdev->vdev_id,
		       partner_vdev->pdev->pdev_id,
		       partner_soc_be->mlo_chip_id);
}

void dp_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
			   struct dp_vdev_be *be_vdev,
			   dp_ptnr_vdev_iter_func func,
			   void *arg,
			   enum dp_mod_id mod_id,
			   uint8_t type)
{
	int i = 0;
	int j = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;

	if (type < DP_LINK_VDEV_ITER || type > DP_ALL_VDEV_ITER) {
		dp_err("invalid iterate type");
		return;
	}

	for (i = 0; (i < WLAN_MAX_MLO_CHIPS) &&
	     IS_LINK_VDEV_ITER_REQUIRED(type); i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *ptnr_vdev;

			ptnr_vdev = dp_vdev_get_ref_by_id(
					ptnr_soc,
					be_vdev->partner_vdev_list[i][j],
					mod_id);
			if (!ptnr_vdev)
				continue;

			(*func)(be_vdev, ptnr_vdev, arg);
			dp_vdev_unref_delete(ptnr_vdev->pdev->soc,
					     ptnr_vdev,
					     mod_id);
		}
	}

	for (i = 0; (i < WLAN_MAX_MLO_CHIPS) &&
	     IS_BRIDGE_VDEV_ITER_REQUIRED(type); i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *bridge_vdev;

			bridge_vdev = dp_vdev_get_ref_by_id(
					ptnr_soc,
					be_vdev->bridge_vdev_list[i][j],
					mod_id);
			if (!bridge_vdev)
				continue;

			(*func)(be_vdev, bridge_vdev, arg);
			dp_vdev_unref_delete(bridge_vdev->pdev->soc,
					     bridge_vdev,
					     mod_id);
		}
	}
}

qdf_export_symbol(dp_mlo_iter_ptnr_vdev);

void dp_mlo_debug_print_ptnr_info(struct dp_vdev *vdev)
{
	struct dp_vdev_be *be_vdev = NULL;
	struct dp_soc_be *be_soc = NULL;

	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	DP_PRINT_STATS("self vdev is_bridge_vap = %s, mcast_primary = %s, vdev_id = %d, pdev_id = %d, chip_id = %d",
		       vdev->is_bridge_vdev ? "true" : "false",
		       be_vdev->mcast_primary ? "true" : "false",
		       vdev->vdev_id,
		       vdev->pdev->pdev_id,
		       dp_mlo_get_chip_id(vdev->pdev->soc));

	dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
			      dp_print_mlo_partner_list,
			      NULL, DP_MOD_ID_GENERIC_STATS,
			      DP_ALL_VDEV_ITER);
}
#endif

#ifdef WLAN_MCAST_MLO
struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
					      struct dp_vdev_be *be_vdev,
					      enum dp_mod_id mod_id)
{
	int i = 0;
	int j = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
	struct dp_vdev *vdev = (struct dp_vdev *)be_vdev;

	if (be_vdev->mcast_primary) {
		if (dp_vdev_get_ref((struct dp_soc *)be_soc, vdev, mod_id) !=
		    QDF_STATUS_SUCCESS)
			return NULL;

		return vdev;
	}

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *ptnr_vdev = NULL;
			struct dp_vdev_be *be_ptnr_vdev = NULL;

			ptnr_vdev = dp_vdev_get_ref_by_id(
					ptnr_soc,
					be_vdev->partner_vdev_list[i][j],
					mod_id);
			if (!ptnr_vdev)
				continue;

			be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
			if (be_ptnr_vdev->mcast_primary)
				return ptnr_vdev;

			dp_vdev_unref_delete(be_ptnr_vdev->vdev.pdev->soc,
					     &be_ptnr_vdev->vdev,
					     mod_id);
		}
	}

	return NULL;
}

qdf_export_symbol(dp_mlo_get_mcast_primary_vdev);
#endif

/**
 * dp_mlo_iter_ptnr_soc() - iterate through the MLO soc list and call the
 * callback for each soc
 * @be_soc: dp_soc_be pointer
 * @func: function to be called for each soc
 * @arg: context to be passed to the callback
 *
 * Return: true if MLO is enabled, false if MLO is disabled
 */
bool dp_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc, dp_ptnr_soc_iter_func func,
			  void *arg)
{
	int i = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !be_soc->ml_ctxt)
		return false;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;

		(*func)(ptnr_soc, arg, i);
	}

	return true;
}

qdf_export_symbol(dp_mlo_iter_ptnr_soc);
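
/*
 * Illustrative sketch only (not part of the driver): a minimal iterator
 * callback matching the dp_ptnr_soc_iter_func signature used above;
 * dp_dump_partner_chip() is a hypothetical name.
 *
 *	static void dp_dump_partner_chip(struct dp_soc *soc, void *arg,
 *					 int chip_id)
 *	{
 *		dp_info("partner soc %pK on chip %d", soc, chip_id);
 *	}
 *
 * It would be invoked once per registered partner soc via
 * dp_mlo_iter_ptnr_soc(be_soc, dp_dump_partner_chip, NULL), mirroring how
 * dp_mlo_soc_teardown() passes dp_mlo_soc_drain_rx_buf() above.
 */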

static inline uint64_t dp_mlo_get_mlo_ts_offset(struct dp_pdev_be *be_pdev)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev;
	struct dp_soc_be *be_soc;
	uint32_t mlo_offset;

	pdev = &be_pdev->pdev;
	soc = pdev->soc;
	be_soc = dp_get_be_soc_from_dp_soc(soc);

	mlo_offset = be_soc->mlo_tstamp_offset;

	return mlo_offset;
}
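
/**
 * dp_mlo_get_delta_tsf2_wrt_mlo_offset() - Get the delta between the MLO
 * timestamp offset and the pdev's delta_tsf2
 * @soc: DP soc handle
 * @hw_link_id: MLO HW link id used to look up the pdev
 *
 * Return: (mlo_tstamp_offset - delta_tsf2), or 0 if the ML context or pdev
 * is not available
 */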
int32_t dp_mlo_get_delta_tsf2_wrt_mlo_offset(struct dp_soc *soc,
					     uint8_t hw_link_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	struct dp_pdev_be *be_pdev;
	int32_t delta_tsf2_mlo_offset;
	int32_t mlo_offset, delta_tsf2;

	if (!ml_ctxt)
		return 0;

	be_pdev = dp_mlo_get_be_pdev_from_link_id(ml_ctxt, hw_link_id);
	if (!be_pdev)
		return 0;

	mlo_offset = dp_mlo_get_mlo_ts_offset(be_pdev);
	delta_tsf2 = be_pdev->delta_tsf2;

	delta_tsf2_mlo_offset = mlo_offset - delta_tsf2;

	return delta_tsf2_mlo_offset;
}

int32_t dp_mlo_get_delta_tqm_wrt_mlo_offset(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	int32_t delta_tqm_mlo_offset;
	int32_t mlo_offset, delta_tqm;

	mlo_offset = be_soc->mlo_tstamp_offset;
	delta_tqm = be_soc->delta_tqm;

	delta_tqm_mlo_offset = mlo_offset - delta_tqm;

	return delta_tqm_mlo_offset;
}

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_umac_reset_update_partner_map() - Update Umac reset partner map
 * @mlo_ctx: DP ML context handle
 * @chip_id: chip id
 * @set: flag indicating whether to set or clear the bit
 *
 * Return: void
 */
static void dp_umac_reset_update_partner_map(struct dp_mlo_ctxt *mlo_ctx,
					     int chip_id, bool set)
{
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx =
		&mlo_ctx->grp_umac_reset_ctx;

	if (set)
		qdf_atomic_set_bit(chip_id, &grp_umac_reset_ctx->partner_map);
	else
		qdf_atomic_clear_bit(chip_id, &grp_umac_reset_ctx->partner_map);
}

QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc)
{
	struct dp_mlo_ctxt *mlo_ctx;
	struct dp_soc_be *be_soc;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (!be_soc) {
		dp_umac_reset_err("null be_soc");
		return QDF_STATUS_E_NULL_VALUE;
	}

	mlo_ctx = be_soc->ml_ctxt;
	if (!mlo_ctx) {
		/* This API can be called for a non-MLO SOC as well. Hence,
		 * return the status as success when mlo_ctx is NULL.
		 */
		return QDF_STATUS_SUCCESS;
	}

	dp_umac_reset_update_partner_map(mlo_ctx, be_soc->mlo_chip_id, false);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_umac_reset_complete_umac_recovery() - Complete Umac reset session
 * @soc: dp soc handle
 *
 * Return: void
 */
void dp_umac_reset_complete_umac_recovery(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_alert("Umac reset was handled on soc %pK", soc);
		return;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	grp_umac_reset_ctx->umac_reset_in_progress = false;
	grp_umac_reset_ctx->is_target_recovery = false;
	grp_umac_reset_ctx->response_map = 0;
	grp_umac_reset_ctx->request_map = 0;
	grp_umac_reset_ctx->initiator_chip_id = 0;

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	dp_umac_reset_alert("Umac reset was handled on mlo group ctxt %pK",
			    mlo_ctx);
}
  1307. /**
  1308. * dp_umac_reset_initiate_umac_recovery() - Initiate Umac reset session
  1309. * @soc: dp soc handle
  1310. * @umac_reset_ctx: Umac reset context
  1311. * @rx_event: Rx event received
  1312. * @is_target_recovery: Flag to indicate if it is triggered for target recovery
  1313. *
  1314. * Return: status
  1315. */
  1316. QDF_STATUS dp_umac_reset_initiate_umac_recovery(struct dp_soc *soc,
  1317. struct dp_soc_umac_reset_ctx *umac_reset_ctx,
  1318. enum umac_reset_rx_event rx_event,
  1319. bool is_target_recovery)
  1320. {
  1321. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  1322. struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
  1323. struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
  1324. QDF_STATUS status = QDF_STATUS_SUCCESS;
  1325. if (!mlo_ctx)
  1326. return dp_umac_reset_validate_n_update_state_machine_on_rx(
  1327. umac_reset_ctx, rx_event,
  1328. UMAC_RESET_STATE_WAIT_FOR_TRIGGER,
  1329. UMAC_RESET_STATE_DO_TRIGGER_RECEIVED);
  1330. grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
  1331. qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
  1332. if (grp_umac_reset_ctx->umac_reset_in_progress) {
  1333. qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
  1334. return QDF_STATUS_E_INVAL;
  1335. }
  1336. status = dp_umac_reset_validate_n_update_state_machine_on_rx(
  1337. umac_reset_ctx, rx_event,
  1338. UMAC_RESET_STATE_WAIT_FOR_TRIGGER,
  1339. UMAC_RESET_STATE_DO_TRIGGER_RECEIVED);
  1340. if (status != QDF_STATUS_SUCCESS) {
  1341. qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
  1342. return status;
  1343. }
  1344. grp_umac_reset_ctx->umac_reset_in_progress = true;
  1345. grp_umac_reset_ctx->is_target_recovery = is_target_recovery;
  1346. /* We don't wait for the 'Umac trigger' message from all socs */
  1347. grp_umac_reset_ctx->request_map = grp_umac_reset_ctx->partner_map;
  1348. grp_umac_reset_ctx->response_map = grp_umac_reset_ctx->partner_map;
	grp_umac_reset_ctx->initiator_chip_id = dp_mlo_get_chip_id(soc);
	grp_umac_reset_ctx->umac_reset_count++;

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	return QDF_STATUS_SUCCESS;
}
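
/* Illustrative example (values assumed, not from the source): for a 3-chip
 * group with chip ids 0..2, partner_map = 0x7. After the seeding above,
 * request_map and response_map both read 0x7, so the initiator's very next
 * rendezvous check ((partner_map & map) == partner_map) passes without
 * waiting on any partner; both maps are then cleared in
 * dp_umac_reset_post_tx_cmd() and later stages synchronize normally.
 */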

/**
 * dp_umac_reset_handle_action_cb() - Function to call action callback
 * @soc: dp soc handle
 * @umac_reset_ctx: Umac reset context
 * @action: Action to call the callback for
 *
 * Return: QDF status
 */
QDF_STATUS
dp_umac_reset_handle_action_cb(struct dp_soc *soc,
			       struct dp_soc_umac_reset_ctx *umac_reset_ctx,
			       enum umac_reset_action action)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_debug("MLO context is NULL");
		goto handle;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	qdf_atomic_set_bit(dp_mlo_get_chip_id(soc),
			   &grp_umac_reset_ctx->request_map);

	dp_umac_reset_debug("partner_map %u request_map %u",
			    grp_umac_reset_ctx->partner_map,
			    grp_umac_reset_ctx->request_map);

	/* Synchronization between mlo socs: run the action callback only
	 * once every partner chip has set its bit in request_map.
	 */
	if ((grp_umac_reset_ctx->partner_map & grp_umac_reset_ctx->request_map)
			!= grp_umac_reset_ctx->partner_map) {
		struct hif_softc *hif_sc = HIF_GET_SOFTC(soc->hif_handle);
		struct hif_umac_reset_ctx *hif_umac_reset_ctx;

		if (!hif_sc) {
			/* Drop the group lock before bailing out on error */
			qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
			hif_err("scn is null");
			qdf_assert_always(0);
			return QDF_STATUS_E_FAILURE;
		}
		hif_umac_reset_ctx = &hif_sc->umac_reset_ctx;

		/* Mark the action as pending */
		umac_reset_ctx->pending_action = action;
		/* Reschedule the tasklet and exit */
		tasklet_hi_schedule(&hif_umac_reset_ctx->intr_tq);
		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

		return QDF_STATUS_SUCCESS;
	}

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
	umac_reset_ctx->pending_action = UMAC_RESET_ACTION_NONE;

handle:
	if (!umac_reset_ctx->rx_actions.cb[action]) {
		dp_umac_reset_err("rx callback is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	return umac_reset_ctx->rx_actions.cb[action](soc);
}
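
/* Rendezvous walkthrough for the request_map logic above (illustrative
 * values): partner_map = 0x7. Chip 0 arrives first (request_map 0x1), parks
 * the action in pending_action and reschedules its tasklet; chip 2 does the
 * same (0x5). Chip 1 arrives last (0x7 == partner_map), falls through and
 * runs its action callback; chips 0 and 2 presumably re-enter this path from
 * their rescheduled tasklets and, seeing the full map, run theirs too.
 */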

/**
 * dp_umac_reset_post_tx_cmd() - Iterate partner socs and post Tx command
 * @umac_reset_ctx: UMAC reset context
 * @tx_cmd: Tx command to be posted
 *
 * Return: QDF status of operation
 */
QDF_STATUS
dp_umac_reset_post_tx_cmd(struct dp_soc_umac_reset_ctx *umac_reset_ctx,
			  enum umac_reset_tx_cmd tx_cmd)
{
	struct dp_soc *soc = container_of(umac_reset_ctx, struct dp_soc,
					  umac_reset_ctx);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_post_tx_cmd_via_shmem(soc, &tx_cmd, 0);
		return QDF_STATUS_SUCCESS;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	qdf_atomic_set_bit(dp_mlo_get_chip_id(soc),
			   &grp_umac_reset_ctx->response_map);

	/* Synchronization between mlo socs: only the last soc to post its
	 * response (i.e. once every partner bit is set in response_map)
	 * proceeds to fan the Tx command out to the whole group.
	 */
	if ((grp_umac_reset_ctx->partner_map & grp_umac_reset_ctx->response_map)
			!= grp_umac_reset_ctx->partner_map) {
		dp_umac_reset_debug(
			"Response(s) pending : expected map %u current map %u",
			grp_umac_reset_ctx->partner_map,
			grp_umac_reset_ctx->response_map);

		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return QDF_STATUS_SUCCESS;
	}

	dp_umac_reset_debug(
		"All responses received: expected map %u current map %u",
		grp_umac_reset_ctx->partner_map,
		grp_umac_reset_ctx->response_map);

	grp_umac_reset_ctx->response_map = 0;
	grp_umac_reset_ctx->request_map = 0;
	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	dp_mlo_iter_ptnr_soc(be_soc, &dp_umac_reset_post_tx_cmd_via_shmem,
			     &tx_cmd);

	if (tx_cmd == UMAC_RESET_TX_CMD_POST_RESET_COMPLETE_DONE)
		dp_umac_reset_complete_umac_recovery(soc);

	return QDF_STATUS_SUCCESS;
}
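
/* Note on the Tx path above: it is the mirror image of the request_map
 * rendezvous in dp_umac_reset_handle_action_cb(). Each soc sets its bit in
 * response_map; the last responder clears both maps under the group lock,
 * posts the Tx command to every partner soc via shared memory, and, for
 * UMAC_RESET_TX_CMD_POST_RESET_COMPLETE_DONE, also tears down the group
 * session through dp_umac_reset_complete_umac_recovery().
 */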

/**
 * dp_umac_reset_initiator_check() - Check if soc is the Umac reset initiator
 * @soc: dp soc handle
 *
 * Return: true if the soc is the initiator, false otherwise
 */
bool dp_umac_reset_initiator_check(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return true;

	return (mlo_ctx->grp_umac_reset_ctx.initiator_chip_id ==
		dp_mlo_get_chip_id(soc));
}

/**
 * dp_umac_reset_target_recovery_check() - Check if this is for target recovery
 * @soc: dp soc handle
 *
 * Return: true if the session is for target recovery, false otherwise
 */
bool dp_umac_reset_target_recovery_check(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return false;

	return mlo_ctx->grp_umac_reset_ctx.is_target_recovery;
}

/**
 * dp_umac_reset_is_soc_ignored() - Check if this soc is to be ignored
 * @soc: dp soc handle
 *
 * Return: true if the soc is ignored, false otherwise
 */
bool dp_umac_reset_is_soc_ignored(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return false;

	return !qdf_atomic_test_bit(dp_mlo_get_chip_id(soc),
				    &mlo_ctx->grp_umac_reset_ctx.partner_map);
}
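
/* Illustrative example (values assumed): with partner_map = 0x5 (chips 0 and
 * 2 active), the soc whose chip id is 1 has its bit clear and is therefore
 * reported as ignored for the current reset session.
 */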

QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc)
{
	struct dp_mlo_ctxt *mlo_ctx;
	struct dp_soc_be *be_soc;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (!be_soc) {
		dp_umac_reset_err("null be_soc");
		return QDF_STATUS_E_NULL_VALUE;
	}

	mlo_ctx = be_soc->ml_ctxt;
	if (!mlo_ctx) {
		/* This API can be called for non-MLO SOC as well. Hence, return
		 * the status as success when mlo_ctx is NULL.
		 */
		return QDF_STATUS_SUCCESS;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;

	DP_UMAC_RESET_PRINT_STATS("MLO UMAC RESET stats\n"
				  "\t\tPartner map                    :%x\n"
				  "\t\tRequest map                    :%x\n"
				  "\t\tResponse map                   :%x\n"
				  "\t\tIs target recovery             :%d\n"
				  "\t\tIs Umac reset in progress      :%d\n"
				  "\t\tNumber of UMAC resets triggered:%d\n"
				  "\t\tInitiator chip ID              :%d\n",
				  grp_umac_reset_ctx->partner_map,
				  grp_umac_reset_ctx->request_map,
				  grp_umac_reset_ctx->response_map,
				  grp_umac_reset_ctx->is_target_recovery,
				  grp_umac_reset_ctx->umac_reset_in_progress,
				  grp_umac_reset_ctx->umac_reset_count,
				  grp_umac_reset_ctx->initiator_chip_id);

	return QDF_STATUS_SUCCESS;
}

enum cdp_umac_reset_state
dp_get_umac_reset_in_progress_state(struct cdp_soc_t *psoc)
{
	struct dp_soc_umac_reset_ctx *umac_reset_ctx;
	struct dp_soc *soc = (struct dp_soc *)psoc;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
	struct dp_soc_be *be_soc = NULL;
	struct dp_mlo_ctxt *mlo_ctx = NULL;
	enum cdp_umac_reset_state umac_reset_is_inprogress;

	if (!soc) {
		dp_umac_reset_err("DP SOC is null");
		return CDP_UMAC_RESET_INVALID_STATE;
	}

	umac_reset_ctx = &soc->umac_reset_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (be_soc)
		mlo_ctx = be_soc->ml_ctxt;

	if (mlo_ctx) {
		grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
		umac_reset_is_inprogress =
			grp_umac_reset_ctx->umac_reset_in_progress;
	} else {
		umac_reset_is_inprogress = (umac_reset_ctx->current_state !=
					    UMAC_RESET_STATE_WAIT_FOR_TRIGGER);
	}

	if (umac_reset_is_inprogress)
		return CDP_UMAC_RESET_IN_PROGRESS;

	/* Check if the umac reset was in progress during the buffer
	 * window, i.e. whether less than the configured window (in ms,
	 * converted to us here) has elapsed since the post-reset-complete
	 * timestamp.
	 */
	umac_reset_is_inprogress =
		((qdf_get_log_timestamp_usecs() -
		  umac_reset_ctx->ts.post_reset_complete_done) <=
		 (wlan_cfg_get_umac_reset_buffer_window_ms(soc->wlan_cfg_ctx) *
		  1000));

	return (umac_reset_is_inprogress ?
		CDP_UMAC_RESET_IN_PROGRESS_DURING_BUFFER_WINDOW :
		CDP_UMAC_RESET_NOT_IN_PROGRESS);
}
#endif
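
/**
 * dp_get_soc_by_chip_id_be() - Get the partner soc matching a chip id
 * @soc: dp soc handle
 * @chip_id: MLO chip id to look up
 *
 * Return: the partner soc whose chip id matches @chip_id; @soc itself when
 * MLO is disabled, when there is no MLO context, or when @chip_id is the
 * local chip id.
 */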
struct dp_soc *
dp_get_soc_by_chip_id_be(struct dp_soc *soc, uint8_t chip_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *partner_soc;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	if (be_soc->mlo_chip_id == chip_id)
		return soc;

	partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
	return partner_soc;
}