dp_mlo.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291
  1. /*
  2. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <wlan_utility.h>
  17. #include <dp_internal.h>
  18. #include <dp_htt.h>
  19. #include <hal_be_api.h>
  20. #include "dp_mlo.h"
  21. #include <dp_be.h>
  22. #include <dp_be_rx.h>
  23. #include <dp_htt.h>
  24. #include <dp_internal.h>
  25. #include <wlan_cfg.h>
  26. #include <wlan_mlo_mgr_cmn.h>
  27. #include "dp_umac_reset.h"
  28. #ifdef DP_UMAC_HW_RESET_SUPPORT
  29. /**
  30. * dp_umac_reset_update_partner_map() - Update Umac reset partner map
  31. * @mlo_ctx: mlo soc context
  32. * @chip_id: chip id
  33. * @set: flag indicating whether to set or clear the bit
  34. *
  35. * Return: void
  36. */
  37. static void dp_umac_reset_update_partner_map(struct dp_mlo_ctxt *mlo_ctx,
  38. int chip_id, bool set);
  39. #endif
  40. /**
  41. * dp_mlo_ctxt_attach_wifi3() - Attach DP MLO context
  42. * @ctrl_ctxt: CDP control context
  43. *
  44. * Return: DP MLO context handle on success, NULL on failure
  45. */
  46. static struct cdp_mlo_ctxt *
  47. dp_mlo_ctxt_attach_wifi3(struct cdp_ctrl_mlo_mgr *ctrl_ctxt)
  48. {
  49. struct dp_mlo_ctxt *mlo_ctxt =
  50. qdf_mem_malloc(sizeof(struct dp_mlo_ctxt));
  51. if (!mlo_ctxt) {
  52. dp_err("Failed to allocate DP MLO Context");
  53. return NULL;
  54. }
  55. mlo_ctxt->ctrl_ctxt = ctrl_ctxt;
  56. if (dp_mlo_peer_find_hash_attach_be
  57. (mlo_ctxt, DP_MAX_MLO_PEER) != QDF_STATUS_SUCCESS) {
  58. dp_err("Failed to allocate peer hash");
  59. qdf_mem_free(mlo_ctxt);
  60. return NULL;
  61. }
  62. qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv4,
  63. (sizeof(mlo_ctxt->toeplitz_hash_ipv4[0]) *
  64. LRO_IPV4_SEED_ARR_SZ));
  65. qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv6,
  66. (sizeof(mlo_ctxt->toeplitz_hash_ipv6[0]) *
  67. LRO_IPV6_SEED_ARR_SZ));
  68. qdf_spinlock_create(&mlo_ctxt->ml_soc_list_lock);
  69. qdf_spinlock_create(&mlo_ctxt->grp_umac_reset_ctx.grp_ctx_lock);
  70. return dp_mlo_ctx_to_cdp(mlo_ctxt);
  71. }
  72. /**
  73. * dp_mlo_ctxt_detach_wifi3() - Detach DP MLO context
  74. * @cdp_ml_ctxt: pointer to CDP DP MLO context
  75. *
  76. * Return: void
  77. */
  78. static void dp_mlo_ctxt_detach_wifi3(struct cdp_mlo_ctxt *cdp_ml_ctxt)
  79. {
  80. struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
  81. if (!cdp_ml_ctxt)
  82. return;
  83. qdf_spinlock_destroy(&mlo_ctxt->grp_umac_reset_ctx.grp_ctx_lock);
  84. qdf_spinlock_destroy(&mlo_ctxt->ml_soc_list_lock);
  85. dp_mlo_peer_find_hash_detach_be(mlo_ctxt);
  86. qdf_mem_free(mlo_ctxt);
  87. }
/**
 * dp_mlo_set_soc_by_chip_id() - Add DP soc to ML context soc list
 * @ml_ctxt: DP ML context handle
 * @soc: DP soc handle (non-NULL on attach, NULL on detach)
 * @chip_id: MLO chip id, index into @ml_ctxt->ml_soc_list
 *
 * Publishes (or clears) this soc's slot in the group soc list and keeps
 * the active-soc count in step, all under ml_soc_list_lock.
 *
 * Return: void
 */
static void dp_mlo_set_soc_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
				      struct dp_soc *soc,
				      uint8_t chip_id)
{
	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
	ml_ctxt->ml_soc_list[chip_id] = soc;

	/* The same API is called during soc_attach and soc_detach
	 * soc parameter is non-null or null accordingly.
	 */
	if (soc)
		ml_ctxt->ml_soc_cnt++;
	else
		ml_ctxt->ml_soc_cnt--;

	/* Keep the UMAC-reset partner bitmap in sync with the soc list.
	 * NOTE(review): this call is unconditional while its prototype is
	 * guarded by DP_UMAC_HW_RESET_SUPPORT — confirm a stub exists when
	 * that feature is disabled.
	 */
	dp_umac_reset_update_partner_map(ml_ctxt, chip_id, !!soc);

	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
}
  112. struct dp_soc*
  113. dp_mlo_get_soc_ref_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
  114. uint8_t chip_id)
  115. {
  116. struct dp_soc *soc = NULL;
  117. if (!ml_ctxt) {
  118. dp_warn("MLO context not created, MLO not enabled");
  119. return NULL;
  120. }
  121. qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
  122. soc = ml_ctxt->ml_soc_list[chip_id];
  123. if (!soc) {
  124. qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
  125. return NULL;
  126. }
  127. qdf_atomic_inc(&soc->ref_count);
  128. qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
  129. return soc;
  130. }
/*
 * dp_partner_soc_rx_hw_cc_init() - Initialize Rx HW cookie conversion on
 * @be_soc for the Rx descriptor pools of every partner soc in the group.
 * @mlo_ctxt: MLO context iterated for partner socs
 * @be_soc: soc whose cookie-conversion setup is being initialized
 *
 * Return: QDF_STATUS_SUCCESS, or the first failing status (remaining
 * partners/pools are not processed after a failure).
 */
static QDF_STATUS dp_partner_soc_rx_hw_cc_init(struct dp_mlo_ctxt *mlo_ctxt,
					       struct dp_soc_be *be_soc)
{
	uint8_t i;
	struct dp_soc *partner_soc;
	struct dp_soc_be *be_partner_soc;
	uint8_t pool_id;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		/* NOTE(review): this takes a reference on partner_soc that is
		 * never released in this function — confirm the hold is
		 * intentional (sibling iterators share the same pattern).
		 */
		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
		if (!partner_soc) {
			dp_err("partner_soc is NULL");
			continue;
		}

		be_partner_soc = dp_get_be_soc_from_dp_soc(partner_soc);

		/* Init CC against each of the partner's Rx descriptor pools */
		for (pool_id = 0; pool_id < MAX_RXDESC_POOLS; pool_id++) {
			qdf_status =
				dp_hw_cookie_conversion_init
					(be_soc,
					 &be_partner_soc->rx_cc_ctx[pool_id]);
			if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
				dp_alert("MLO partner soc RX CC init failed");
				return qdf_status;
			}
		}
	}

	return qdf_status;
}
/*
 * dp_mlo_soc_drain_rx_buf() - Drain pending Rx buffers on @soc's Rx rings.
 * @soc: soc whose REO dest / exception / WBM release rings are drained
 * @arg: unused iterator argument
 * @chip_id: unused chip id supplied by dp_mlo_iter_ptnr_soc()
 *
 * Masks all Rx-related interrupts, waits for any in-flight dp_service_srngs
 * to finish, processes every enabled Rx ring to empty it, then restores the
 * saved interrupt masks. Called per partner soc during MLO soc teardown.
 */
static void dp_mlo_soc_drain_rx_buf(struct dp_soc *soc, void *arg, int chip_id)
{
	uint8_t i = 0;
	uint8_t cpu = 0;
	/* Saved per-context interrupt masks, restored at the end */
	uint8_t rx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_err_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_wbm_rel_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t reo_status_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};

	/* Save the current interrupt mask and disable the interrupts */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		rx_ring_mask[i] = soc->intr_ctx[i].rx_ring_mask;
		rx_err_ring_mask[i] = soc->intr_ctx[i].rx_err_ring_mask;
		rx_wbm_rel_ring_mask[i] = soc->intr_ctx[i].rx_wbm_rel_ring_mask;
		reo_status_ring_mask[i] = soc->intr_ctx[i].reo_status_ring_mask;
		soc->intr_ctx[i].rx_ring_mask = 0;
		soc->intr_ctx[i].rx_err_ring_mask = 0;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
		soc->intr_ctx[i].reo_status_ring_mask = 0;
	}

	/* make sure dp_service_srngs not running on any of the CPU */
	/* NOTE(review): busy-wait with no timeout/relax — presumably safe in
	 * the teardown context this runs in; confirm no scheduling hazard.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		while (qdf_atomic_test_bit(cpu,
					   &soc->service_rings_running))
			;
	}

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		uint8_t ring = 0;
		uint32_t num_entries = 0;
		hal_ring_handle_t hal_ring_hdl = NULL;
		uint8_t rx_mask = wlan_cfg_get_rx_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_err_mask = wlan_cfg_get_rx_err_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_wbm_rel_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
						soc->wlan_cfg_ctx, i);

		if (rx_mask) {
			/* iterate through each reo ring and process the buf */
			for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
				if (!(rx_mask & (1 << ring)))
					continue;

				hal_ring_hdl =
					soc->reo_dest_ring[ring].hal_srng;
				num_entries = hal_srng_get_num_entries(
								soc->hal_soc,
								hal_ring_hdl);
				dp_rx_process_be(&soc->intr_ctx[i],
						 hal_ring_hdl,
						 ring,
						 num_entries);
			}
		}

		/* Process REO Exception ring */
		if (rx_err_mask) {
			hal_ring_hdl = soc->reo_exception_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
						soc->hal_soc,
						hal_ring_hdl);
			dp_rx_err_process(&soc->intr_ctx[i], soc,
					  hal_ring_hdl, num_entries);
		}

		/* Process Rx WBM release ring */
		if (rx_wbm_rel_mask) {
			hal_ring_hdl = soc->rx_rel_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
						soc->hal_soc,
						hal_ring_hdl);
			dp_rx_wbm_err_process(&soc->intr_ctx[i], soc,
					      hal_ring_hdl, num_entries);
		}
	}

	/* restore the interrupt mask */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].rx_ring_mask = rx_ring_mask[i];
		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask[i];
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask[i];
		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask[i];
	}
}
  237. static void dp_mlo_soc_setup(struct cdp_soc_t *soc_hdl,
  238. struct cdp_mlo_ctxt *cdp_ml_ctxt)
  239. {
  240. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  241. struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
  242. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  243. uint8_t pdev_id;
  244. if (!cdp_ml_ctxt)
  245. return;
  246. be_soc->ml_ctxt = mlo_ctxt;
  247. for (pdev_id = 0; pdev_id < MAX_PDEV_CNT; pdev_id++) {
  248. if (soc->pdev_list[pdev_id])
  249. dp_mlo_update_link_to_pdev_map(soc,
  250. soc->pdev_list[pdev_id]);
  251. }
  252. dp_mlo_set_soc_by_chip_id(mlo_ctxt, soc, be_soc->mlo_chip_id);
  253. }
  254. static void dp_mlo_soc_teardown(struct cdp_soc_t *soc_hdl,
  255. struct cdp_mlo_ctxt *cdp_ml_ctxt,
  256. bool is_force_down)
  257. {
  258. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  259. struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
  260. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  261. if (!cdp_ml_ctxt)
  262. return;
  263. /* During the teardown drain the Rx buffers if any exist in the ring */
  264. dp_mlo_iter_ptnr_soc(be_soc,
  265. dp_mlo_soc_drain_rx_buf,
  266. NULL);
  267. dp_mlo_set_soc_by_chip_id(mlo_ctxt, NULL, be_soc->mlo_chip_id);
  268. be_soc->ml_ctxt = NULL;
  269. }
/*
 * dp_mlo_add_ptnr_vdev() - Record @vdev1 in @vdev2's partner-vdev map.
 * @vdev1: vdev whose id is stored
 * @vdev2: vdev whose partner map is updated
 * @soc: soc that @vdev1 belongs to (supplies the mlo_chip_id index)
 * @pdev_id: pdev id of @vdev1 on @soc (second index into the map)
 *
 * The map is indexed by (chip id, pdev id) of the partner link.
 *
 * Return: QDF_STATUS_SUCCESS (also when the slot was already populated)
 */
static QDF_STATUS dp_mlo_add_ptnr_vdev(struct dp_vdev *vdev1,
				       struct dp_vdev *vdev2,
				       struct dp_soc *soc, uint8_t pdev_id)
{
	struct dp_soc_be *soc_be = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *vdev2_be = dp_get_be_vdev_from_dp_vdev(vdev2);

	/* return when valid entry exists */
	if (vdev2_be->partner_vdev_list[soc_be->mlo_chip_id][pdev_id] !=
	    CDP_INVALID_VDEV_ID)
		return QDF_STATUS_SUCCESS;

	vdev2_be->partner_vdev_list[soc_be->mlo_chip_id][pdev_id] =
		vdev1->vdev_id;

	mlo_debug("Add vdev%d to vdev%d list, mlo_chip_id = %d pdev_id = %d\n",
		  vdev1->vdev_id, vdev2->vdev_id, soc_be->mlo_chip_id, pdev_id);

	return QDF_STATUS_SUCCESS;
}
  286. QDF_STATUS dp_update_mlo_ptnr_list(struct cdp_soc_t *soc_hdl,
  287. int8_t partner_vdev_ids[], uint8_t num_vdevs,
  288. uint8_t self_vdev_id)
  289. {
  290. int i, j;
  291. struct dp_soc *self_soc = cdp_soc_t_to_dp_soc(soc_hdl);
  292. struct dp_vdev *self_vdev;
  293. QDF_STATUS ret = QDF_STATUS_SUCCESS;
  294. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(self_soc);
  295. struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
  296. if (!dp_mlo)
  297. return QDF_STATUS_E_FAILURE;
  298. self_vdev = dp_vdev_get_ref_by_id(self_soc, self_vdev_id, DP_MOD_ID_RX);
  299. if (!self_vdev)
  300. return QDF_STATUS_E_FAILURE;
  301. /* go through the input vdev id list and if there are partner vdevs,
  302. * - then add the current vdev's id to partner vdev's list using pdev_id and
  303. * increase the reference
  304. * - add partner vdev to self list and increase the reference
  305. */
  306. for (i = 0; i < num_vdevs; i++) {
  307. if (partner_vdev_ids[i] == CDP_INVALID_VDEV_ID)
  308. continue;
  309. for (j = 0; j < WLAN_MAX_MLO_CHIPS; j++) {
  310. struct dp_soc *soc =
  311. dp_mlo_get_soc_ref_by_chip_id(dp_mlo, j);
  312. if (soc) {
  313. struct dp_vdev *vdev;
  314. vdev = dp_vdev_get_ref_by_id(soc,
  315. partner_vdev_ids[i], DP_MOD_ID_RX);
  316. if (vdev) {
  317. if (vdev == self_vdev) {
  318. dp_vdev_unref_delete(soc,
  319. vdev, DP_MOD_ID_RX);
  320. /*dp_soc_unref_delete(soc); */
  321. continue;
  322. }
  323. if (qdf_is_macaddr_equal(
  324. (struct qdf_mac_addr *)self_vdev->mld_mac_addr.raw,
  325. (struct qdf_mac_addr *)vdev->mld_mac_addr.raw)) {
  326. if (dp_mlo_add_ptnr_vdev(self_vdev,
  327. vdev, self_soc,
  328. self_vdev->pdev->pdev_id) !=
  329. QDF_STATUS_SUCCESS) {
  330. dp_err("Unable to add self to partner vdev's list");
  331. dp_vdev_unref_delete(soc,
  332. vdev, DP_MOD_ID_RX);
  333. /* TODO - release soc ref here */
  334. /* dp_soc_unref_delete(soc);*/
  335. ret = QDF_STATUS_E_FAILURE;
  336. goto exit;
  337. }
  338. /* add to self list */
  339. if (dp_mlo_add_ptnr_vdev(vdev, self_vdev, soc,
  340. vdev->pdev->pdev_id) !=
  341. QDF_STATUS_SUCCESS) {
  342. dp_err("Unable to add vdev to self vdev's list");
  343. dp_vdev_unref_delete(self_soc,
  344. vdev, DP_MOD_ID_RX);
  345. /* TODO - release soc ref here */
  346. /* dp_soc_unref_delete(soc);*/
  347. ret = QDF_STATUS_E_FAILURE;
  348. goto exit;
  349. }
  350. }
  351. dp_vdev_unref_delete(soc, vdev,
  352. DP_MOD_ID_RX);
  353. } /* vdev */
  354. /* TODO - release soc ref here */
  355. /* dp_soc_unref_delete(soc); */
  356. } /* soc */
  357. } /* for */
  358. } /* for */
  359. exit:
  360. dp_vdev_unref_delete(self_soc, self_vdev, DP_MOD_ID_RX);
  361. return ret;
  362. }
/**
 * dp_clr_mlo_ptnr_list() - Remove @vdev from every MLO partner-vdev map
 * @soc: soc that @vdev belongs to
 * @vdev: vdev being unlinked from the group
 *
 * Walks @vdev's own (chip, link) partner map; for each recorded partner,
 * clears the slot pointing back at @vdev in the partner's map, then clears
 * the partner's slot in @vdev's own map.
 */
void dp_clr_mlo_ptnr_list(struct dp_soc *soc, struct dp_vdev *vdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *vdev_be = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
	/* (soc_id, pdev_id) is @vdev's own slot in the partners' maps */
	uint8_t soc_id = be_soc->mlo_chip_id;
	uint8_t pdev_id = vdev->pdev->pdev_id;
	int i, j;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *pr_vdev;
			struct dp_soc *pr_soc;
			struct dp_soc_be *pr_soc_be;
			struct dp_pdev *pr_pdev;
			struct dp_vdev_be *pr_vdev_be;

			if (vdev_be->partner_vdev_list[i][j] ==
			    CDP_INVALID_VDEV_ID)
				continue;

			/* NOTE(review): the soc reference taken here is not
			 * released on this path — confirm the hold is
			 * intentional.
			 */
			pr_soc = dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);
			if (!pr_soc)
				continue;

			pr_soc_be = dp_get_be_soc_from_dp_soc(pr_soc);
			pr_vdev = dp_vdev_get_ref_by_id(pr_soc,
						vdev_be->partner_vdev_list[i][j],
						DP_MOD_ID_RX);
			if (!pr_vdev)
				continue;

			/* remove self vdev from partner list */
			pr_vdev_be = dp_get_be_vdev_from_dp_vdev(pr_vdev);
			pr_vdev_be->partner_vdev_list[soc_id][pdev_id] =
				CDP_INVALID_VDEV_ID;

			/* remove partner vdev from self list */
			pr_pdev = pr_vdev->pdev;
			vdev_be->partner_vdev_list[pr_soc_be->mlo_chip_id][pr_pdev->pdev_id] =
				CDP_INVALID_VDEV_ID;

			dp_vdev_unref_delete(pr_soc, pr_vdev, DP_MOD_ID_RX);
		}
	}
}
  402. static QDF_STATUS
  403. dp_clear_mlo_ptnr_list(struct cdp_soc_t *soc_hdl, uint8_t self_vdev_id)
  404. {
  405. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  406. struct dp_vdev *vdev;
  407. vdev = dp_vdev_get_ref_by_id(soc, self_vdev_id, DP_MOD_ID_RX);
  408. if (!vdev)
  409. return QDF_STATUS_E_FAILURE;
  410. dp_clr_mlo_ptnr_list(soc, vdev);
  411. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
  412. return QDF_STATUS_SUCCESS;
  413. }
  414. static void dp_mlo_setup_complete(struct cdp_mlo_ctxt *cdp_ml_ctxt)
  415. {
  416. struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
  417. int i;
  418. struct dp_soc *soc;
  419. struct dp_soc_be *be_soc;
  420. QDF_STATUS qdf_status;
  421. if (!cdp_ml_ctxt)
  422. return;
  423. for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
  424. soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
  425. if (!soc)
  426. continue;
  427. be_soc = dp_get_be_soc_from_dp_soc(soc);
  428. qdf_status = dp_partner_soc_rx_hw_cc_init(mlo_ctxt, be_soc);
  429. if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
  430. dp_alert("MLO partner SOC Rx desc CC init failed");
  431. qdf_assert_always(0);
  432. }
  433. }
  434. }
  435. static void dp_mlo_update_delta_tsf2(struct cdp_soc_t *soc_hdl,
  436. uint8_t pdev_id, uint64_t delta_tsf2)
  437. {
  438. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  439. struct dp_pdev *pdev;
  440. struct dp_pdev_be *be_pdev;
  441. pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
  442. pdev_id);
  443. if (!pdev) {
  444. dp_err("pdev is NULL for pdev_id %u", pdev_id);
  445. return;
  446. }
  447. be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
  448. be_pdev->delta_tsf2 = delta_tsf2;
  449. }
  450. static void dp_mlo_update_delta_tqm(struct cdp_soc_t *soc_hdl,
  451. uint64_t delta_tqm)
  452. {
  453. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  454. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  455. be_soc->delta_tqm = delta_tqm;
  456. }
  457. static void dp_mlo_update_mlo_ts_offset(struct cdp_soc_t *soc_hdl,
  458. uint64_t offset)
  459. {
  460. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  461. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  462. be_soc->mlo_tstamp_offset = offset;
  463. }
/* CDP MLO ops vector registered into soc->cdp_soc.ops->mlo_ops by
 * dp_soc_mlo_fill_params(); these are the entry points the control path
 * uses for MLO group setup/teardown, partner-list maintenance and
 * timestamp-offset updates.
 */
static struct cdp_mlo_ops dp_mlo_ops = {
	.mlo_soc_setup = dp_mlo_soc_setup,
	.mlo_soc_teardown = dp_mlo_soc_teardown,
	.update_mlo_ptnr_list = dp_update_mlo_ptnr_list,
	.clear_mlo_ptnr_list = dp_clear_mlo_ptnr_list,
	.mlo_setup_complete = dp_mlo_setup_complete,
	.mlo_update_delta_tsf2 = dp_mlo_update_delta_tsf2,
	.mlo_update_delta_tqm = dp_mlo_update_delta_tqm,
	.mlo_update_mlo_ts_offset = dp_mlo_update_mlo_ts_offset,
	.mlo_ctxt_attach = dp_mlo_ctxt_attach_wifi3,
	.mlo_ctxt_detach = dp_mlo_ctxt_detach_wifi3,
};
  476. void dp_soc_mlo_fill_params(struct dp_soc *soc,
  477. struct cdp_soc_attach_params *params)
  478. {
  479. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  480. if (!params->mlo_enabled) {
  481. dp_warn("MLO not enabled on SOC");
  482. return;
  483. }
  484. be_soc->mlo_chip_id = params->mlo_chip_id;
  485. be_soc->ml_ctxt = cdp_mlo_ctx_to_dp(params->ml_context);
  486. be_soc->mlo_enabled = 1;
  487. soc->cdp_soc.ops->mlo_ops = &dp_mlo_ops;
  488. }
  489. void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
  490. {
  491. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  492. struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
  493. struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
  494. uint8_t link_id;
  495. if (!be_soc->mlo_enabled)
  496. return;
  497. if (!ml_ctxt)
  498. return;
  499. link_id = be_pdev->mlo_link_id;
  500. if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC) {
  501. if (!ml_ctxt->link_to_pdev_map[link_id])
  502. ml_ctxt->link_to_pdev_map[link_id] = be_pdev;
  503. else
  504. dp_alert("Attempt to update existing map for link %u",
  505. link_id);
  506. }
  507. }
  508. void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
  509. {
  510. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  511. struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
  512. struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
  513. uint8_t link_id;
  514. if (!be_soc->mlo_enabled)
  515. return;
  516. if (!ml_ctxt)
  517. return;
  518. link_id = be_pdev->mlo_link_id;
  519. if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
  520. ml_ctxt->link_to_pdev_map[link_id] = NULL;
  521. }
  522. static struct dp_pdev_be *
  523. dp_mlo_get_be_pdev_from_link_id(struct dp_mlo_ctxt *ml_ctxt, uint8_t link_id)
  524. {
  525. if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
  526. return ml_ctxt->link_to_pdev_map[link_id];
  527. return NULL;
  528. }
  529. void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
  530. struct cdp_pdev_attach_params *params)
  531. {
  532. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(pdev->soc);
  533. struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
  534. if (!be_soc->mlo_enabled) {
  535. dp_info("MLO not enabled on SOC");
  536. return;
  537. }
  538. be_pdev->mlo_link_id = params->mlo_link_id;
  539. }
  540. void dp_mlo_partner_chips_map(struct dp_soc *soc,
  541. struct dp_peer *peer,
  542. uint16_t peer_id)
  543. {
  544. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  545. struct dp_mlo_ctxt *mlo_ctxt = NULL;
  546. bool is_ml_peer_id =
  547. HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
  548. uint8_t chip_id;
  549. struct dp_soc *temp_soc;
  550. /* for non ML peer dont map on partner chips*/
  551. if (!is_ml_peer_id)
  552. return;
  553. mlo_ctxt = be_soc->ml_ctxt;
  554. if (!mlo_ctxt)
  555. return;
  556. qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);
  557. for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
  558. temp_soc = mlo_ctxt->ml_soc_list[chip_id];
  559. if (!temp_soc)
  560. continue;
  561. /* skip if this is current soc */
  562. if (temp_soc == soc)
  563. continue;
  564. dp_peer_find_id_to_obj_add(temp_soc, peer, peer_id);
  565. }
  566. qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
  567. }
  568. qdf_export_symbol(dp_mlo_partner_chips_map);
  569. void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
  570. uint16_t peer_id)
  571. {
  572. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  573. struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
  574. bool is_ml_peer_id =
  575. HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
  576. uint8_t chip_id;
  577. struct dp_soc *temp_soc;
  578. if (!is_ml_peer_id)
  579. return;
  580. if (!mlo_ctxt)
  581. return;
  582. qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);
  583. for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
  584. temp_soc = mlo_ctxt->ml_soc_list[chip_id];
  585. if (!temp_soc)
  586. continue;
  587. /* skip if this is current soc */
  588. if (temp_soc == soc)
  589. continue;
  590. dp_peer_find_id_to_obj_remove(temp_soc, peer_id);
  591. }
  592. qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
  593. }
  594. qdf_export_symbol(dp_mlo_partner_chips_unmap);
  595. uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
  596. {
  597. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  598. return be_soc->mlo_chip_id;
  599. }
  600. qdf_export_symbol(dp_mlo_get_chip_id);
  601. struct dp_peer *
  602. dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
  603. uint8_t *peer_mac_addr,
  604. int mac_addr_is_aligned,
  605. uint8_t vdev_id,
  606. uint8_t chip_id,
  607. enum dp_mod_id mod_id)
  608. {
  609. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  610. struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
  611. struct dp_soc *link_peer_soc = NULL;
  612. struct dp_peer *peer = NULL;
  613. if (!mlo_ctxt)
  614. return NULL;
  615. link_peer_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
  616. if (!link_peer_soc)
  617. return NULL;
  618. peer = dp_peer_find_hash_find(link_peer_soc, peer_mac_addr,
  619. mac_addr_is_aligned, vdev_id,
  620. mod_id);
  621. qdf_atomic_dec(&link_peer_soc->ref_count);
  622. return peer;
  623. }
  624. qdf_export_symbol(dp_link_peer_hash_find_by_chip_id);
  625. void dp_mlo_get_rx_hash_key(struct dp_soc *soc,
  626. struct cdp_lro_hash_config *lro_hash)
  627. {
  628. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  629. struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
  630. if (!be_soc->mlo_enabled || !ml_ctxt)
  631. return dp_get_rx_hash_key_bytes(lro_hash);
  632. qdf_mem_copy(lro_hash->toeplitz_hash_ipv4, ml_ctxt->toeplitz_hash_ipv4,
  633. (sizeof(lro_hash->toeplitz_hash_ipv4[0]) *
  634. LRO_IPV4_SEED_ARR_SZ));
  635. qdf_mem_copy(lro_hash->toeplitz_hash_ipv6, ml_ctxt->toeplitz_hash_ipv6,
  636. (sizeof(lro_hash->toeplitz_hash_ipv6[0]) *
  637. LRO_IPV6_SEED_ARR_SZ));
  638. }
  639. struct dp_soc *
  640. dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id)
  641. {
  642. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  643. struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
  644. struct dp_soc *replenish_soc;
  645. if (!be_soc->mlo_enabled || !mlo_ctxt)
  646. return soc;
  647. if (be_soc->mlo_chip_id == chip_id)
  648. return soc;
  649. replenish_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
  650. if (qdf_unlikely(!replenish_soc)) {
  651. dp_alert("replenish SOC is NULL");
  652. qdf_assert_always(0);
  653. }
  654. return replenish_soc;
  655. }
  656. uint8_t dp_soc_get_num_soc_be(struct dp_soc *soc)
  657. {
  658. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  659. struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
  660. if (!be_soc->mlo_enabled || !mlo_ctxt)
  661. return 1;
  662. return mlo_ctxt->ml_soc_cnt;
  663. }
  664. struct dp_soc *
  665. dp_soc_get_by_idle_bm_id(struct dp_soc *soc, uint8_t idle_bm_id)
  666. {
  667. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  668. struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
  669. struct dp_soc *partner_soc = NULL;
  670. uint8_t chip_id;
  671. if (!be_soc->mlo_enabled || !mlo_ctxt)
  672. return soc;
  673. for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
  674. partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
  675. if (!partner_soc)
  676. continue;
  677. if (partner_soc->idle_link_bm_id == idle_bm_id)
  678. return partner_soc;
  679. }
  680. return NULL;
  681. }
  682. #ifdef WLAN_MCAST_MLO
  683. void dp_mcast_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
  684. struct dp_vdev_be *be_vdev,
  685. dp_ptnr_vdev_iter_func func,
  686. void *arg,
  687. enum dp_mod_id mod_id)
  688. {
  689. int i = 0;
  690. int j = 0;
  691. struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
  692. for (i = 0; i < WLAN_MAX_MLO_CHIPS ; i++) {
  693. struct dp_soc *ptnr_soc =
  694. dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);
  695. if (!ptnr_soc)
  696. continue;
  697. for (j = 0 ; j < WLAN_MAX_MLO_LINKS_PER_SOC ; j++) {
  698. struct dp_vdev *ptnr_vdev;
  699. ptnr_vdev = dp_vdev_get_ref_by_id(
  700. ptnr_soc,
  701. be_vdev->partner_vdev_list[i][j],
  702. mod_id);
  703. if (!ptnr_vdev)
  704. continue;
  705. (*func)(be_vdev, ptnr_vdev, arg);
  706. dp_vdev_unref_delete(ptnr_vdev->pdev->soc,
  707. ptnr_vdev,
  708. mod_id);
  709. }
  710. }
  711. }
  712. qdf_export_symbol(dp_mcast_mlo_iter_ptnr_vdev);
  713. struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
  714. struct dp_vdev_be *be_vdev,
  715. enum dp_mod_id mod_id)
  716. {
  717. int i = 0;
  718. int j = 0;
  719. struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
  720. for (i = 0; i < WLAN_MAX_MLO_CHIPS ; i++) {
  721. struct dp_soc *ptnr_soc =
  722. dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);
  723. if (!ptnr_soc)
  724. continue;
  725. for (j = 0 ; j < WLAN_MAX_MLO_LINKS_PER_SOC ; j++) {
  726. struct dp_vdev *ptnr_vdev = NULL;
  727. struct dp_vdev_be *be_ptnr_vdev = NULL;
  728. ptnr_vdev = dp_vdev_get_ref_by_id(
  729. ptnr_soc,
  730. be_vdev->partner_vdev_list[i][j],
  731. mod_id);
  732. if (!ptnr_vdev)
  733. continue;
  734. be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
  735. if (be_ptnr_vdev->mcast_primary)
  736. return ptnr_vdev;
  737. dp_vdev_unref_delete(be_ptnr_vdev->vdev.pdev->soc,
  738. &be_ptnr_vdev->vdev,
  739. mod_id);
  740. }
  741. }
  742. return NULL;
  743. }
  744. qdf_export_symbol(dp_mlo_get_mcast_primary_vdev);
  745. #endif
  746. /**
  747. * dp_mlo_iter_ptnr_soc() - iterate through mlo soc list and call the callback
  748. * @be_soc: dp_soc_be pointer
  749. * @func: Function to be called for each soc
  750. * @arg: context to be passed to the callback
  751. *
  752. * Return: true if mlo is enabled, false if mlo is disabled
  753. */
  754. bool dp_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc, dp_ptnr_soc_iter_func func,
  755. void *arg)
  756. {
  757. int i = 0;
  758. struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
  759. if (!be_soc->mlo_enabled || !be_soc->ml_ctxt)
  760. return false;
  761. for (i = 0; i < WLAN_MAX_MLO_CHIPS ; i++) {
  762. struct dp_soc *ptnr_soc =
  763. dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);
  764. if (!ptnr_soc)
  765. continue;
  766. (*func)(ptnr_soc, arg, i);
  767. }
  768. return true;
  769. }
  770. qdf_export_symbol(dp_mlo_iter_ptnr_soc);
  771. static inline uint64_t dp_mlo_get_mlo_ts_offset(struct dp_pdev_be *be_pdev)
  772. {
  773. struct dp_soc *soc;
  774. struct dp_pdev *pdev;
  775. struct dp_soc_be *be_soc;
  776. uint32_t mlo_offset;
  777. pdev = &be_pdev->pdev;
  778. soc = pdev->soc;
  779. be_soc = dp_get_be_soc_from_dp_soc(soc);
  780. mlo_offset = be_soc->mlo_tstamp_offset;
  781. return mlo_offset;
  782. }
  783. int32_t dp_mlo_get_delta_tsf2_wrt_mlo_offset(struct dp_soc *soc,
  784. uint8_t hw_link_id)
  785. {
  786. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  787. struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
  788. struct dp_pdev_be *be_pdev;
  789. int32_t delta_tsf2_mlo_offset;
  790. int32_t mlo_offset, delta_tsf2;
  791. if (!ml_ctxt)
  792. return 0;
  793. be_pdev = dp_mlo_get_be_pdev_from_link_id(ml_ctxt, hw_link_id);
  794. if (!be_pdev)
  795. return 0;
  796. mlo_offset = dp_mlo_get_mlo_ts_offset(be_pdev);
  797. delta_tsf2 = be_pdev->delta_tsf2;
  798. delta_tsf2_mlo_offset = mlo_offset - delta_tsf2;
  799. return delta_tsf2_mlo_offset;
  800. }
  801. int32_t dp_mlo_get_delta_tqm_wrt_mlo_offset(struct dp_soc *soc)
  802. {
  803. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  804. int32_t delta_tqm_mlo_offset;
  805. int32_t mlo_offset, delta_tqm;
  806. mlo_offset = be_soc->mlo_tstamp_offset;
  807. delta_tqm = be_soc->delta_tqm;
  808. delta_tqm_mlo_offset = mlo_offset - delta_tqm;
  809. return delta_tqm_mlo_offset;
  810. }
  811. #ifdef DP_UMAC_HW_RESET_SUPPORT
  812. /**
  813. * dp_umac_reset_update_partner_map() - Update Umac reset partner map
  814. * @mlo_ctx: DP ML context handle
  815. * @chip_id: chip id
  816. * @set: flag indicating whether to set or clear the bit
  817. *
  818. * Return: void
  819. */
  820. static void dp_umac_reset_update_partner_map(struct dp_mlo_ctxt *mlo_ctx,
  821. int chip_id, bool set)
  822. {
  823. struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx =
  824. &mlo_ctx->grp_umac_reset_ctx;
  825. if (set)
  826. qdf_atomic_set_bit(chip_id, &grp_umac_reset_ctx->partner_map);
  827. else
  828. qdf_atomic_clear_bit(chip_id, &grp_umac_reset_ctx->partner_map);
  829. }
  830. /**
  831. * dp_umac_reset_complete_umac_recovery() - Complete Umac reset session
  832. * @soc: dp soc handle
  833. *
  834. * Return: void
  835. */
  836. void dp_umac_reset_complete_umac_recovery(struct dp_soc *soc)
  837. {
  838. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  839. struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
  840. struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
  841. if (!mlo_ctx) {
  842. dp_umac_reset_alert("Umac reset was handled on soc %pK", soc);
  843. return;
  844. }
  845. grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
  846. qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
  847. grp_umac_reset_ctx->umac_reset_in_progress = false;
  848. grp_umac_reset_ctx->is_target_recovery = false;
  849. grp_umac_reset_ctx->response_map = 0;
  850. grp_umac_reset_ctx->request_map = 0;
  851. grp_umac_reset_ctx->initiator_chip_id = 0;
  852. qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
  853. dp_umac_reset_alert("Umac reset was handled on mlo group ctxt %pK",
  854. mlo_ctx);
  855. }
  856. /**
  857. * dp_umac_reset_initiate_umac_recovery() - Initiate Umac reset session
  858. * @soc: dp soc handle
  859. * @is_target_recovery: Flag to indicate if it is triggered for target recovery
  860. *
  861. * Return: void
  862. */
  863. void dp_umac_reset_initiate_umac_recovery(struct dp_soc *soc,
  864. bool is_target_recovery)
  865. {
  866. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  867. struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
  868. struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
  869. if (!mlo_ctx)
  870. return;
  871. grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
  872. qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
  873. if (grp_umac_reset_ctx->umac_reset_in_progress) {
  874. qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
  875. return;
  876. }
  877. grp_umac_reset_ctx->umac_reset_in_progress = true;
  878. grp_umac_reset_ctx->is_target_recovery = is_target_recovery;
  879. /* We don't wait for the 'Umac trigger' message from all socs */
  880. grp_umac_reset_ctx->request_map = grp_umac_reset_ctx->partner_map;
  881. grp_umac_reset_ctx->response_map = grp_umac_reset_ctx->partner_map;
  882. grp_umac_reset_ctx->initiator_chip_id = dp_mlo_get_chip_id(soc);
  883. grp_umac_reset_ctx->umac_reset_count++;
  884. qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
  885. }
  886. /**
  887. * dp_umac_reset_handle_action_cb() - Function to call action callback
  888. * @soc: dp soc handle
  889. * @umac_reset_ctx: Umac reset context
  890. * @action: Action to call the callback for
  891. *
  892. * Return: QDF_STATUS status
  893. */
  894. QDF_STATUS
  895. dp_umac_reset_handle_action_cb(struct dp_soc *soc,
  896. struct dp_soc_umac_reset_ctx *umac_reset_ctx,
  897. enum umac_reset_action action)
  898. {
  899. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  900. struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
  901. struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
  902. if (!mlo_ctx) {
  903. dp_umac_reset_debug("MLO context is Null");
  904. goto handle;
  905. }
  906. grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
  907. qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
  908. qdf_atomic_set_bit(dp_mlo_get_chip_id(soc),
  909. &grp_umac_reset_ctx->request_map);
  910. dp_umac_reset_debug("partner_map %u request_map %u",
  911. grp_umac_reset_ctx->partner_map,
  912. grp_umac_reset_ctx->request_map);
  913. /* This logic is needed for synchronization between mlo socs */
  914. if ((grp_umac_reset_ctx->partner_map & grp_umac_reset_ctx->request_map)
  915. != grp_umac_reset_ctx->partner_map) {
  916. struct hif_softc *hif_sc = HIF_GET_SOFTC(soc->hif_handle);
  917. struct hif_umac_reset_ctx *hif_umac_reset_ctx;
  918. if (!hif_sc) {
  919. hif_err("scn is null");
  920. qdf_assert_always(0);
  921. return QDF_STATUS_E_FAILURE;
  922. }
  923. hif_umac_reset_ctx = &hif_sc->umac_reset_ctx;
  924. /* Mark the action as pending */
  925. umac_reset_ctx->pending_action = action;
  926. /* Reschedule the tasklet and exit */
  927. tasklet_hi_schedule(&hif_umac_reset_ctx->intr_tq);
  928. qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
  929. return QDF_STATUS_SUCCESS;
  930. }
  931. qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
  932. umac_reset_ctx->pending_action = UMAC_RESET_ACTION_NONE;
  933. handle:
  934. if (!umac_reset_ctx->rx_actions.cb[action]) {
  935. dp_umac_reset_err("rx callback is NULL");
  936. return QDF_STATUS_E_FAILURE;
  937. }
  938. return umac_reset_ctx->rx_actions.cb[action](soc);
  939. }
/**
 * dp_umac_reset_post_tx_cmd() - Iterate partner socs and post Tx command
 * @umac_reset_ctx: UMAC reset context
 * @tx_cmd: Tx command to be posted
 *
 * Marks this soc's response in the group response map and, only when
 * every partner soc listed in partner_map has responded, posts @tx_cmd
 * to all partner socs via shared memory. The last responder thereby
 * drives the command on behalf of the whole MLO group.
 *
 * Return: QDF status of operation
 */
QDF_STATUS
dp_umac_reset_post_tx_cmd(struct dp_soc_umac_reset_ctx *umac_reset_ctx,
			  enum umac_reset_tx_cmd tx_cmd)
{
	/* Recover the owning soc from the embedded umac reset context */
	struct dp_soc *soc = container_of(umac_reset_ctx, struct dp_soc,
					  umac_reset_ctx);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		/* Non-MLO soc: post the command for this soc alone */
		dp_umac_reset_post_tx_cmd_via_shmem(soc, &tx_cmd, 0);
		return QDF_STATUS_SUCCESS;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;

	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	/* Record that this chip has responded for the current stage */
	qdf_atomic_set_bit(dp_mlo_get_chip_id(soc),
			   &grp_umac_reset_ctx->response_map);

	/* This logic is needed for synchronization between mlo socs */
	if ((grp_umac_reset_ctx->partner_map & grp_umac_reset_ctx->response_map)
	    != grp_umac_reset_ctx->partner_map) {
		/* Some partners have not responded yet; the last one to
		 * respond will post the command for the whole group.
		 */
		dp_umac_reset_debug(
			"Response(s) pending : expected map %u current map %u",
			grp_umac_reset_ctx->partner_map,
			grp_umac_reset_ctx->response_map);

		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return QDF_STATUS_SUCCESS;
	}

	dp_umac_reset_debug(
		"All responses received: expected map %u current map %u",
		grp_umac_reset_ctx->partner_map,
		grp_umac_reset_ctx->response_map);

	/* All partners responded: clear both maps for the next stage
	 * before dropping the lock, then post outside the lock.
	 */
	grp_umac_reset_ctx->response_map = 0;
	grp_umac_reset_ctx->request_map = 0;

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	dp_mlo_iter_ptnr_soc(be_soc, &dp_umac_reset_post_tx_cmd_via_shmem,
			     &tx_cmd);

	/* Final stage of the sequence: tear down the group session */
	if (tx_cmd == UMAC_RESET_TX_CMD_POST_RESET_COMPLETE_DONE)
		dp_umac_reset_complete_umac_recovery(soc);

	return QDF_STATUS_SUCCESS;
}
  987. /**
  988. * dp_umac_reset_initiator_check() - Check if soc is the Umac reset initiator
  989. * @soc: dp soc handle
  990. *
  991. * Return: true if the soc is initiator or false otherwise
  992. */
  993. bool dp_umac_reset_initiator_check(struct dp_soc *soc)
  994. {
  995. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  996. struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
  997. if (!mlo_ctx)
  998. return true;
  999. return (mlo_ctx->grp_umac_reset_ctx.initiator_chip_id ==
  1000. dp_mlo_get_chip_id(soc));
  1001. }
  1002. /**
  1003. * dp_umac_reset_target_recovery_check() - Check if this is for target recovery
  1004. * @soc: dp soc handle
  1005. *
  1006. * Return: true if the session is for target recovery or false otherwise
  1007. */
  1008. bool dp_umac_reset_target_recovery_check(struct dp_soc *soc)
  1009. {
  1010. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  1011. struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
  1012. if (!mlo_ctx)
  1013. return false;
  1014. return mlo_ctx->grp_umac_reset_ctx.is_target_recovery;
  1015. }
  1016. /**
  1017. * dp_umac_reset_is_soc_ignored() - Check if this soc is to be ignored
  1018. * @soc: dp soc handle
  1019. *
  1020. * Return: true if the soc is ignored or false otherwise
  1021. */
  1022. bool dp_umac_reset_is_soc_ignored(struct dp_soc *soc)
  1023. {
  1024. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  1025. struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
  1026. if (!mlo_ctx)
  1027. return false;
  1028. return !qdf_atomic_test_bit(dp_mlo_get_chip_id(soc),
  1029. &mlo_ctx->grp_umac_reset_ctx.partner_map);
  1030. }
  1031. QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc)
  1032. {
  1033. struct dp_mlo_ctxt *mlo_ctx;
  1034. struct dp_soc_be *be_soc;
  1035. struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
  1036. be_soc = dp_get_be_soc_from_dp_soc(soc);
  1037. if (!be_soc) {
  1038. dp_umac_reset_err("null be_soc");
  1039. return QDF_STATUS_E_NULL_VALUE;
  1040. }
  1041. mlo_ctx = be_soc->ml_ctxt;
  1042. if (!mlo_ctx) {
  1043. /* This API can be called for non-MLO SOC as well. Hence, return
  1044. * the status as success when mlo_ctx is NULL.
  1045. */
  1046. return QDF_STATUS_SUCCESS;
  1047. }
  1048. grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
  1049. DP_UMAC_RESET_PRINT_STATS("MLO UMAC RESET stats\n"
  1050. "\t\tPartner map :%x\n"
  1051. "\t\tRequest map :%x\n"
  1052. "\t\tResponse map :%x\n"
  1053. "\t\tIs target recovery :%d\n"
  1054. "\t\tIs Umac reset inprogress :%d\n"
  1055. "\t\tNumber of UMAC reset triggered:%d\n"
  1056. "\t\tInitiator chip ID :%d\n",
  1057. grp_umac_reset_ctx->partner_map,
  1058. grp_umac_reset_ctx->request_map,
  1059. grp_umac_reset_ctx->response_map,
  1060. grp_umac_reset_ctx->is_target_recovery,
  1061. grp_umac_reset_ctx->umac_reset_in_progress,
  1062. grp_umac_reset_ctx->umac_reset_count,
  1063. grp_umac_reset_ctx->initiator_chip_id);
  1064. return QDF_STATUS_SUCCESS;
  1065. }
  1066. #endif