
/*
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <wlan_utility.h>
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_be_api.h>
#include "dp_mlo.h"
#include <dp_be.h>
#include <dp_be_rx.h>
#include <wlan_cfg.h>
#include <wlan_mlo_mgr_cmn.h>
#include "dp_umac_reset.h"
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_umac_reset_update_partner_map() - Update Umac reset partner map
 * @mlo_ctx: mlo soc context
 * @chip_id: chip id
 * @set: flag indicating whether to set or clear the bit
 *
 * Return: void
 */
static void dp_umac_reset_update_partner_map(struct dp_mlo_ctxt *mlo_ctx,
					     int chip_id, bool set);
#endif

/**
 * dp_mlo_ctxt_attach_wifi3() - Attach DP MLO context
 * @ctrl_ctxt: CDP control context
 *
 * Return: DP MLO context handle on success, NULL on failure
 */
static struct cdp_mlo_ctxt *
dp_mlo_ctxt_attach_wifi3(struct cdp_ctrl_mlo_mgr *ctrl_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt =
		qdf_mem_malloc(sizeof(struct dp_mlo_ctxt));

	if (!mlo_ctxt) {
		dp_err("Failed to allocate DP MLO Context");
		return NULL;
	}

	mlo_ctxt->ctrl_ctxt = ctrl_ctxt;

	if (dp_mlo_peer_find_hash_attach_be
			(mlo_ctxt, DP_MAX_MLO_PEER) != QDF_STATUS_SUCCESS) {
		dp_err("Failed to allocate peer hash");
		qdf_mem_free(mlo_ctxt);
		return NULL;
	}

	qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv4,
			     (sizeof(mlo_ctxt->toeplitz_hash_ipv4[0]) *
			      LRO_IPV4_SEED_ARR_SZ));
	qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv6,
			     (sizeof(mlo_ctxt->toeplitz_hash_ipv6[0]) *
			      LRO_IPV6_SEED_ARR_SZ));

	qdf_spinlock_create(&mlo_ctxt->ml_soc_list_lock);
	qdf_spinlock_create(&mlo_ctxt->grp_umac_reset_ctx.grp_ctx_lock);
	return dp_mlo_ctx_to_cdp(mlo_ctxt);
}

/**
 * dp_mlo_ctxt_detach_wifi3() - Detach DP MLO context
 * @cdp_ml_ctxt: pointer to CDP DP MLO context
 *
 * Return: void
 */
static void dp_mlo_ctxt_detach_wifi3(struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);

	if (!cdp_ml_ctxt)
		return;

	qdf_spinlock_destroy(&mlo_ctxt->grp_umac_reset_ctx.grp_ctx_lock);
	qdf_spinlock_destroy(&mlo_ctxt->ml_soc_list_lock);
	dp_mlo_peer_find_hash_detach_be(mlo_ctxt);
	qdf_mem_free(mlo_ctxt);
}
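
/*
 * Illustrative lifecycle sketch (not part of the driver): the MLO manager
 * reaches the two functions above through the cdp mlo ops table registered
 * later in this file (.mlo_ctxt_attach / .mlo_ctxt_detach). One group
 * context is created before any partner soc attaches and destroyed after
 * the last one detaches. The caller below is hypothetical.
 *
 *	struct cdp_mlo_ctxt *ml_ctxt;
 *
 *	ml_ctxt = dp_mlo_ctxt_attach_wifi3(ctrl_ctxt);
 *	if (!ml_ctxt)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	dp_mlo_ctxt_detach_wifi3(ml_ctxt);
 */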
/**
 * dp_mlo_set_soc_by_chip_id() - Add DP soc to ML context soc list
 * @ml_ctxt: DP ML context handle
 * @soc: DP soc handle
 * @chip_id: MLO chip id
 *
 * Return: void
 */
static void dp_mlo_set_soc_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
				      struct dp_soc *soc,
				      uint8_t chip_id)
{
	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
	ml_ctxt->ml_soc_list[chip_id] = soc;

	/* This API is called during both soc_attach and soc_detach;
	 * the soc parameter is non-NULL for attach and NULL for detach.
	 */
	if (soc)
		ml_ctxt->ml_soc_cnt++;
	else
		ml_ctxt->ml_soc_cnt--;

	dp_umac_reset_update_partner_map(ml_ctxt, chip_id, !!soc);

	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
}

struct dp_soc*
dp_mlo_get_soc_ref_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
			      uint8_t chip_id)
{
	struct dp_soc *soc = NULL;

	if (!ml_ctxt) {
		dp_warn("MLO context not created, MLO not enabled");
		return NULL;
	}

	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
	soc = ml_ctxt->ml_soc_list[chip_id];

	if (!soc) {
		qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
		return NULL;
	}

	qdf_atomic_inc(&soc->ref_count);
	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);

	return soc;
}
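
/*
 * Illustrative sketch (not part of the driver): a caller that takes a soc
 * reference through dp_mlo_get_soc_ref_by_chip_id() can drop it the same
 * way dp_link_peer_hash_find_by_chip_id() below does, by decrementing
 * soc->ref_count once it is done with the soc. The chip id used here is a
 * made-up value.
 *
 *	struct dp_soc *partner = dp_mlo_get_soc_ref_by_chip_id(ml_ctxt, 1);
 *
 *	if (partner) {
 *		... use partner ...
 *		qdf_atomic_dec(&partner->ref_count);
 *	}
 */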
static QDF_STATUS dp_partner_soc_rx_hw_cc_init(struct dp_mlo_ctxt *mlo_ctxt,
					       struct dp_soc_be *be_soc)
{
	uint8_t i;
	struct dp_soc *partner_soc;
	struct dp_soc_be *be_partner_soc;
	uint8_t pool_id;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
		if (!partner_soc) {
			dp_err("partner_soc is NULL");
			continue;
		}

		be_partner_soc = dp_get_be_soc_from_dp_soc(partner_soc);

		for (pool_id = 0; pool_id < MAX_RXDESC_POOLS; pool_id++) {
			qdf_status =
				dp_hw_cookie_conversion_init
					(be_soc,
					 &be_partner_soc->rx_cc_ctx[pool_id]);
			if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
				dp_alert("MLO partner soc RX CC init failed");
				return qdf_status;
			}
		}
	}

	return qdf_status;
}

static void dp_mlo_soc_drain_rx_buf(struct dp_soc *soc, void *arg, int chip_id)
{
	uint8_t i = 0;
	uint8_t cpu = 0;
	uint8_t rx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_err_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_wbm_rel_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t reo_status_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};

	/* Save the current interrupt mask and disable the interrupts */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		rx_ring_mask[i] = soc->intr_ctx[i].rx_ring_mask;
		rx_err_ring_mask[i] = soc->intr_ctx[i].rx_err_ring_mask;
		rx_wbm_rel_ring_mask[i] = soc->intr_ctx[i].rx_wbm_rel_ring_mask;
		reo_status_ring_mask[i] = soc->intr_ctx[i].reo_status_ring_mask;

		soc->intr_ctx[i].rx_ring_mask = 0;
		soc->intr_ctx[i].rx_err_ring_mask = 0;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
		soc->intr_ctx[i].reo_status_ring_mask = 0;
	}

	/* make sure dp_service_srngs is not running on any of the CPUs */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		while (qdf_atomic_test_bit(cpu,
					   &soc->service_rings_running))
			;
	}

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		uint8_t ring = 0;
		uint32_t num_entries = 0;
		hal_ring_handle_t hal_ring_hdl = NULL;
		uint8_t rx_mask = wlan_cfg_get_rx_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_err_mask = wlan_cfg_get_rx_err_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_wbm_rel_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
						soc->wlan_cfg_ctx, i);

		if (rx_mask) {
			/* iterate through each reo ring and process the buf */
			for (ring = 0; ring < soc->num_reo_dest_rings;
			     ring++) {
				if (!(rx_mask & (1 << ring)))
					continue;

				hal_ring_hdl =
					soc->reo_dest_ring[ring].hal_srng;
				num_entries = hal_srng_get_num_entries(
								soc->hal_soc,
								hal_ring_hdl);
				dp_rx_process_be(&soc->intr_ctx[i],
						 hal_ring_hdl,
						 ring,
						 num_entries);
			}
		}

		/* Process REO Exception ring */
		if (rx_err_mask) {
			hal_ring_hdl = soc->reo_exception_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
						soc->hal_soc,
						hal_ring_hdl);

			dp_rx_err_process(&soc->intr_ctx[i], soc,
					  hal_ring_hdl, num_entries);
		}

		/* Process Rx WBM release ring */
		if (rx_wbm_rel_mask) {
			hal_ring_hdl = soc->rx_rel_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
						soc->hal_soc,
						hal_ring_hdl);

			dp_rx_wbm_err_process(&soc->intr_ctx[i], soc,
					      hal_ring_hdl, num_entries);
		}
	}

	/* restore the interrupt mask */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].rx_ring_mask = rx_ring_mask[i];
		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask[i];
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask[i];
		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask[i];
	}
}

static void dp_mlo_soc_setup(struct cdp_soc_t *soc_hdl,
			     struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	uint8_t pdev_id;

	if (!cdp_ml_ctxt)
		return;

	be_soc->ml_ctxt = mlo_ctxt;

	for (pdev_id = 0; pdev_id < MAX_PDEV_CNT; pdev_id++) {
		if (soc->pdev_list[pdev_id])
			dp_mlo_update_link_to_pdev_map(soc,
						       soc->pdev_list[pdev_id]);
	}

	dp_mlo_set_soc_by_chip_id(mlo_ctxt, soc, be_soc->mlo_chip_id);
}

static void dp_mlo_soc_teardown(struct cdp_soc_t *soc_hdl,
				struct cdp_mlo_ctxt *cdp_ml_ctxt,
				bool is_force_down)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!cdp_ml_ctxt)
		return;

	/* During the teardown drain the Rx buffers if any exist in the ring */
	dp_mlo_iter_ptnr_soc(be_soc,
			     dp_mlo_soc_drain_rx_buf,
			     NULL);

	dp_mlo_set_soc_by_chip_id(mlo_ctxt, NULL, be_soc->mlo_chip_id);
	be_soc->ml_ctxt = NULL;
}

static QDF_STATUS dp_mlo_add_ptnr_vdev(struct dp_vdev *vdev1,
				       struct dp_vdev *vdev2,
				       struct dp_soc *soc, uint8_t pdev_id)
{
	struct dp_soc_be *soc_be = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *vdev2_be = dp_get_be_vdev_from_dp_vdev(vdev2);

	/* return when valid entry exists */
	if (vdev2_be->partner_vdev_list[soc_be->mlo_chip_id][pdev_id] !=
	    CDP_INVALID_VDEV_ID)
		return QDF_STATUS_SUCCESS;

	vdev2_be->partner_vdev_list[soc_be->mlo_chip_id][pdev_id] =
		vdev1->vdev_id;

	mlo_debug("Add vdev%d to vdev%d list, mlo_chip_id = %d pdev_id = %d\n",
		  vdev1->vdev_id, vdev2->vdev_id, soc_be->mlo_chip_id, pdev_id);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_update_mlo_ptnr_list(struct cdp_soc_t *soc_hdl,
				   int8_t partner_vdev_ids[],
				   uint8_t num_vdevs,
				   uint8_t self_vdev_id)
{
	int i, j;
	struct dp_soc *self_soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *self_vdev;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(self_soc);
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;

	if (!dp_mlo)
		return QDF_STATUS_E_FAILURE;

	self_vdev = dp_vdev_get_ref_by_id(self_soc, self_vdev_id,
					  DP_MOD_ID_RX);
	if (!self_vdev)
		return QDF_STATUS_E_FAILURE;

	/* Go through the input vdev id list; for every partner vdev that
	 * shares this vdev's MLD MAC address:
	 * - add the current vdev's id to the partner vdev's list (indexed
	 *   by chip id and pdev_id) and take a reference
	 * - add the partner vdev's id to the current vdev's own list and
	 *   take a reference
	 */
	for (i = 0; i < num_vdevs; i++) {
		if (partner_vdev_ids[i] == CDP_INVALID_VDEV_ID)
			continue;

		for (j = 0; j < WLAN_MAX_MLO_CHIPS; j++) {
			struct dp_soc *soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, j);
			if (soc) {
				struct dp_vdev *vdev;

				vdev = dp_vdev_get_ref_by_id(soc,
					partner_vdev_ids[i], DP_MOD_ID_RX);
				if (vdev) {
					if (vdev == self_vdev) {
						dp_vdev_unref_delete(soc,
							vdev, DP_MOD_ID_RX);
						/*dp_soc_unref_delete(soc); */
						continue;
					}
					if (qdf_is_macaddr_equal(
						(struct qdf_mac_addr *)self_vdev->mld_mac_addr.raw,
						(struct qdf_mac_addr *)vdev->mld_mac_addr.raw)) {
						if (dp_mlo_add_ptnr_vdev(self_vdev,
							vdev, self_soc,
							self_vdev->pdev->pdev_id) !=
							QDF_STATUS_SUCCESS) {
							dp_err("Unable to add self to partner vdev's list");
							dp_vdev_unref_delete(soc,
								vdev, DP_MOD_ID_RX);
							/* TODO - release soc ref here */
							/* dp_soc_unref_delete(soc);*/
							ret = QDF_STATUS_E_FAILURE;
							goto exit;
						}
						/* add to self list */
						if (dp_mlo_add_ptnr_vdev(vdev,
							self_vdev, soc,
							vdev->pdev->pdev_id) !=
							QDF_STATUS_SUCCESS) {
							dp_err("Unable to add vdev to self vdev's list");
							/* vdev was referenced
							 * on the partner soc,
							 * so release it there
							 */
							dp_vdev_unref_delete(soc,
								vdev, DP_MOD_ID_RX);
							/* TODO - release soc ref here */
							/* dp_soc_unref_delete(soc);*/
							ret = QDF_STATUS_E_FAILURE;
							goto exit;
						}
					}
					dp_vdev_unref_delete(soc, vdev,
							     DP_MOD_ID_RX);
				} /* vdev */
				/* TODO - release soc ref here */
				/* dp_soc_unref_delete(soc); */
			} /* soc */
		} /* for */
	} /* for */

exit:
	dp_vdev_unref_delete(self_soc, self_vdev, DP_MOD_ID_RX);
	return ret;
}
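
/*
 * Illustrative sketch (not part of the driver): partner_vdev_list is a
 * two-dimensional map keyed by MLO chip id and pdev id, so looking up a
 * partner link is a direct array access. Entries hold vdev ids, with
 * CDP_INVALID_VDEV_ID marking an empty slot. Variable names below are
 * hypothetical.
 *
 *	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
 *	uint8_t ptnr_vdev_id = be_vdev->partner_vdev_list[chip_id][pdev_id];
 *
 *	if (ptnr_vdev_id != CDP_INVALID_VDEV_ID)
 *		... resolve it on the partner soc via
 *		    dp_vdev_get_ref_by_id() ...
 */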
void dp_clr_mlo_ptnr_list(struct dp_soc *soc, struct dp_vdev *vdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *vdev_be = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
	uint8_t soc_id = be_soc->mlo_chip_id;
	uint8_t pdev_id = vdev->pdev->pdev_id;
	int i, j;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *pr_vdev;
			struct dp_soc *pr_soc;
			struct dp_soc_be *pr_soc_be;
			struct dp_pdev *pr_pdev;
			struct dp_vdev_be *pr_vdev_be;

			if (vdev_be->partner_vdev_list[i][j] ==
			    CDP_INVALID_VDEV_ID)
				continue;

			pr_soc = dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);
			if (!pr_soc)
				continue;

			pr_soc_be = dp_get_be_soc_from_dp_soc(pr_soc);
			pr_vdev = dp_vdev_get_ref_by_id(pr_soc,
					vdev_be->partner_vdev_list[i][j],
					DP_MOD_ID_RX);
			if (!pr_vdev)
				continue;

			/* remove self vdev from partner list */
			pr_vdev_be = dp_get_be_vdev_from_dp_vdev(pr_vdev);
			pr_vdev_be->partner_vdev_list[soc_id][pdev_id] =
				CDP_INVALID_VDEV_ID;

			/* remove partner vdev from self list */
			pr_pdev = pr_vdev->pdev;
			vdev_be->partner_vdev_list[pr_soc_be->mlo_chip_id][pr_pdev->pdev_id] =
				CDP_INVALID_VDEV_ID;

			dp_vdev_unref_delete(pr_soc, pr_vdev, DP_MOD_ID_RX);
		}
	}
}

static QDF_STATUS
dp_clear_mlo_ptnr_list(struct cdp_soc_t *soc_hdl, uint8_t self_vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;

	vdev = dp_vdev_get_ref_by_id(soc, self_vdev_id, DP_MOD_ID_RX);
	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	dp_clr_mlo_ptnr_list(soc, vdev);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
	return QDF_STATUS_SUCCESS;
}

static void dp_mlo_setup_complete(struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	int i;
	struct dp_soc *soc;
	struct dp_soc_be *be_soc;
	QDF_STATUS qdf_status;

	if (!cdp_ml_ctxt)
		return;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
		if (!soc)
			continue;

		be_soc = dp_get_be_soc_from_dp_soc(soc);
		qdf_status = dp_partner_soc_rx_hw_cc_init(mlo_ctxt, be_soc);

		if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
			dp_alert("MLO partner SOC Rx desc CC init failed");
			qdf_assert_always(0);
		}
	}
}

static void dp_mlo_update_delta_tsf2(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id, uint64_t delta_tsf2)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;
	struct dp_pdev_be *be_pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						  pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL for pdev_id %u", pdev_id);
		return;
	}

	be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);

	be_pdev->delta_tsf2 = delta_tsf2;
}

static void dp_mlo_update_delta_tqm(struct cdp_soc_t *soc_hdl,
				    uint64_t delta_tqm)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	be_soc->delta_tqm = delta_tqm;
}

static void dp_mlo_update_mlo_ts_offset(struct cdp_soc_t *soc_hdl,
					uint64_t offset)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	be_soc->mlo_tstamp_offset = offset;
}
#ifdef CONFIG_MLO_SINGLE_DEV
/**
 * dp_aggregate_vdev_basic_stats() - aggregate vdev basic stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * return: void
 */
static inline
void dp_aggregate_vdev_basic_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	DP_UPDATE_BASIC_STATS(tgt_vdev_stats, src_vdev_stats);
}

/**
 * dp_aggregate_vdev_ingress_stats() - aggregate vdev ingress stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * return: void
 */
static inline
void dp_aggregate_vdev_ingress_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	/* Aggregate vdev ingress stats */
	DP_UPDATE_INGRESS_STATS(tgt_vdev_stats, src_vdev_stats);
}

/**
 * dp_aggregate_vdev_stats_for_unmapped_peers() - aggregate unmap peer stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * return: void
 */
static inline
void dp_aggregate_vdev_stats_for_unmapped_peers(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	/* Aggregate unmapped peers stats */
	DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(tgt_vdev_stats,
						src_vdev_stats);
}

/**
 * dp_aggregate_all_vdev_stats() - aggregate vdev ingress and unmap peer stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * return: void
 */
static inline
void dp_aggregate_all_vdev_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	dp_aggregate_vdev_ingress_stats(tgt_vdev_stats, src_vdev_stats);
	dp_aggregate_vdev_stats_for_unmapped_peers(tgt_vdev_stats,
						   src_vdev_stats);
}

/**
 * dp_aggregate_interface_stats_based_on_peer_type() - aggregate stats at
 * VDEV level based on peer type connected to vdev
 * @vdev: DP VDEV handle
 * @vdev_stats: target vdev stats pointer
 * @peer_type: type of peer - MLO Link or Legacy peer
 *
 * return: void
 */
static
void dp_aggregate_interface_stats_based_on_peer_type(
					struct dp_vdev *vdev,
					struct cdp_vdev_stats *vdev_stats,
					enum dp_peer_type peer_type)
{
	struct cdp_vdev_stats *tgt_vdev_stats = NULL;
	struct dp_vdev_be *be_vdev = NULL;

	if (!vdev || !vdev->pdev)
		return;

	tgt_vdev_stats = vdev_stats;
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!be_vdev)
		return;

	if (peer_type == DP_PEER_TYPE_LEGACY) {
		dp_aggregate_all_vdev_stats(tgt_vdev_stats,
					    &vdev->stats);
	} else {
		dp_aggregate_vdev_ingress_stats(tgt_vdev_stats,
						&vdev->stats);
		dp_aggregate_vdev_stats_for_unmapped_peers(
						tgt_vdev_stats,
						&be_vdev->mlo_stats);
	}

	/* Aggregate associated peer stats */
	dp_vdev_iterate_specific_peer_type(vdev,
					   dp_update_vdev_stats,
					   vdev_stats,
					   DP_MOD_ID_GENERIC_STATS,
					   peer_type);
}

/**
 * dp_aggregate_interface_stats() - aggregate stats at VDEV level
 * @vdev: DP VDEV handle
 * @vdev_stats: target vdev stats pointer
 *
 * return: void
 */
static
void dp_aggregate_interface_stats(struct dp_vdev *vdev,
				  struct cdp_vdev_stats *vdev_stats)
{
	struct dp_vdev_be *be_vdev = NULL;

	if (!vdev || !vdev->pdev)
		return;

	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!be_vdev)
		return;

	dp_aggregate_all_vdev_stats(vdev_stats, &be_vdev->mlo_stats);
	dp_aggregate_all_vdev_stats(vdev_stats, &vdev->stats);

	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);

	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
}

/**
 * dp_mlo_aggr_ptnr_iface_stats() - aggregate mlo partner vdev stats
 * @be_vdev: vdev handle
 * @ptnr_vdev: partner vdev handle
 * @arg: target buffer for aggregation
 *
 * return: void
 */
static
void dp_mlo_aggr_ptnr_iface_stats(struct dp_vdev_be *be_vdev,
				  struct dp_vdev *ptnr_vdev,
				  void *arg)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;

	dp_aggregate_interface_stats(ptnr_vdev, tgt_vdev_stats);
}

/**
 * dp_mlo_aggr_ptnr_iface_stats_mlo_links() - aggregate mlo partner vdev
 * stats based on peer type
 * @be_vdev: vdev handle
 * @ptnr_vdev: partner vdev handle
 * @arg: target buffer for aggregation
 *
 * return: void
 */
static
void dp_mlo_aggr_ptnr_iface_stats_mlo_links(
					struct dp_vdev_be *be_vdev,
					struct dp_vdev *ptnr_vdev,
					void *arg)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;

	dp_aggregate_interface_stats_based_on_peer_type(ptnr_vdev,
							tgt_vdev_stats,
							DP_PEER_TYPE_MLO_LINK);
}

/**
 * dp_aggregate_sta_interface_stats() - for sta mode aggregate vdev stats
 * from all link peers
 * @soc: soc handle
 * @vdev: vdev handle
 * @buf: target buffer for aggregation
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
dp_aggregate_sta_interface_stats(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 void *buf)
{
	struct dp_peer *vap_bss_peer = NULL;
	struct dp_peer *mld_peer = NULL;
	struct dp_peer *link_peer = NULL;
	struct dp_mld_link_peers link_peers_info;
	uint8_t i = 0;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
						  DP_MOD_ID_GENERIC_STATS);
	if (!vap_bss_peer)
		return QDF_STATUS_E_FAILURE;

	mld_peer = DP_GET_MLD_PEER_FROM_PEER(vap_bss_peer);

	if (!mld_peer) {
		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_GENERIC_STATS);
		return QDF_STATUS_E_FAILURE;
	}

	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, &link_peers_info,
					    DP_MOD_ID_GENERIC_STATS);

	for (i = 0; i < link_peers_info.num_links; i++) {
		link_peer = link_peers_info.link_peers[i];
		dp_update_vdev_stats(soc, link_peer, buf);
		dp_aggregate_vdev_ingress_stats((struct cdp_vdev_stats *)buf,
						&link_peer->vdev->stats);
		dp_aggregate_vdev_basic_stats(
					(struct cdp_vdev_stats *)buf,
					&link_peer->vdev->stats);
	}

	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_GENERIC_STATS);
	dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_GENERIC_STATS);
	return ret;
}

static QDF_STATUS dp_mlo_get_mld_vdev_stats(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id, void *buf,
					    bool link_vdev_only)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_GENERIC_STATS);
	struct dp_vdev_be *vdev_be = NULL;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	vdev_be = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!vdev_be) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
		return QDF_STATUS_E_FAILURE;
	}

	if (vdev->opmode == wlan_op_mode_sta) {
		ret = dp_aggregate_sta_interface_stats(soc, vdev, buf);
		goto complete;
	}

	if (DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap &&
	    vdev->opmode == wlan_op_mode_ap) {
		dp_aggregate_interface_stats_based_on_peer_type(
						vdev, buf,
						DP_PEER_TYPE_MLO_LINK);
		if (link_vdev_only)
			goto complete;

		/* Aggregate stats from partner vdevs */
		dp_mlo_iter_ptnr_vdev(be_soc, vdev_be,
				      dp_mlo_aggr_ptnr_iface_stats_mlo_links,
				      buf,
				      DP_MOD_ID_GENERIC_STATS);
	} else {
		dp_aggregate_interface_stats(vdev, buf);
		if (link_vdev_only)
			goto complete;

		/* Aggregate stats from partner vdevs */
		dp_mlo_iter_ptnr_vdev(be_soc, vdev_be,
				      dp_mlo_aggr_ptnr_iface_stats, buf,
				      DP_MOD_ID_GENERIC_STATS);
	}

complete:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
	return ret;
}

QDF_STATUS
dp_get_interface_stats_be(struct cdp_soc_t *soc_hdl,
			  uint8_t vdev_id,
			  void *buf,
			  bool is_aggregate)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_GENERIC_STATS);
	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	if (DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap &&
	    vdev->opmode == wlan_op_mode_ap) {
		dp_aggregate_interface_stats_based_on_peer_type(
						vdev, buf,
						DP_PEER_TYPE_LEGACY);
	} else {
		dp_aggregate_interface_stats(vdev, buf);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
	return QDF_STATUS_SUCCESS;
}
#endif
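
/*
 * Illustrative sketch (not part of the driver): dp_mlo_get_mld_vdev_stats()
 * above is reached through the cdp mlo ops table below
 * (.mlo_get_mld_vdev_stats). The caller owns the output buffer, and
 * link_vdev_only limits aggregation to the given link vdev, skipping its
 * MLO partners. Buffer handling below is hypothetical.
 *
 *	struct cdp_vdev_stats stats;
 *
 *	qdf_mem_zero(&stats, sizeof(stats));
 *	dp_mlo_get_mld_vdev_stats(soc_hdl, vdev_id, &stats, false);
 */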
static struct cdp_mlo_ops dp_mlo_ops = {
	.mlo_soc_setup = dp_mlo_soc_setup,
	.mlo_soc_teardown = dp_mlo_soc_teardown,
	.update_mlo_ptnr_list = dp_update_mlo_ptnr_list,
	.clear_mlo_ptnr_list = dp_clear_mlo_ptnr_list,
	.mlo_setup_complete = dp_mlo_setup_complete,
	.mlo_update_delta_tsf2 = dp_mlo_update_delta_tsf2,
	.mlo_update_delta_tqm = dp_mlo_update_delta_tqm,
	.mlo_update_mlo_ts_offset = dp_mlo_update_mlo_ts_offset,
	.mlo_ctxt_attach = dp_mlo_ctxt_attach_wifi3,
	.mlo_ctxt_detach = dp_mlo_ctxt_detach_wifi3,
#ifdef CONFIG_MLO_SINGLE_DEV
	.mlo_get_mld_vdev_stats = dp_mlo_get_mld_vdev_stats,
#endif
};

void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!params->mlo_enabled) {
		dp_warn("MLO not enabled on SOC");
		return;
	}

	be_soc->mlo_chip_id = params->mlo_chip_id;
	be_soc->ml_ctxt = cdp_mlo_ctx_to_dp(params->ml_context);
	be_soc->mlo_enabled = 1;
	soc->cdp_soc.ops->mlo_ops = &dp_mlo_ops;
}

void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	uint8_t link_id;

	if (!be_soc->mlo_enabled)
		return;

	if (!ml_ctxt)
		return;

	link_id = be_pdev->mlo_link_id;

	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC) {
		if (!ml_ctxt->link_to_pdev_map[link_id])
			ml_ctxt->link_to_pdev_map[link_id] = be_pdev;
		else
			dp_alert("Attempt to update existing map for link %u",
				 link_id);
	}
}

void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	uint8_t link_id;

	if (!be_soc->mlo_enabled)
		return;

	if (!ml_ctxt)
		return;

	link_id = be_pdev->mlo_link_id;

	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
		ml_ctxt->link_to_pdev_map[link_id] = NULL;
}

static struct dp_pdev_be *
dp_mlo_get_be_pdev_from_link_id(struct dp_mlo_ctxt *ml_ctxt, uint8_t link_id)
{
	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
		return ml_ctxt->link_to_pdev_map[link_id];

	return NULL;
}
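
/*
 * Illustrative sketch (not part of the driver): hw_link_id is a group-wide
 * link index, so the map holds one slot per link across all chips
 * (WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC entries) and a lookup
 * is a bounds-checked array access:
 *
 *	struct dp_pdev_be *be_pdev =
 *		dp_mlo_get_be_pdev_from_link_id(ml_ctxt, hw_link_id);
 *
 *	if (!be_pdev)
 *		... link not (yet) mapped in this group ...
 */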
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(pdev->soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);

	if (!be_soc->mlo_enabled) {
		dp_info("MLO not enabled on SOC");
		return;
	}

	be_pdev->mlo_link_id = params->mlo_link_id;
}

void dp_mlo_partner_chips_map(struct dp_soc *soc,
			      struct dp_peer *peer,
			      uint16_t peer_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = NULL;
	bool is_ml_peer_id =
		HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
	uint8_t chip_id;
	struct dp_soc *temp_soc;

	/* for non-ML peers, don't map on partner chips */
	if (!is_ml_peer_id)
		return;

	mlo_ctxt = be_soc->ml_ctxt;
	if (!mlo_ctxt)
		return;

	qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);
	for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
		temp_soc = mlo_ctxt->ml_soc_list[chip_id];

		if (!temp_soc)
			continue;

		/* skip if this is current soc */
		if (temp_soc == soc)
			continue;

		dp_peer_find_id_to_obj_add(temp_soc, peer, peer_id);
	}
	qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
}

qdf_export_symbol(dp_mlo_partner_chips_map);

void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
				uint16_t peer_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	bool is_ml_peer_id =
		HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
	uint8_t chip_id;
	struct dp_soc *temp_soc;

	if (!is_ml_peer_id)
		return;

	if (!mlo_ctxt)
		return;

	qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);
	for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
		temp_soc = mlo_ctxt->ml_soc_list[chip_id];

		if (!temp_soc)
			continue;

		/* skip if this is current soc */
		if (temp_soc == soc)
			continue;

		dp_peer_find_id_to_obj_remove(temp_soc, peer_id);
	}
	qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
}

qdf_export_symbol(dp_mlo_partner_chips_unmap);
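
/*
 * Illustrative sketch (not part of the driver): whether a peer id refers to
 * an ML peer is carried in the peer metadata, so map and unmap gate on the
 * same valid bit. Callers may pass any peer id; the check is internal.
 *
 *	if (HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id)) {
 *		dp_mlo_partner_chips_map(soc, peer, peer_id);
 *		...
 *		dp_mlo_partner_chips_unmap(soc, peer_id);
 *	}
 */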
uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return be_soc->mlo_chip_id;
}

qdf_export_symbol(dp_mlo_get_chip_id);

struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *link_peer_soc = NULL;
	struct dp_peer *peer = NULL;

	if (!mlo_ctxt)
		return NULL;

	link_peer_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);

	if (!link_peer_soc)
		return NULL;

	peer = dp_peer_find_hash_find(link_peer_soc, peer_mac_addr,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
	qdf_atomic_dec(&link_peer_soc->ref_count);
	return peer;
}

qdf_export_symbol(dp_link_peer_hash_find_by_chip_id);

void dp_mlo_get_rx_hash_key(struct dp_soc *soc,
			    struct cdp_lro_hash_config *lro_hash)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !ml_ctxt)
		return dp_get_rx_hash_key_bytes(lro_hash);

	qdf_mem_copy(lro_hash->toeplitz_hash_ipv4, ml_ctxt->toeplitz_hash_ipv4,
		     (sizeof(lro_hash->toeplitz_hash_ipv4[0]) *
		      LRO_IPV4_SEED_ARR_SZ));
	qdf_mem_copy(lro_hash->toeplitz_hash_ipv6, ml_ctxt->toeplitz_hash_ipv6,
		     (sizeof(lro_hash->toeplitz_hash_ipv6[0]) *
		      LRO_IPV6_SEED_ARR_SZ));
}

struct dp_soc *
dp_rx_replenish_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *replenish_soc;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	if (be_soc->mlo_chip_id == chip_id)
		return soc;

	replenish_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
	if (qdf_unlikely(!replenish_soc)) {
		dp_alert("replenish SOC is NULL");
		qdf_assert_always(0);
	}

	return replenish_soc;
}

uint8_t dp_soc_get_num_soc_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return 1;

	return mlo_ctxt->ml_soc_cnt;
}

struct dp_soc *
dp_soc_get_by_idle_bm_id(struct dp_soc *soc, uint8_t idle_bm_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *partner_soc = NULL;
	uint8_t chip_id;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);

		if (!partner_soc)
			continue;

		if (partner_soc->idle_link_bm_id == idle_bm_id)
			return partner_soc;
	}

	return NULL;
}

#ifdef WLAN_MLO_MULTI_CHIP
void dp_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
			   struct dp_vdev_be *be_vdev,
			   dp_ptnr_vdev_iter_func func,
			   void *arg,
			   enum dp_mod_id mod_id)
{
	int i = 0;
	int j = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *ptnr_vdev;

			ptnr_vdev = dp_vdev_get_ref_by_id(
					ptnr_soc,
					be_vdev->partner_vdev_list[i][j],
					mod_id);
			if (!ptnr_vdev)
				continue;
			(*func)(be_vdev, ptnr_vdev, arg);
			dp_vdev_unref_delete(ptnr_vdev->pdev->soc,
					     ptnr_vdev,
					     mod_id);
		}
	}
}

qdf_export_symbol(dp_mlo_iter_ptnr_vdev);
#endif

#ifdef WLAN_MCAST_MLO
struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
					      struct dp_vdev_be *be_vdev,
					      enum dp_mod_id mod_id)
{
	int i = 0;
	int j = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
	struct dp_vdev *vdev = (struct dp_vdev *)be_vdev;

	if (be_vdev->mcast_primary) {
		if (dp_vdev_get_ref((struct dp_soc *)be_soc, vdev, mod_id) !=
		    QDF_STATUS_SUCCESS)
			return NULL;
		return vdev;
	}

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *ptnr_vdev = NULL;
			struct dp_vdev_be *be_ptnr_vdev = NULL;

			ptnr_vdev = dp_vdev_get_ref_by_id(
					ptnr_soc,
					be_vdev->partner_vdev_list[i][j],
					mod_id);
			if (!ptnr_vdev)
				continue;
			be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
			if (be_ptnr_vdev->mcast_primary)
				return ptnr_vdev;
			dp_vdev_unref_delete(be_ptnr_vdev->vdev.pdev->soc,
					     &be_ptnr_vdev->vdev,
					     mod_id);
		}
	}

	return NULL;
}

qdf_export_symbol(dp_mlo_get_mcast_primary_vdev);
#endif

/**
 * dp_mlo_iter_ptnr_soc() - iterate through mlo soc list and call the callback
 * @be_soc: dp_soc_be pointer
 * @func: Function to be called for each soc
 * @arg: context to be passed to the callback
 *
 * Return: true if mlo is enabled, false if mlo is disabled
 */
bool dp_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc, dp_ptnr_soc_iter_func func,
			  void *arg)
{
	int i = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !be_soc->ml_ctxt)
		return false;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		(*func)(ptnr_soc, arg, i);
	}

	return true;
}

qdf_export_symbol(dp_mlo_iter_ptnr_soc);
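
/*
 * Illustrative sketch (not part of the driver): dp_mlo_iter_ptnr_soc() fans
 * a callback out to every partner soc in the group, passing the chip id as
 * the last argument; dp_mlo_soc_teardown() above uses this pattern with
 * dp_mlo_soc_drain_rx_buf(). The callback name below is hypothetical.
 *
 *	static void dp_print_soc_id(struct dp_soc *soc, void *arg, int chip_id)
 *	{
 *		dp_info("partner soc %pK on chip %d", soc, chip_id);
 *	}
 *
 *	dp_mlo_iter_ptnr_soc(be_soc, dp_print_soc_id, NULL);
 */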
static inline uint64_t dp_mlo_get_mlo_ts_offset(struct dp_pdev_be *be_pdev)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev;
	struct dp_soc_be *be_soc;
	uint32_t mlo_offset;

	pdev = &be_pdev->pdev;
	soc = pdev->soc;
	be_soc = dp_get_be_soc_from_dp_soc(soc);

	mlo_offset = be_soc->mlo_tstamp_offset;

	return mlo_offset;
}

int32_t dp_mlo_get_delta_tsf2_wrt_mlo_offset(struct dp_soc *soc,
					     uint8_t hw_link_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	struct dp_pdev_be *be_pdev;
	int32_t delta_tsf2_mlo_offset;
	int32_t mlo_offset, delta_tsf2;

	if (!ml_ctxt)
		return 0;

	be_pdev = dp_mlo_get_be_pdev_from_link_id(ml_ctxt, hw_link_id);
	if (!be_pdev)
		return 0;

	mlo_offset = dp_mlo_get_mlo_ts_offset(be_pdev);
	delta_tsf2 = be_pdev->delta_tsf2;

	delta_tsf2_mlo_offset = mlo_offset - delta_tsf2;
	return delta_tsf2_mlo_offset;
}

int32_t dp_mlo_get_delta_tqm_wrt_mlo_offset(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	int32_t delta_tqm_mlo_offset;
	int32_t mlo_offset, delta_tqm;

	mlo_offset = be_soc->mlo_tstamp_offset;
	delta_tqm = be_soc->delta_tqm;

	delta_tqm_mlo_offset = mlo_offset - delta_tqm;
	return delta_tqm_mlo_offset;
}
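
/*
 * Illustrative arithmetic (numbers are made up): with an MLO timestamp
 * offset of 1000 and a per-link delta_tsf2 of 300,
 * dp_mlo_get_delta_tsf2_wrt_mlo_offset() returns 1000 - 300 = 700, i.e.
 * the link's TSF2 delta re-expressed relative to the common MLO timebase.
 * dp_mlo_get_delta_tqm_wrt_mlo_offset() applies the same subtraction with
 * the per-soc TQM delta.
 */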
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_umac_reset_update_partner_map() - Update Umac reset partner map
 * @mlo_ctx: DP ML context handle
 * @chip_id: chip id
 * @set: flag indicating whether to set or clear the bit
 *
 * Return: void
 */
static void dp_umac_reset_update_partner_map(struct dp_mlo_ctxt *mlo_ctx,
					     int chip_id, bool set)
{
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx =
		&mlo_ctx->grp_umac_reset_ctx;

	if (set)
		qdf_atomic_set_bit(chip_id, &grp_umac_reset_ctx->partner_map);
	else
		qdf_atomic_clear_bit(chip_id,
				     &grp_umac_reset_ctx->partner_map);
}

QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc)
{
	struct dp_mlo_ctxt *mlo_ctx;
	struct dp_soc_be *be_soc;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (!be_soc) {
		dp_umac_reset_err("null be_soc");
		return QDF_STATUS_E_NULL_VALUE;
	}

	mlo_ctx = be_soc->ml_ctxt;
	if (!mlo_ctx) {
		/* This API can be called for non-MLO SOC as well. Hence,
		 * return the status as success when mlo_ctx is NULL.
		 */
		return QDF_STATUS_SUCCESS;
	}

	dp_umac_reset_update_partner_map(mlo_ctx, be_soc->mlo_chip_id, false);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_umac_reset_complete_umac_recovery() - Complete Umac reset session
 * @soc: dp soc handle
 *
 * Return: void
 */
void dp_umac_reset_complete_umac_recovery(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_alert("Umac reset was handled on soc %pK", soc);
		return;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	grp_umac_reset_ctx->umac_reset_in_progress = false;
	grp_umac_reset_ctx->is_target_recovery = false;
	grp_umac_reset_ctx->response_map = 0;
	grp_umac_reset_ctx->request_map = 0;
	grp_umac_reset_ctx->initiator_chip_id = 0;

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	dp_umac_reset_alert("Umac reset was handled on mlo group ctxt %pK",
			    mlo_ctx);
}

/**
 * dp_umac_reset_initiate_umac_recovery() - Initiate Umac reset session
 * @soc: dp soc handle
 * @umac_reset_ctx: Umac reset context
 * @rx_event: Rx event received
 * @is_target_recovery: Flag to indicate if it is triggered for target recovery
 *
 * Return: status
 */
QDF_STATUS dp_umac_reset_initiate_umac_recovery(struct dp_soc *soc,
				struct dp_soc_umac_reset_ctx *umac_reset_ctx,
				enum umac_reset_rx_event rx_event,
				bool is_target_recovery)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!mlo_ctx)
		return dp_umac_reset_validate_n_update_state_machine_on_rx(
				umac_reset_ctx, rx_event,
				UMAC_RESET_STATE_WAIT_FOR_TRIGGER,
				UMAC_RESET_STATE_DO_TRIGGER_RECEIVED);

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	if (grp_umac_reset_ctx->umac_reset_in_progress) {
		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return QDF_STATUS_E_INVAL;
	}

	status = dp_umac_reset_validate_n_update_state_machine_on_rx(
				umac_reset_ctx, rx_event,
				UMAC_RESET_STATE_WAIT_FOR_TRIGGER,
				UMAC_RESET_STATE_DO_TRIGGER_RECEIVED);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return status;
	}

	grp_umac_reset_ctx->umac_reset_in_progress = true;
	grp_umac_reset_ctx->is_target_recovery = is_target_recovery;

	/* We don't wait for the 'Umac trigger' message from all socs */
	grp_umac_reset_ctx->request_map = grp_umac_reset_ctx->partner_map;
	grp_umac_reset_ctx->response_map = grp_umac_reset_ctx->partner_map;
	grp_umac_reset_ctx->initiator_chip_id = dp_mlo_get_chip_id(soc);
	grp_umac_reset_ctx->umac_reset_count++;

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_umac_reset_handle_action_cb() - Function to call action callback
 * @soc: dp soc handle
 * @umac_reset_ctx: Umac reset context
 * @action: Action to call the callback for
 *
 * Return: QDF_STATUS status
 */
QDF_STATUS
dp_umac_reset_handle_action_cb(struct dp_soc *soc,
			       struct dp_soc_umac_reset_ctx *umac_reset_ctx,
			       enum umac_reset_action action)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_debug("MLO context is Null");
		goto handle;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	qdf_atomic_set_bit(dp_mlo_get_chip_id(soc),
			   &grp_umac_reset_ctx->request_map);

	dp_umac_reset_debug("partner_map %u request_map %u",
			    grp_umac_reset_ctx->partner_map,
			    grp_umac_reset_ctx->request_map);

	/* This logic is needed for synchronization between mlo socs */
	if ((grp_umac_reset_ctx->partner_map & grp_umac_reset_ctx->request_map)
	    != grp_umac_reset_ctx->partner_map) {
		struct hif_softc *hif_sc = HIF_GET_SOFTC(soc->hif_handle);
		struct hif_umac_reset_ctx *hif_umac_reset_ctx;

		if (!hif_sc) {
			hif_err("scn is null");
			qdf_assert_always(0);
			return QDF_STATUS_E_FAILURE;
		}

		hif_umac_reset_ctx = &hif_sc->umac_reset_ctx;

		/* Mark the action as pending */
		umac_reset_ctx->pending_action = action;
		/* Reschedule the tasklet and exit */
		tasklet_hi_schedule(&hif_umac_reset_ctx->intr_tq);
		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

		return QDF_STATUS_SUCCESS;
	}

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
	umac_reset_ctx->pending_action = UMAC_RESET_ACTION_NONE;

handle:
	if (!umac_reset_ctx->rx_actions.cb[action]) {
		dp_umac_reset_err("rx callback is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	return umac_reset_ctx->rx_actions.cb[action](soc);
}
/**
 * dp_umac_reset_post_tx_cmd() - Iterate partner socs and post Tx command
 * @umac_reset_ctx: UMAC reset context
 * @tx_cmd: Tx command to be posted
 *
 * Return: QDF status of operation
 */
QDF_STATUS
dp_umac_reset_post_tx_cmd(struct dp_soc_umac_reset_ctx *umac_reset_ctx,
			  enum umac_reset_tx_cmd tx_cmd)
{
	struct dp_soc *soc = container_of(umac_reset_ctx, struct dp_soc,
					  umac_reset_ctx);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_post_tx_cmd_via_shmem(soc, &tx_cmd, 0);
		return QDF_STATUS_SUCCESS;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	qdf_atomic_set_bit(dp_mlo_get_chip_id(soc),
			   &grp_umac_reset_ctx->response_map);

	/* This logic is needed for synchronization between mlo socs */
	if ((grp_umac_reset_ctx->partner_map & grp_umac_reset_ctx->response_map)
	    != grp_umac_reset_ctx->partner_map) {
		dp_umac_reset_debug(
			"Response(s) pending : expected map %u current map %u",
			grp_umac_reset_ctx->partner_map,
			grp_umac_reset_ctx->response_map);

		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return QDF_STATUS_SUCCESS;
	}

	dp_umac_reset_debug(
		"All responses received: expected map %u current map %u",
		grp_umac_reset_ctx->partner_map,
		grp_umac_reset_ctx->response_map);

	grp_umac_reset_ctx->response_map = 0;
	grp_umac_reset_ctx->request_map = 0;
	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	dp_mlo_iter_ptnr_soc(be_soc, &dp_umac_reset_post_tx_cmd_via_shmem,
			     &tx_cmd);

	if (tx_cmd == UMAC_RESET_TX_CMD_POST_RESET_COMPLETE_DONE)
		dp_umac_reset_complete_umac_recovery(soc);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_umac_reset_initiator_check() - Check if soc is the Umac reset initiator
 * @soc: dp soc handle
 *
 * Return: true if the soc is initiator or false otherwise
 */
bool dp_umac_reset_initiator_check(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return true;

	return (mlo_ctx->grp_umac_reset_ctx.initiator_chip_id ==
		dp_mlo_get_chip_id(soc));
}

/**
 * dp_umac_reset_target_recovery_check() - Check if this is for target recovery
 * @soc: dp soc handle
 *
 * Return: true if the session is for target recovery or false otherwise
 */
bool dp_umac_reset_target_recovery_check(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return false;

	return mlo_ctx->grp_umac_reset_ctx.is_target_recovery;
}

/**
 * dp_umac_reset_is_soc_ignored() - Check if this soc is to be ignored
 * @soc: dp soc handle
 *
 * Return: true if the soc is ignored or false otherwise
 */
bool dp_umac_reset_is_soc_ignored(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return false;

	return !qdf_atomic_test_bit(dp_mlo_get_chip_id(soc),
				    &mlo_ctx->grp_umac_reset_ctx.partner_map);
}

QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc)
{
	struct dp_mlo_ctxt *mlo_ctx;
	struct dp_soc_be *be_soc;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (!be_soc) {
		dp_umac_reset_err("null be_soc");
		return QDF_STATUS_E_NULL_VALUE;
	}

	mlo_ctx = be_soc->ml_ctxt;
	if (!mlo_ctx) {
		/* This API can be called for non-MLO SOC as well. Hence,
		 * return the status as success when mlo_ctx is NULL.
		 */
		return QDF_STATUS_SUCCESS;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;

	DP_UMAC_RESET_PRINT_STATS("MLO UMAC RESET stats\n"
		  "\t\tPartner map :%x\n"
		  "\t\tRequest map :%x\n"
		  "\t\tResponse map :%x\n"
		  "\t\tIs target recovery :%d\n"
		  "\t\tIs Umac reset in progress :%d\n"
		  "\t\tNumber of UMAC resets triggered :%d\n"
		  "\t\tInitiator chip ID :%d\n",
		  grp_umac_reset_ctx->partner_map,
		  grp_umac_reset_ctx->request_map,
		  grp_umac_reset_ctx->response_map,
		  grp_umac_reset_ctx->is_target_recovery,
		  grp_umac_reset_ctx->umac_reset_in_progress,
		  grp_umac_reset_ctx->umac_reset_count,
		  grp_umac_reset_ctx->initiator_chip_id);

	return QDF_STATUS_SUCCESS;
}

bool dp_umac_reset_is_inprogress(struct cdp_soc_t *psoc)
{
	struct dp_soc_umac_reset_ctx *umac_reset_ctx;
	struct dp_soc *soc = (struct dp_soc *)psoc;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
	struct dp_soc_be *be_soc = NULL;
	struct dp_mlo_ctxt *mlo_ctx = NULL;

	if (!soc) {
		dp_umac_reset_err("DP SOC is null");
		return false;
	}

	umac_reset_ctx = &soc->umac_reset_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (be_soc)
		mlo_ctx = be_soc->ml_ctxt;

	if (mlo_ctx) {
		grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
		return grp_umac_reset_ctx->umac_reset_in_progress;
	} else {
		return (umac_reset_ctx->current_state !=
			UMAC_RESET_STATE_WAIT_FOR_TRIGGER);
	}
}
#endif

struct dp_soc *
dp_get_soc_by_chip_id_be(struct dp_soc *soc, uint8_t chip_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *partner_soc;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	if (be_soc->mlo_chip_id == chip_id)
		return soc;

	partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
	return partner_soc;
}