dp_mlo.c

/*
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <wlan_utility.h>
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_be_api.h>
#include "dp_mlo.h"
#include <dp_be.h>
#include <dp_be_rx.h>
#include <wlan_cfg.h>
#include <wlan_mlo_mgr_cmn.h>
#include "dp_umac_reset.h"

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_umac_reset_update_partner_map() - Update Umac reset partner map
 * @mlo_ctx: DP MLO context handle
 * @chip_id: chip id
 * @set: flag indicating whether to set or clear the bit
 *
 * Return: void
 */
static void dp_umac_reset_update_partner_map(struct dp_mlo_ctxt *mlo_ctx,
					     int chip_id, bool set);
#endif

/**
 * dp_mlo_ctxt_attach_wifi3() - Attach DP MLO context
 * @ctrl_ctxt: CDP control context
 *
 * Return: DP MLO context handle on success, NULL on failure
 */
static struct cdp_mlo_ctxt *
dp_mlo_ctxt_attach_wifi3(struct cdp_ctrl_mlo_mgr *ctrl_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt =
		qdf_mem_malloc(sizeof(struct dp_mlo_ctxt));

	if (!mlo_ctxt) {
		dp_err("Failed to allocate DP MLO Context");
		return NULL;
	}

	mlo_ctxt->ctrl_ctxt = ctrl_ctxt;

	if (dp_mlo_peer_find_hash_attach_be
			(mlo_ctxt, DP_MAX_MLO_PEER) != QDF_STATUS_SUCCESS) {
		dp_err("Failed to allocate peer hash");
		qdf_mem_free(mlo_ctxt);
		return NULL;
	}

	qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv4,
			     (sizeof(mlo_ctxt->toeplitz_hash_ipv4[0]) *
			      LRO_IPV4_SEED_ARR_SZ));
	qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv6,
			     (sizeof(mlo_ctxt->toeplitz_hash_ipv6[0]) *
			      LRO_IPV6_SEED_ARR_SZ));

	qdf_spinlock_create(&mlo_ctxt->ml_soc_list_lock);
	qdf_spinlock_create(&mlo_ctxt->grp_umac_reset_ctx.grp_ctx_lock);
	dp_mlo_dev_ctxt_list_attach(mlo_ctxt);
	return dp_mlo_ctx_to_cdp(mlo_ctxt);
}

/**
 * dp_mlo_ctxt_detach_wifi3() - Detach DP MLO context
 * @cdp_ml_ctxt: pointer to CDP DP MLO context
 *
 * Return: void
 */
static void dp_mlo_ctxt_detach_wifi3(struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);

	if (!cdp_ml_ctxt)
		return;

	qdf_spinlock_destroy(&mlo_ctxt->grp_umac_reset_ctx.grp_ctx_lock);
	qdf_spinlock_destroy(&mlo_ctxt->ml_soc_list_lock);
	dp_mlo_dev_ctxt_list_detach(mlo_ctxt);
	dp_mlo_peer_find_hash_detach_be(mlo_ctxt);
	qdf_mem_free(mlo_ctxt);
}

/**
 * dp_mlo_set_soc_by_chip_id() - Add or remove a DP soc in the ML context
 * soc list
 * @ml_ctxt: DP ML context handle
 * @soc: DP soc handle
 * @chip_id: MLO chip id
 *
 * Return: void
 */
static void dp_mlo_set_soc_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
				      struct dp_soc *soc,
				      uint8_t chip_id)
{
	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
	ml_ctxt->ml_soc_list[chip_id] = soc;

	/* The same API is called during soc_attach and soc_detach;
	 * the soc parameter is non-NULL or NULL accordingly.
	 */
	if (soc)
		ml_ctxt->ml_soc_cnt++;
	else
		ml_ctxt->ml_soc_cnt--;

	dp_umac_reset_update_partner_map(ml_ctxt, chip_id, !!soc);

	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
}

struct dp_soc *
dp_mlo_get_soc_ref_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
			      uint8_t chip_id)
{
	struct dp_soc *soc = NULL;

	if (!ml_ctxt) {
		dp_warn("MLO context not created, MLO not enabled");
		return NULL;
	}

	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
	soc = ml_ctxt->ml_soc_list[chip_id];

	if (!soc) {
		qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
		return NULL;
	}

	qdf_atomic_inc(&soc->ref_count);
	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);

	return soc;
}
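
/**
 * dp_partner_soc_rx_hw_cc_init() - Initialize HW cookie conversion on this
 * SOC for the Rx descriptor pools of all MLO partner SOCs
 * @mlo_ctxt: DP MLO context handle
 * @be_soc: BE SOC on which the conversion contexts are initialized
 *
 * Return: QDF_STATUS
 */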
static QDF_STATUS dp_partner_soc_rx_hw_cc_init(struct dp_mlo_ctxt *mlo_ctxt,
					       struct dp_soc_be *be_soc)
{
	uint8_t i;
	struct dp_soc *partner_soc;
	struct dp_soc_be *be_partner_soc;
	uint8_t pool_id;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
		if (!partner_soc) {
			dp_err("partner_soc is NULL");
			continue;
		}

		be_partner_soc = dp_get_be_soc_from_dp_soc(partner_soc);

		for (pool_id = 0; pool_id < MAX_RXDESC_POOLS; pool_id++) {
			qdf_status =
				dp_hw_cookie_conversion_init
					(be_soc,
					 &be_partner_soc->rx_cc_ctx[pool_id]);
			if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
				dp_alert("MLO partner soc RX CC init failed");
				return qdf_status;
			}
		}
	}

	return qdf_status;
}
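
/**
 * dp_mlo_soc_drain_rx_buf() - Drain any buffers pending in the Rx rings of
 * a SOC, with Rx interrupts masked
 * @soc: DP soc handle
 * @arg: iterator argument (unused)
 * @chip_id: MLO chip id of @soc
 *
 * Saves and clears the Rx interrupt masks, busy-waits until
 * dp_service_srngs() has finished on every CPU, processes the REO
 * destination, REO exception and Rx WBM release rings, and finally
 * restores the saved interrupt masks.
 *
 * Return: void
 */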
static void dp_mlo_soc_drain_rx_buf(struct dp_soc *soc, void *arg, int chip_id)
{
	uint8_t i = 0;
	uint8_t cpu = 0;
	uint8_t rx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_err_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_wbm_rel_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t reo_status_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};

	/* Save the current interrupt mask and disable the interrupts */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		rx_ring_mask[i] = soc->intr_ctx[i].rx_ring_mask;
		rx_err_ring_mask[i] = soc->intr_ctx[i].rx_err_ring_mask;
		rx_wbm_rel_ring_mask[i] = soc->intr_ctx[i].rx_wbm_rel_ring_mask;
		reo_status_ring_mask[i] = soc->intr_ctx[i].reo_status_ring_mask;

		soc->intr_ctx[i].rx_ring_mask = 0;
		soc->intr_ctx[i].rx_err_ring_mask = 0;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
		soc->intr_ctx[i].reo_status_ring_mask = 0;
	}

	/* make sure dp_service_srngs is not running on any of the CPUs */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		while (qdf_atomic_test_bit(cpu,
					   &soc->service_rings_running))
			;
	}

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		uint8_t ring = 0;
		uint32_t num_entries = 0;
		hal_ring_handle_t hal_ring_hdl = NULL;
		uint8_t rx_mask = wlan_cfg_get_rx_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_err_mask = wlan_cfg_get_rx_err_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_wbm_rel_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
						soc->wlan_cfg_ctx, i);

		if (rx_mask) {
			/* iterate through each reo ring and process the buf */
			for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
				if (!(rx_mask & (1 << ring)))
					continue;

				hal_ring_hdl =
					soc->reo_dest_ring[ring].hal_srng;
				num_entries = hal_srng_get_num_entries(
								soc->hal_soc,
								hal_ring_hdl);
				dp_rx_process_be(&soc->intr_ctx[i],
						 hal_ring_hdl,
						 ring,
						 num_entries);
			}
		}

		/* Process REO Exception ring */
		if (rx_err_mask) {
			hal_ring_hdl = soc->reo_exception_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
						soc->hal_soc,
						hal_ring_hdl);

			dp_rx_err_process(&soc->intr_ctx[i], soc,
					  hal_ring_hdl, num_entries);
		}

		/* Process Rx WBM release ring */
		if (rx_wbm_rel_mask) {
			hal_ring_hdl = soc->rx_rel_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
						soc->hal_soc,
						hal_ring_hdl);

			dp_rx_wbm_err_process(&soc->intr_ctx[i], soc,
					      hal_ring_hdl, num_entries);
		}
	}

	/* restore the interrupt mask */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].rx_ring_mask = rx_ring_mask[i];
		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask[i];
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask[i];
		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask[i];
	}
}
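
/**
 * dp_mlo_soc_setup() - Register a DP soc with the DP MLO context
 * @soc_hdl: CDP soc handle
 * @cdp_ml_ctxt: CDP MLO context handle
 *
 * Return: void
 */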
static void dp_mlo_soc_setup(struct cdp_soc_t *soc_hdl,
			     struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	uint8_t pdev_id;

	if (!cdp_ml_ctxt)
		return;

	be_soc->ml_ctxt = mlo_ctxt;

	for (pdev_id = 0; pdev_id < MAX_PDEV_CNT; pdev_id++) {
		if (soc->pdev_list[pdev_id])
			dp_mlo_update_link_to_pdev_map(soc,
						       soc->pdev_list[pdev_id]);
	}

	dp_mlo_set_soc_by_chip_id(mlo_ctxt, soc, be_soc->mlo_chip_id);
}
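
/**
 * dp_mlo_soc_teardown() - Remove a DP soc from the DP MLO context
 * @soc_hdl: CDP soc handle
 * @cdp_ml_ctxt: CDP MLO context handle
 * @is_force_down: flag indicating a forced teardown
 *
 * Return: void
 */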
static void dp_mlo_soc_teardown(struct cdp_soc_t *soc_hdl,
				struct cdp_mlo_ctxt *cdp_ml_ctxt,
				bool is_force_down)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!cdp_ml_ctxt)
		return;

	/* During the teardown drain the Rx buffers if any exist in the ring */
	dp_mlo_iter_ptnr_soc(be_soc,
			     dp_mlo_soc_drain_rx_buf,
			     NULL);

	dp_mlo_set_soc_by_chip_id(mlo_ctxt, NULL, be_soc->mlo_chip_id);
	be_soc->ml_ctxt = NULL;
}
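
/**
 * dp_mlo_add_ptnr_vdev() - Add vdev1 to the partner (or bridge) vdev list
 * of vdev2, indexed by vdev1's chip id and pdev id
 * @vdev1: vdev to be added
 * @vdev2: vdev whose partner list is updated
 * @soc: DP soc of vdev1
 * @pdev_id: pdev id of vdev1
 *
 * Return: QDF_STATUS
 */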
static QDF_STATUS dp_mlo_add_ptnr_vdev(struct dp_vdev *vdev1,
				       struct dp_vdev *vdev2,
				       struct dp_soc *soc, uint8_t pdev_id)
{
	struct dp_soc_be *soc_be = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *vdev2_be = dp_get_be_vdev_from_dp_vdev(vdev2);

	/* return when valid entry exists */
	if (vdev1->is_bridge_vdev) {
		if (vdev2_be->bridge_vdev_list[soc_be->mlo_chip_id][pdev_id] !=
		    CDP_INVALID_VDEV_ID)
			return QDF_STATUS_SUCCESS;

		vdev2_be->bridge_vdev_list[soc_be->mlo_chip_id][pdev_id] =
			vdev1->vdev_id;
	} else {
		if (vdev2_be->partner_vdev_list[soc_be->mlo_chip_id][pdev_id] !=
		    CDP_INVALID_VDEV_ID)
			return QDF_STATUS_SUCCESS;

		vdev2_be->partner_vdev_list[soc_be->mlo_chip_id][pdev_id] =
			vdev1->vdev_id;
	}

	mlo_debug("Add vdev%d to vdev%d list, mlo_chip_id = %d pdev_id = %d\n",
		  vdev1->vdev_id, vdev2->vdev_id, soc_be->mlo_chip_id, pdev_id);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_update_mlo_ptnr_list(struct cdp_soc_t *soc_hdl,
				   int8_t partner_vdev_ids[],
				   uint8_t num_vdevs,
				   uint8_t self_vdev_id)
{
	int i, j;
	struct dp_soc *self_soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *self_vdev;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(self_soc);
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;

	if (!dp_mlo)
		return QDF_STATUS_E_FAILURE;

	self_vdev = dp_vdev_get_ref_by_id(self_soc, self_vdev_id, DP_MOD_ID_RX);
	if (!self_vdev)
		return QDF_STATUS_E_FAILURE;

	/* Go through the input vdev id list; for each partner vdev found,
	 * - add the current vdev's id to the partner vdev's list (keyed by
	 *   chip id and pdev id) and increase the reference
	 * - add the partner vdev to the self list and increase the reference
	 */
	for (i = 0; i < num_vdevs; i++) {
		if (partner_vdev_ids[i] == CDP_INVALID_VDEV_ID)
			continue;

		for (j = 0; j < WLAN_MAX_MLO_CHIPS; j++) {
			struct dp_soc *soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, j);
			if (soc) {
				struct dp_vdev *vdev;

				vdev = dp_vdev_get_ref_by_id(soc,
					partner_vdev_ids[i], DP_MOD_ID_RX);
				if (vdev) {
					if (vdev == self_vdev) {
						dp_vdev_unref_delete(soc,
							vdev, DP_MOD_ID_RX);
						/*dp_soc_unref_delete(soc); */
						continue;
					}
					if (qdf_is_macaddr_equal(
						(struct qdf_mac_addr *)self_vdev->mld_mac_addr.raw,
						(struct qdf_mac_addr *)vdev->mld_mac_addr.raw)) {
						if (dp_mlo_add_ptnr_vdev(self_vdev,
							vdev, self_soc,
							self_vdev->pdev->pdev_id) !=
							QDF_STATUS_SUCCESS) {
							dp_err("Unable to add self to partner vdev's list");
							dp_vdev_unref_delete(soc,
								vdev, DP_MOD_ID_RX);
							/* TODO - release soc ref here */
							/* dp_soc_unref_delete(soc);*/
							ret = QDF_STATUS_E_FAILURE;
							goto exit;
						}
						/* add to self list */
						if (dp_mlo_add_ptnr_vdev(vdev, self_vdev, soc,
							vdev->pdev->pdev_id) !=
							QDF_STATUS_SUCCESS) {
							dp_err("Unable to add vdev to self vdev's list");
							/* vdev ref was taken on the partner soc */
							dp_vdev_unref_delete(soc,
								vdev, DP_MOD_ID_RX);
							/* TODO - release soc ref here */
							/* dp_soc_unref_delete(soc);*/
							ret = QDF_STATUS_E_FAILURE;
							goto exit;
						}
					}
					dp_vdev_unref_delete(soc, vdev,
							     DP_MOD_ID_RX);
				} /* vdev */
				/* TODO - release soc ref here */
				/* dp_soc_unref_delete(soc); */
			} /* soc */
		} /* for */
	} /* for */

exit:
	dp_vdev_unref_delete(self_soc, self_vdev, DP_MOD_ID_RX);
	return ret;
}

void dp_clr_mlo_ptnr_list(struct dp_soc *soc, struct dp_vdev *vdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *vdev_be = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
	uint8_t soc_id = be_soc->mlo_chip_id;
	uint8_t pdev_id = vdev->pdev->pdev_id;
	int i, j;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *pr_vdev;
			struct dp_soc *pr_soc;
			struct dp_soc_be *pr_soc_be;
			struct dp_pdev *pr_pdev;
			struct dp_vdev_be *pr_vdev_be;

			if (vdev_be->partner_vdev_list[i][j] ==
			    CDP_INVALID_VDEV_ID)
				continue;

			pr_soc = dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);
			if (!pr_soc)
				continue;

			pr_soc_be = dp_get_be_soc_from_dp_soc(pr_soc);
			pr_vdev = dp_vdev_get_ref_by_id(pr_soc,
					vdev_be->partner_vdev_list[i][j],
					DP_MOD_ID_RX);
			if (!pr_vdev)
				continue;

			/* remove self vdev from partner list */
			pr_vdev_be = dp_get_be_vdev_from_dp_vdev(pr_vdev);
			if (vdev->is_bridge_vdev)
				pr_vdev_be->bridge_vdev_list[soc_id][pdev_id] =
					CDP_INVALID_VDEV_ID;
			else
				pr_vdev_be->partner_vdev_list[soc_id][pdev_id] =
					CDP_INVALID_VDEV_ID;

			/* remove partner vdev from self list */
			pr_pdev = pr_vdev->pdev;
			vdev_be->partner_vdev_list[pr_soc_be->mlo_chip_id][pr_pdev->pdev_id] =
				CDP_INVALID_VDEV_ID;

			dp_vdev_unref_delete(pr_soc, pr_vdev, DP_MOD_ID_RX);
		}
	}

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *pr_vdev = NULL;
			struct dp_soc *pr_soc = NULL;
			struct dp_soc_be *pr_soc_be = NULL;
			struct dp_pdev *pr_pdev = NULL;
			struct dp_vdev_be *pr_vdev_be = NULL;

			if (vdev_be->bridge_vdev_list[i][j] ==
			    CDP_INVALID_VDEV_ID)
				continue;

			pr_soc = dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);
			if (!pr_soc)
				continue;

			pr_soc_be = dp_get_be_soc_from_dp_soc(pr_soc);
			pr_vdev = dp_vdev_get_ref_by_id(
					pr_soc,
					vdev_be->bridge_vdev_list[i][j],
					DP_MOD_ID_RX);
			if (!pr_vdev)
				continue;

			/* remove self vdev from partner list */
			pr_vdev_be = dp_get_be_vdev_from_dp_vdev(pr_vdev);
			if (vdev->is_bridge_vdev)
				pr_vdev_be->bridge_vdev_list[soc_id][pdev_id] =
					CDP_INVALID_VDEV_ID;
			else
				pr_vdev_be->partner_vdev_list[soc_id][pdev_id] =
					CDP_INVALID_VDEV_ID;

			/* remove partner vdev from self list */
			pr_pdev = pr_vdev->pdev;
			vdev_be->bridge_vdev_list[pr_soc_be->mlo_chip_id][pr_pdev->pdev_id] =
				CDP_INVALID_VDEV_ID;

			dp_vdev_unref_delete(pr_soc, pr_vdev, DP_MOD_ID_RX);
		}
	}
}
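
/**
 * dp_clear_mlo_ptnr_list() - Clear the MLO partner lists of a vdev
 * @soc_hdl: CDP soc handle
 * @self_vdev_id: id of the vdev whose partner lists are to be cleared
 *
 * Return: QDF_STATUS
 */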
static QDF_STATUS
dp_clear_mlo_ptnr_list(struct cdp_soc_t *soc_hdl, uint8_t self_vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;

	vdev = dp_vdev_get_ref_by_id(soc, self_vdev_id, DP_MOD_ID_RX);
	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	dp_clr_mlo_ptnr_list(soc, vdev);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
	return QDF_STATUS_SUCCESS;
}
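
/**
 * dp_mlo_setup_complete() - Initialize Rx HW cookie conversion between all
 * pairs of partner SOCs once MLO setup is complete
 * @cdp_ml_ctxt: CDP MLO context handle
 *
 * Return: void
 */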
static void dp_mlo_setup_complete(struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	int i;
	struct dp_soc *soc;
	struct dp_soc_be *be_soc;
	QDF_STATUS qdf_status;

	if (!cdp_ml_ctxt)
		return;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
		if (!soc)
			continue;

		be_soc = dp_get_be_soc_from_dp_soc(soc);
		qdf_status = dp_partner_soc_rx_hw_cc_init(mlo_ctxt, be_soc);
		if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
			dp_alert("MLO partner SOC Rx desc CC init failed");
			qdf_assert_always(0);
		}
	}
}
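
/**
 * dp_mlo_update_delta_tsf2() - Save the delta_tsf2 value for a pdev
 * @soc_hdl: CDP soc handle
 * @pdev_id: pdev id
 * @delta_tsf2: delta value to be saved
 *
 * Return: void
 */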
static void dp_mlo_update_delta_tsf2(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id, uint64_t delta_tsf2)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;
	struct dp_pdev_be *be_pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						  pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL for pdev_id %u", pdev_id);
		return;
	}

	be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);

	be_pdev->delta_tsf2 = delta_tsf2;
}
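
/**
 * dp_mlo_update_delta_tqm() - Save the delta_tqm value for the SOC
 * @soc_hdl: CDP soc handle
 * @delta_tqm: delta value to be saved
 *
 * Return: void
 */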
static void dp_mlo_update_delta_tqm(struct cdp_soc_t *soc_hdl,
				    uint64_t delta_tqm)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	be_soc->delta_tqm = delta_tqm;
}
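
/**
 * dp_mlo_update_mlo_ts_offset() - Save the MLO timestamp offset for the SOC
 * @soc_hdl: CDP soc handle
 * @offset: MLO timestamp offset to be saved
 *
 * Return: void
 */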
static void dp_mlo_update_mlo_ts_offset(struct cdp_soc_t *soc_hdl,
					uint64_t offset)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	be_soc->mlo_tstamp_offset = offset;
}

#ifdef CONFIG_MLO_SINGLE_DEV
/**
 * dp_aggregate_vdev_basic_stats() - aggregate vdev basic stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline
void dp_aggregate_vdev_basic_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	DP_UPDATE_BASIC_STATS(tgt_vdev_stats, src_vdev_stats);
}

/**
 * dp_aggregate_vdev_ingress_stats() - aggregate vdev ingress stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline
void dp_aggregate_vdev_ingress_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	/* Aggregate vdev ingress stats */
	DP_UPDATE_INGRESS_STATS(tgt_vdev_stats, src_vdev_stats);
}

/**
 * dp_aggregate_vdev_stats_for_unmapped_peers() - aggregate unmapped peer
 * stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline
void dp_aggregate_vdev_stats_for_unmapped_peers(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	/* Aggregate unmapped peers stats */
	DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(tgt_vdev_stats, src_vdev_stats);
}

/**
 * dp_aggregate_all_vdev_stats() - aggregate vdev ingress and unmapped peer
 * stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline
void dp_aggregate_all_vdev_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct cdp_vdev_stats *src_vdev_stats)
{
	dp_aggregate_vdev_ingress_stats(tgt_vdev_stats, src_vdev_stats);
	dp_aggregate_vdev_stats_for_unmapped_peers(tgt_vdev_stats,
						   src_vdev_stats);
}

/**
 * dp_mlo_vdev_stats_aggr_bridge_vap() - aggregate bridge vdev stats
 * @be_vdev: DP vdev handle
 * @bridge_vdev: DP vdev handle for the bridge vdev
 * @arg: buffer for target vdev stats
 *
 * Return: void
 */
static
void dp_mlo_vdev_stats_aggr_bridge_vap(struct dp_vdev_be *be_vdev,
				       struct dp_vdev *bridge_vdev,
				       void *arg)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;
	struct dp_vdev_be *bridge_be_vdev = NULL;

	bridge_be_vdev = dp_get_be_vdev_from_dp_vdev(bridge_vdev);
	if (!bridge_be_vdev)
		return;

	dp_aggregate_all_vdev_stats(tgt_vdev_stats, &bridge_vdev->stats);
	dp_aggregate_all_vdev_stats(tgt_vdev_stats, &bridge_be_vdev->mlo_stats);
	dp_vdev_iterate_peer(bridge_vdev, dp_update_vdev_stats, tgt_vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);
}

/**
 * dp_aggregate_interface_stats_based_on_peer_type() - aggregate stats at
 * the VDEV level based on the type of peer connected to the vdev
 * @vdev: DP VDEV handle
 * @vdev_stats: target vdev stats pointer
 * @peer_type: type of peer - MLO Link or Legacy peer
 *
 * Return: void
 */
static
void dp_aggregate_interface_stats_based_on_peer_type(
					struct dp_vdev *vdev,
					struct cdp_vdev_stats *vdev_stats,
					enum dp_peer_type peer_type)
{
	struct cdp_vdev_stats *tgt_vdev_stats = NULL;
	struct dp_vdev_be *be_vdev = NULL;
	struct dp_soc_be *be_soc = NULL;

	if (!vdev || !vdev->pdev)
		return;

	tgt_vdev_stats = vdev_stats;
	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!be_vdev)
		return;

	if (peer_type == DP_PEER_TYPE_LEGACY) {
		dp_aggregate_all_vdev_stats(tgt_vdev_stats,
					    &vdev->stats);
	} else {
		if (be_vdev->mcast_primary) {
			dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
					      dp_mlo_vdev_stats_aggr_bridge_vap,
					      (void *)vdev_stats,
					      DP_MOD_ID_GENERIC_STATS,
					      DP_BRIDGE_VDEV_ITER);
		}
		dp_aggregate_vdev_ingress_stats(tgt_vdev_stats,
						&vdev->stats);
		dp_aggregate_vdev_stats_for_unmapped_peers(
						tgt_vdev_stats,
						&be_vdev->mlo_stats);
	}

	/* Aggregate associated peer stats */
	dp_vdev_iterate_specific_peer_type(vdev,
					   dp_update_vdev_stats,
					   vdev_stats,
					   DP_MOD_ID_GENERIC_STATS,
					   peer_type);
}

/**
 * dp_aggregate_interface_stats() - aggregate stats at VDEV level
 * @vdev: DP VDEV handle
 * @vdev_stats: target vdev stats pointer
 *
 * Return: void
 */
static
void dp_aggregate_interface_stats(struct dp_vdev *vdev,
				  struct cdp_vdev_stats *vdev_stats)
{
	struct dp_vdev_be *be_vdev = NULL;
	struct dp_soc_be *be_soc = NULL;

	if (!vdev || !vdev->pdev)
		return;

	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!be_vdev)
		return;

	if (be_vdev->mcast_primary) {
		dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
				      dp_mlo_vdev_stats_aggr_bridge_vap,
				      (void *)vdev_stats,
				      DP_MOD_ID_GENERIC_STATS,
				      DP_BRIDGE_VDEV_ITER);
	}

	dp_aggregate_all_vdev_stats(vdev_stats, &be_vdev->mlo_stats);
	dp_aggregate_all_vdev_stats(vdev_stats, &vdev->stats);
	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);
	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
}

/**
 * dp_mlo_aggr_ptnr_iface_stats() - aggregate MLO partner vdev stats
 * @be_vdev: vdev handle
 * @ptnr_vdev: partner vdev handle
 * @arg: target buffer for aggregation
 *
 * Return: void
 */
static
void dp_mlo_aggr_ptnr_iface_stats(struct dp_vdev_be *be_vdev,
				  struct dp_vdev *ptnr_vdev,
				  void *arg)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;

	dp_aggregate_interface_stats(ptnr_vdev, tgt_vdev_stats);
}

/**
 * dp_mlo_aggr_ptnr_iface_stats_mlo_links() - aggregate MLO partner vdev
 * stats based on peer type
 * @be_vdev: vdev handle
 * @ptnr_vdev: partner vdev handle
 * @arg: target buffer for aggregation
 *
 * Return: void
 */
static
void dp_mlo_aggr_ptnr_iface_stats_mlo_links(
					struct dp_vdev_be *be_vdev,
					struct dp_vdev *ptnr_vdev,
					void *arg)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;

	dp_aggregate_interface_stats_based_on_peer_type(ptnr_vdev,
							tgt_vdev_stats,
							DP_PEER_TYPE_MLO_LINK);
}

/**
 * dp_aggregate_sta_interface_stats() - for STA mode, aggregate vdev stats
 * from all link peers
 * @soc: soc handle
 * @vdev: vdev handle
 * @buf: target buffer for aggregation
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_aggregate_sta_interface_stats(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 void *buf)
{
	struct dp_peer *vap_bss_peer = NULL;
	struct dp_peer *mld_peer = NULL;
	struct dp_peer *link_peer = NULL;
	struct dp_mld_link_peers link_peers_info;
	uint8_t i = 0;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
						  DP_MOD_ID_GENERIC_STATS);
	if (!vap_bss_peer)
		return QDF_STATUS_E_FAILURE;

	mld_peer = DP_GET_MLD_PEER_FROM_PEER(vap_bss_peer);

	if (!mld_peer) {
		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_GENERIC_STATS);
		return QDF_STATUS_E_FAILURE;
	}

	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, &link_peers_info,
					    DP_MOD_ID_GENERIC_STATS);

	for (i = 0; i < link_peers_info.num_links; i++) {
		link_peer = link_peers_info.link_peers[i];
		dp_update_vdev_stats(soc, link_peer, buf);
		dp_aggregate_vdev_ingress_stats((struct cdp_vdev_stats *)buf,
						&link_peer->vdev->stats);
		dp_aggregate_vdev_basic_stats(
					(struct cdp_vdev_stats *)buf,
					&link_peer->vdev->stats);
	}

	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_GENERIC_STATS);
	dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_GENERIC_STATS);
	return ret;
}
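
/**
 * dp_mlo_get_mld_vdev_stats() - Aggregate MLD-level vdev stats from the
 * given link vdev and, unless @link_vdev_only is set, its MLO partner vdevs
 * @soc_hdl: CDP soc handle
 * @vdev_id: vdev id of the link vdev
 * @buf: target buffer of type struct cdp_vdev_stats
 * @link_vdev_only: if true, skip aggregation from partner vdevs
 *
 * Return: QDF_STATUS
 */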
static QDF_STATUS dp_mlo_get_mld_vdev_stats(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id, void *buf,
					    bool link_vdev_only)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_GENERIC_STATS);
	struct dp_vdev_be *vdev_be = NULL;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	vdev_be = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!vdev_be) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
		return QDF_STATUS_E_FAILURE;
	}

	if (vdev->opmode == wlan_op_mode_sta) {
		ret = dp_aggregate_sta_interface_stats(soc, vdev, buf);
		goto complete;
	}

	if (DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap &&
	    vdev->opmode == wlan_op_mode_ap) {
		dp_aggregate_interface_stats_based_on_peer_type(
						vdev, buf,
						DP_PEER_TYPE_MLO_LINK);
		if (link_vdev_only)
			goto complete;

		/* Aggregate stats from partner vdevs */
		dp_mlo_iter_ptnr_vdev(be_soc, vdev_be,
				      dp_mlo_aggr_ptnr_iface_stats_mlo_links,
				      buf,
				      DP_MOD_ID_GENERIC_STATS,
				      DP_LINK_VDEV_ITER);
	} else {
		dp_aggregate_interface_stats(vdev, buf);

		if (link_vdev_only)
			goto complete;

		/* Aggregate stats from partner vdevs */
		dp_mlo_iter_ptnr_vdev(be_soc, vdev_be,
				      dp_mlo_aggr_ptnr_iface_stats, buf,
				      DP_MOD_ID_GENERIC_STATS,
				      DP_LINK_VDEV_ITER);
	}

	/* Aggregate vdev stats from MLO ctx for detached MLO Links */
	dp_update_mlo_ctxt_stats(buf, &vdev_be->mlo_dev_ctxt->stats);

complete:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
	return ret;
}

QDF_STATUS
dp_get_interface_stats_be(struct cdp_soc_t *soc_hdl,
			  uint8_t vdev_id,
			  void *buf,
			  bool is_aggregate)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_GENERIC_STATS);
	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	if (DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap &&
	    vdev->opmode == wlan_op_mode_ap) {
		dp_aggregate_interface_stats_based_on_peer_type(
						vdev, buf,
						DP_PEER_TYPE_LEGACY);
	} else {
		dp_aggregate_interface_stats(vdev, buf);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
	return QDF_STATUS_SUCCESS;
}
#endif

static struct cdp_mlo_ops dp_mlo_ops = {
	.mlo_soc_setup = dp_mlo_soc_setup,
	.mlo_soc_teardown = dp_mlo_soc_teardown,
	.update_mlo_ptnr_list = dp_update_mlo_ptnr_list,
	.clear_mlo_ptnr_list = dp_clear_mlo_ptnr_list,
	.mlo_setup_complete = dp_mlo_setup_complete,
	.mlo_update_delta_tsf2 = dp_mlo_update_delta_tsf2,
	.mlo_update_delta_tqm = dp_mlo_update_delta_tqm,
	.mlo_update_mlo_ts_offset = dp_mlo_update_mlo_ts_offset,
	.mlo_ctxt_attach = dp_mlo_ctxt_attach_wifi3,
	.mlo_ctxt_detach = dp_mlo_ctxt_detach_wifi3,
#ifdef CONFIG_MLO_SINGLE_DEV
	.mlo_get_mld_vdev_stats = dp_mlo_get_mld_vdev_stats,
#endif
};

void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!params->mlo_enabled) {
		dp_warn("MLO not enabled on SOC");
		return;
	}

	be_soc->mlo_chip_id = params->mlo_chip_id;
	be_soc->ml_ctxt = cdp_mlo_ctx_to_dp(params->ml_context);
	be_soc->mlo_enabled = 1;
	soc->cdp_soc.ops->mlo_ops = &dp_mlo_ops;
}

void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	uint8_t link_id;

	if (!be_soc->mlo_enabled)
		return;

	if (!ml_ctxt)
		return;

	link_id = be_pdev->mlo_link_id;

	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC) {
		if (!ml_ctxt->link_to_pdev_map[link_id])
			ml_ctxt->link_to_pdev_map[link_id] = be_pdev;
		else
			dp_alert("Attempt to update existing map for link %u",
				 link_id);
	}
}

void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	uint8_t link_id;

	if (!be_soc->mlo_enabled)
		return;

	if (!ml_ctxt)
		return;

	link_id = be_pdev->mlo_link_id;

	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
		ml_ctxt->link_to_pdev_map[link_id] = NULL;
}
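
/**
 * dp_mlo_get_be_pdev_from_link_id() - Look up the BE pdev mapped to an MLO
 * hw link id
 * @ml_ctxt: DP MLO context handle
 * @link_id: MLO hw link id
 *
 * Return: BE pdev mapped to @link_id, or NULL
 */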
static struct dp_pdev_be *
dp_mlo_get_be_pdev_from_link_id(struct dp_mlo_ctxt *ml_ctxt, uint8_t link_id)
{
	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
		return ml_ctxt->link_to_pdev_map[link_id];
	return NULL;
}

void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(pdev->soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);

	if (!be_soc->mlo_enabled) {
		dp_info("MLO not enabled on SOC");
		return;
	}

	be_pdev->mlo_link_id = params->mlo_link_id;
}

void dp_mlo_partner_chips_map(struct dp_soc *soc,
			      struct dp_peer *peer,
			      uint16_t peer_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = NULL;
	bool is_ml_peer_id =
		HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
	uint8_t chip_id;
	struct dp_soc *temp_soc;

	/* for non-ML peers, don't map on partner chips */
	if (!is_ml_peer_id)
		return;

	mlo_ctxt = be_soc->ml_ctxt;
	if (!mlo_ctxt)
		return;

	qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);

	for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
		temp_soc = mlo_ctxt->ml_soc_list[chip_id];

		if (!temp_soc)
			continue;

		/* skip if this is the current soc */
		if (temp_soc == soc)
			continue;

		dp_peer_find_id_to_obj_add(temp_soc, peer, peer_id);
	}
	qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
}

qdf_export_symbol(dp_mlo_partner_chips_map);

void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
				uint16_t peer_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	bool is_ml_peer_id =
		HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
	uint8_t chip_id;
	struct dp_soc *temp_soc;

	if (!is_ml_peer_id)
		return;

	if (!mlo_ctxt)
		return;

	qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);
	for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
		temp_soc = mlo_ctxt->ml_soc_list[chip_id];

		if (!temp_soc)
			continue;

		/* skip if this is the current soc */
		if (temp_soc == soc)
			continue;

		dp_peer_find_id_to_obj_remove(temp_soc, peer_id);
	}
	qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
}

qdf_export_symbol(dp_mlo_partner_chips_unmap);

uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return be_soc->mlo_chip_id;
}

qdf_export_symbol(dp_mlo_get_chip_id);

struct dp_peer *
dp_mlo_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				      uint8_t *peer_mac_addr,
				      int mac_addr_is_aligned,
				      uint8_t vdev_id,
				      uint8_t chip_id,
				      enum dp_mod_id mod_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *link_peer_soc = NULL;
	struct dp_peer *peer = NULL;

	if (!mlo_ctxt)
		return NULL;

	link_peer_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
	if (!link_peer_soc)
		return NULL;

	peer = dp_peer_find_hash_find(link_peer_soc, peer_mac_addr,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
	qdf_atomic_dec(&link_peer_soc->ref_count);
	return peer;
}

qdf_export_symbol(dp_mlo_link_peer_hash_find_by_chip_id);

void dp_mlo_get_rx_hash_key(struct dp_soc *soc,
			    struct cdp_lro_hash_config *lro_hash)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !ml_ctxt)
		return dp_get_rx_hash_key_bytes(lro_hash);

	qdf_mem_copy(lro_hash->toeplitz_hash_ipv4, ml_ctxt->toeplitz_hash_ipv4,
		     (sizeof(lro_hash->toeplitz_hash_ipv4[0]) *
		      LRO_IPV4_SEED_ARR_SZ));
	qdf_mem_copy(lro_hash->toeplitz_hash_ipv6, ml_ctxt->toeplitz_hash_ipv6,
		     (sizeof(lro_hash->toeplitz_hash_ipv6[0]) *
		      LRO_IPV6_SEED_ARR_SZ));
}

struct dp_soc *
dp_rx_replenish_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *replenish_soc;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	if (be_soc->mlo_chip_id == chip_id)
		return soc;

	replenish_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
	if (qdf_unlikely(!replenish_soc)) {
		dp_alert("replenish SOC is NULL");
		qdf_assert_always(0);
	}

	return replenish_soc;
}

uint8_t dp_soc_get_num_soc_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return 1;

	return mlo_ctxt->ml_soc_cnt;
}

struct dp_soc *
dp_soc_get_by_idle_bm_id(struct dp_soc *soc, uint8_t idle_bm_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *partner_soc = NULL;
	uint8_t chip_id;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);

		if (!partner_soc)
			continue;

		if (partner_soc->idle_link_bm_id == idle_bm_id)
			return partner_soc;
	}

	return NULL;
}

#ifdef WLAN_MLO_MULTI_CHIP
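/**
 * dp_print_mlo_partner_list() - Print debug info for one partner vdev;
 * intended as a dp_mlo_iter_ptnr_vdev() callback
 * @be_vdev: BE vdev whose partner list is being walked
 * @partner_vdev: partner vdev to be printed
 * @arg: iterator argument (unused)
 *
 * Return: void
 */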
static void dp_print_mlo_partner_list(struct dp_vdev_be *be_vdev,
				      struct dp_vdev *partner_vdev,
				      void *arg)
{
	struct dp_vdev_be *partner_vdev_be = NULL;
	struct dp_soc_be *partner_soc_be = NULL;

	partner_vdev_be = dp_get_be_vdev_from_dp_vdev(partner_vdev);
	partner_soc_be = dp_get_be_soc_from_dp_soc(partner_vdev->pdev->soc);

	DP_PRINT_STATS("is_bridge_vap = %s, mcast_primary = %s, vdev_id = %d, pdev_id = %d, chip_id = %d",
		       partner_vdev->is_bridge_vdev ? "true" : "false",
		       partner_vdev_be->mcast_primary ? "true" : "false",
		       partner_vdev->vdev_id,
		       partner_vdev->pdev->pdev_id,
		       partner_soc_be->mlo_chip_id);
}
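
/**
 * dp_mlo_iter_ptnr_vdev() - Iterate the partner and/or bridge vdevs of a
 * vdev and invoke a callback on each of them
 * @be_soc: BE soc of @be_vdev
 * @be_vdev: BE vdev whose partner lists are walked
 * @func: callback invoked for each partner vdev
 * @arg: context passed through to @func
 * @mod_id: module id used while taking vdev references
 * @type: DP_LINK_VDEV_ITER, DP_BRIDGE_VDEV_ITER or DP_ALL_VDEV_ITER
 *
 * Return: void
 */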
void dp_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
			   struct dp_vdev_be *be_vdev,
			   dp_ptnr_vdev_iter_func func,
			   void *arg,
			   enum dp_mod_id mod_id,
			   uint8_t type)
{
	int i = 0;
	int j = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;

	if (type < DP_LINK_VDEV_ITER || type > DP_ALL_VDEV_ITER) {
		dp_err("invalid iterate type");
		return;
	}

	for (i = 0; (i < WLAN_MAX_MLO_CHIPS) &&
	     IS_LINK_VDEV_ITER_REQUIRED(type); i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *ptnr_vdev;

			ptnr_vdev = dp_vdev_get_ref_by_id(
					ptnr_soc,
					be_vdev->partner_vdev_list[i][j],
					mod_id);
			if (!ptnr_vdev)
				continue;

			(*func)(be_vdev, ptnr_vdev, arg);
			dp_vdev_unref_delete(ptnr_vdev->pdev->soc,
					     ptnr_vdev,
					     mod_id);
		}
	}

	for (i = 0; (i < WLAN_MAX_MLO_CHIPS) &&
	     IS_BRIDGE_VDEV_ITER_REQUIRED(type); i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *bridge_vdev;

			bridge_vdev = dp_vdev_get_ref_by_id(
					ptnr_soc,
					be_vdev->bridge_vdev_list[i][j],
					mod_id);
			if (!bridge_vdev)
				continue;

			(*func)(be_vdev, bridge_vdev, arg);
			dp_vdev_unref_delete(bridge_vdev->pdev->soc,
					     bridge_vdev,
					     mod_id);
		}
	}
}

qdf_export_symbol(dp_mlo_iter_ptnr_vdev);

void dp_mlo_debug_print_ptnr_info(struct dp_vdev *vdev)
{
	struct dp_vdev_be *be_vdev = NULL;
	struct dp_soc_be *be_soc = NULL;

	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	DP_PRINT_STATS("self vdev is_bridge_vap = %s, mcast_primary = %s, vdev = %d, pdev_id = %d, chip_id = %d",
		       vdev->is_bridge_vdev ? "true" : "false",
		       be_vdev->mcast_primary ? "true" : "false",
		       vdev->vdev_id,
		       vdev->pdev->pdev_id,
		       dp_mlo_get_chip_id(vdev->pdev->soc));

	dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
			      dp_print_mlo_partner_list,
			      NULL, DP_MOD_ID_GENERIC_STATS,
			      DP_ALL_VDEV_ITER);
}
#endif

#ifdef WLAN_MCAST_MLO
struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
					      struct dp_vdev_be *be_vdev,
					      enum dp_mod_id mod_id)
{
	int i = 0;
	int j = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
	struct dp_vdev *vdev = (struct dp_vdev *)be_vdev;

	if (be_vdev->mcast_primary) {
		if (dp_vdev_get_ref((struct dp_soc *)be_soc, vdev, mod_id) !=
		    QDF_STATUS_SUCCESS)
			return NULL;

		return vdev;
	}

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *ptnr_vdev = NULL;
			struct dp_vdev_be *be_ptnr_vdev = NULL;

			ptnr_vdev = dp_vdev_get_ref_by_id(
					ptnr_soc,
					be_vdev->partner_vdev_list[i][j],
					mod_id);
			if (!ptnr_vdev)
				continue;

			be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
			if (be_ptnr_vdev->mcast_primary)
				return ptnr_vdev;

			dp_vdev_unref_delete(be_ptnr_vdev->vdev.pdev->soc,
					     &be_ptnr_vdev->vdev,
					     mod_id);
		}
	}

	return NULL;
}

qdf_export_symbol(dp_mlo_get_mcast_primary_vdev);
#endif

/**
 * dp_mlo_iter_ptnr_soc() - iterate through the MLO soc list and call the
 * callback for each soc
 * @be_soc: dp_soc_be pointer
 * @func: function to be called for each soc
 * @arg: context to be passed to the callback
 *
 * Return: true if MLO is enabled, false if MLO is disabled
 */
bool dp_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc, dp_ptnr_soc_iter_func func,
			  void *arg)
{
	int i = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !be_soc->ml_ctxt)
		return false;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;

		(*func)(ptnr_soc, arg, i);
	}

	return true;
}

qdf_export_symbol(dp_mlo_iter_ptnr_soc);
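
/*
 * Illustrative sketch (not driver code): a caller can walk all partner
 * SOCs in the MLO group by passing a callback of type
 * dp_ptnr_soc_iter_func to dp_mlo_iter_ptnr_soc(); the callback name
 * below is hypothetical.
 *
 *	static void dp_dump_ptnr_soc(struct dp_soc *ptnr_soc, void *arg,
 *				     int chip_id)
 *	{
 *		dp_info("partner soc %pK on chip %d", ptnr_soc, chip_id);
 *	}
 *
 *	dp_mlo_iter_ptnr_soc(be_soc, dp_dump_ptnr_soc, NULL);
 */

/**
 * dp_mlo_get_mlo_ts_offset() - Get the MLO timestamp offset of the soc to
 * which the given pdev belongs
 * @be_pdev: BE pdev handle
 *
 * Return: MLO timestamp offset
 */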
  1207. static inline uint64_t dp_mlo_get_mlo_ts_offset(struct dp_pdev_be *be_pdev)
  1208. {
  1209. struct dp_soc *soc;
  1210. struct dp_pdev *pdev;
  1211. struct dp_soc_be *be_soc;
  1212. uint32_t mlo_offset;
  1213. pdev = &be_pdev->pdev;
  1214. soc = pdev->soc;
  1215. be_soc = dp_get_be_soc_from_dp_soc(soc);
  1216. mlo_offset = be_soc->mlo_tstamp_offset;
  1217. return mlo_offset;
  1218. }
  1219. int32_t dp_mlo_get_delta_tsf2_wrt_mlo_offset(struct dp_soc *soc,
  1220. uint8_t hw_link_id)
  1221. {
  1222. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  1223. struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
  1224. struct dp_pdev_be *be_pdev;
  1225. int32_t delta_tsf2_mlo_offset;
  1226. int32_t mlo_offset, delta_tsf2;
  1227. if (!ml_ctxt)
  1228. return 0;
  1229. be_pdev = dp_mlo_get_be_pdev_from_link_id(ml_ctxt, hw_link_id);
  1230. if (!be_pdev)
  1231. return 0;
  1232. mlo_offset = dp_mlo_get_mlo_ts_offset(be_pdev);
  1233. delta_tsf2 = be_pdev->delta_tsf2;
  1234. delta_tsf2_mlo_offset = mlo_offset - delta_tsf2;
  1235. return delta_tsf2_mlo_offset;
  1236. }
  1237. int32_t dp_mlo_get_delta_tqm_wrt_mlo_offset(struct dp_soc *soc)
  1238. {
  1239. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  1240. int32_t delta_tqm_mlo_offset;
  1241. int32_t mlo_offset, delta_tqm;
  1242. mlo_offset = be_soc->mlo_tstamp_offset;
  1243. delta_tqm = be_soc->delta_tqm;
  1244. delta_tqm_mlo_offset = mlo_offset - delta_tqm;
  1245. return delta_tqm_mlo_offset;
  1246. }
  1247. #ifdef DP_UMAC_HW_RESET_SUPPORT
  1248. /**
  1249. * dp_umac_reset_update_partner_map() - Update Umac reset partner map
  1250. * @mlo_ctx: DP ML context handle
  1251. * @chip_id: chip id
  1252. * @set: flag indicating whether to set or clear the bit
  1253. *
  1254. * Return: void
  1255. */
  1256. static void dp_umac_reset_update_partner_map(struct dp_mlo_ctxt *mlo_ctx,
  1257. int chip_id, bool set)
  1258. {
  1259. struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx =
  1260. &mlo_ctx->grp_umac_reset_ctx;
  1261. if (set)
  1262. qdf_atomic_set_bit(chip_id, &grp_umac_reset_ctx->partner_map);
  1263. else
  1264. qdf_atomic_clear_bit(chip_id, &grp_umac_reset_ctx->partner_map);
  1265. }
  1266. QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc)
  1267. {
  1268. struct dp_mlo_ctxt *mlo_ctx;
  1269. struct dp_soc_be *be_soc;
  1270. be_soc = dp_get_be_soc_from_dp_soc(soc);
  1271. if (!be_soc) {
  1272. dp_umac_reset_err("null be_soc");
  1273. return QDF_STATUS_E_NULL_VALUE;
  1274. }
  1275. mlo_ctx = be_soc->ml_ctxt;
  1276. if (!mlo_ctx) {
  1277. /* This API can be called for non-MLO SOC as well. Hence, return
  1278. * the status as success when mlo_ctx is NULL.
  1279. */
  1280. return QDF_STATUS_SUCCESS;
  1281. }
  1282. dp_umac_reset_update_partner_map(mlo_ctx, be_soc->mlo_chip_id, false);
  1283. return QDF_STATUS_SUCCESS;
  1284. }
  1285. /**
  1286. * dp_umac_reset_complete_umac_recovery() - Complete Umac reset session
  1287. * @soc: dp soc handle
  1288. *
  1289. * Return: void
  1290. */
  1291. void dp_umac_reset_complete_umac_recovery(struct dp_soc *soc)
  1292. {
  1293. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  1294. struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
  1295. struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
  1296. if (!mlo_ctx) {
  1297. dp_umac_reset_alert("Umac reset was handled on soc %pK", soc);
  1298. return;
  1299. }
  1300. grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
  1301. qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
  1302. grp_umac_reset_ctx->umac_reset_in_progress = false;
  1303. grp_umac_reset_ctx->is_target_recovery = false;
  1304. grp_umac_reset_ctx->response_map = 0;
  1305. grp_umac_reset_ctx->request_map = 0;
  1306. grp_umac_reset_ctx->initiator_chip_id = 0;
  1307. qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
  1308. dp_umac_reset_alert("Umac reset was handled on mlo group ctxt %pK",
  1309. mlo_ctx);
  1310. }
  1311. /**
  1312. * dp_umac_reset_initiate_umac_recovery() - Initiate Umac reset session
  1313. * @soc: dp soc handle
  1314. * @umac_reset_ctx: Umac reset context
  1315. * @rx_event: Rx event received
  1316. * @is_target_recovery: Flag to indicate if it is triggered for target recovery
  1317. *
  1318. * Return: status
  1319. */
  1320. QDF_STATUS dp_umac_reset_initiate_umac_recovery(struct dp_soc *soc,
  1321. struct dp_soc_umac_reset_ctx *umac_reset_ctx,
  1322. enum umac_reset_rx_event rx_event,
  1323. bool is_target_recovery)
  1324. {
  1325. struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
  1326. struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
  1327. struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
  1328. QDF_STATUS status = QDF_STATUS_SUCCESS;
  1329. if (!mlo_ctx)
  1330. return dp_umac_reset_validate_n_update_state_machine_on_rx(
  1331. umac_reset_ctx, rx_event,
  1332. UMAC_RESET_STATE_WAIT_FOR_TRIGGER,
  1333. UMAC_RESET_STATE_DO_TRIGGER_RECEIVED);
  1334. grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
  1335. qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
	if (grp_umac_reset_ctx->umac_reset_in_progress) {
		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return QDF_STATUS_E_INVAL;
	}

	status = dp_umac_reset_validate_n_update_state_machine_on_rx(
					umac_reset_ctx, rx_event,
					UMAC_RESET_STATE_WAIT_FOR_TRIGGER,
					UMAC_RESET_STATE_DO_TRIGGER_RECEIVED);
	if (status != QDF_STATUS_SUCCESS) {
		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return status;
	}

	grp_umac_reset_ctx->umac_reset_in_progress = true;
	grp_umac_reset_ctx->is_target_recovery = is_target_recovery;

	/* We don't wait for the 'Umac trigger' message from all socs */
	grp_umac_reset_ctx->request_map = grp_umac_reset_ctx->partner_map;
	grp_umac_reset_ctx->response_map = grp_umac_reset_ctx->partner_map;
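
	/* Illustrative example (values are hypothetical): with two partner
	 * chips, partner_map = 0x3; pre-filling request_map and
	 * response_map to 0x3 lets this initiator proceed without waiting
	 * for a trigger message from the other soc.
	 */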
	grp_umac_reset_ctx->initiator_chip_id = dp_mlo_get_chip_id(soc);
	grp_umac_reset_ctx->umac_reset_count++;
	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_umac_reset_handle_action_cb() - Function to call action callback
 * @soc: dp soc handle
 * @umac_reset_ctx: Umac reset context
 * @action: Action to call the callback for
 *
 * Return: QDF status of operation
 */
QDF_STATUS
dp_umac_reset_handle_action_cb(struct dp_soc *soc,
			       struct dp_soc_umac_reset_ctx *umac_reset_ctx,
			       enum umac_reset_action action)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_debug("MLO context is Null");
		goto handle;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;

	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	qdf_atomic_set_bit(dp_mlo_get_chip_id(soc),
			   &grp_umac_reset_ctx->request_map);

	dp_umac_reset_debug("partner_map %u request_map %u",
			    grp_umac_reset_ctx->partner_map,
			    grp_umac_reset_ctx->request_map);
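
	/* Illustrative example (values are hypothetical): with
	 * partner_map = 0x3, the first chip to arrive sets its bit and
	 * sees request_map = 0x1 != partner_map, so it defers the action
	 * and reschedules its tasklet; once the second chip sets its bit,
	 * request_map = 0x3 and the action callback is invoked.
	 */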
	/* This logic is needed for synchronization between mlo socs */
	if ((grp_umac_reset_ctx->partner_map & grp_umac_reset_ctx->request_map)
			!= grp_umac_reset_ctx->partner_map) {
		struct hif_softc *hif_sc = HIF_GET_SOFTC(soc->hif_handle);
		struct hif_umac_reset_ctx *hif_umac_reset_ctx;

		if (!hif_sc) {
			hif_err("scn is null");
			qdf_assert_always(0);
			/* Release the group lock on the error path */
			qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
			return QDF_STATUS_E_FAILURE;
		}
		hif_umac_reset_ctx = &hif_sc->umac_reset_ctx;

		/* Mark the action as pending */
		umac_reset_ctx->pending_action = action;
		/* Reschedule the tasklet and exit */
		tasklet_hi_schedule(&hif_umac_reset_ctx->intr_tq);
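
		/* The rescheduled tasklet is expected to notice the
		 * pending action, re-enter this handler and re-evaluate
		 * the request map once more partner socs have checked in.
		 */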
		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return QDF_STATUS_SUCCESS;
	}
	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	umac_reset_ctx->pending_action = UMAC_RESET_ACTION_NONE;

handle:
	if (!umac_reset_ctx->rx_actions.cb[action]) {
		dp_umac_reset_err("rx callback is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	return umac_reset_ctx->rx_actions.cb[action](soc);
}

/**
 * dp_umac_reset_post_tx_cmd() - Iterate partner socs and post Tx command
 * @umac_reset_ctx: UMAC reset context
 * @tx_cmd: Tx command to be posted
 *
 * Return: QDF status of operation
 */
QDF_STATUS
dp_umac_reset_post_tx_cmd(struct dp_soc_umac_reset_ctx *umac_reset_ctx,
			  enum umac_reset_tx_cmd tx_cmd)
{
	struct dp_soc *soc = container_of(umac_reset_ctx, struct dp_soc,
					  umac_reset_ctx);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_post_tx_cmd_via_shmem(soc, &tx_cmd, 0);
		return QDF_STATUS_SUCCESS;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;

	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	qdf_atomic_set_bit(dp_mlo_get_chip_id(soc),
			   &grp_umac_reset_ctx->response_map);

	/* This logic is needed for synchronization between mlo socs */
	if ((grp_umac_reset_ctx->partner_map & grp_umac_reset_ctx->response_map)
			!= grp_umac_reset_ctx->partner_map) {
		dp_umac_reset_debug(
			"Response(s) pending : expected map %u current map %u",
			grp_umac_reset_ctx->partner_map,
			grp_umac_reset_ctx->response_map);

		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return QDF_STATUS_SUCCESS;
	}

	dp_umac_reset_debug(
		"All responses received: expected map %u current map %u",
		grp_umac_reset_ctx->partner_map,
		grp_umac_reset_ctx->response_map);

	grp_umac_reset_ctx->response_map = 0;
	grp_umac_reset_ctx->request_map = 0;
	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
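
	/* This soc is the last responder; the maps have been cleared for
	 * the next phase, and the Tx command is now fanned out to every
	 * partner soc over the shared-memory interface.
	 */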
	dp_mlo_iter_ptnr_soc(be_soc, &dp_umac_reset_post_tx_cmd_via_shmem,
			     &tx_cmd);

	if (tx_cmd == UMAC_RESET_TX_CMD_POST_RESET_COMPLETE_DONE)
		dp_umac_reset_complete_umac_recovery(soc);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_umac_reset_initiator_check() - Check if soc is the Umac reset initiator
 * @soc: dp soc handle
 *
 * Return: true if the soc is initiator or false otherwise
 */
bool dp_umac_reset_initiator_check(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
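
	/* A non-MLO soc is trivially the initiator of its own Umac reset */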
	if (!mlo_ctx)
		return true;

	return (mlo_ctx->grp_umac_reset_ctx.initiator_chip_id ==
		dp_mlo_get_chip_id(soc));
}

/**
 * dp_umac_reset_target_recovery_check() - Check if this is for target recovery
 * @soc: dp soc handle
 *
 * Return: true if the session is for target recovery or false otherwise
 */
bool dp_umac_reset_target_recovery_check(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return false;

	return mlo_ctx->grp_umac_reset_ctx.is_target_recovery;
}

/**
 * dp_umac_reset_is_soc_ignored() - Check if this soc is to be ignored
 * @soc: dp soc handle
 *
 * Return: true if the soc is ignored or false otherwise
 */
bool dp_umac_reset_is_soc_ignored(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return false;
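
	/* A soc whose bit is clear in partner_map is not part of the
	 * current Umac reset session and is therefore ignored.
	 */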
	return !qdf_atomic_test_bit(dp_mlo_get_chip_id(soc),
				    &mlo_ctx->grp_umac_reset_ctx.partner_map);
}
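
/**
 * dp_mlo_umac_reset_stats_print() - Print MLO UMAC reset stats
 * @soc: dp soc handle
 *
 * Return: QDF status of operation
 */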
QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc)
{
	struct dp_mlo_ctxt *mlo_ctx;
	struct dp_soc_be *be_soc;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (!be_soc) {
		dp_umac_reset_err("null be_soc");
		return QDF_STATUS_E_NULL_VALUE;
	}

	mlo_ctx = be_soc->ml_ctxt;
	if (!mlo_ctx) {
		/* This API can be called for non-MLO SOC as well. Hence,
		 * return the status as success when mlo_ctx is NULL.
		 */
		return QDF_STATUS_SUCCESS;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;

	DP_UMAC_RESET_PRINT_STATS("MLO UMAC RESET stats\n"
				  "\t\tPartner map :%x\n"
				  "\t\tRequest map :%x\n"
				  "\t\tResponse map :%x\n"
				  "\t\tIs target recovery :%d\n"
				  "\t\tIs Umac reset in progress :%d\n"
				  "\t\tNumber of UMAC resets triggered :%d\n"
				  "\t\tInitiator chip ID :%d\n",
				  grp_umac_reset_ctx->partner_map,
				  grp_umac_reset_ctx->request_map,
				  grp_umac_reset_ctx->response_map,
				  grp_umac_reset_ctx->is_target_recovery,
				  grp_umac_reset_ctx->umac_reset_in_progress,
				  grp_umac_reset_ctx->umac_reset_count,
				  grp_umac_reset_ctx->initiator_chip_id);

	return QDF_STATUS_SUCCESS;
}
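
/**
 * dp_get_umac_reset_in_progress_state() - Get the Umac reset in-progress state
 * @psoc: CDP soc handle
 *
 * Return: Umac reset state from enum cdp_umac_reset_state
 */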
enum cdp_umac_reset_state
dp_get_umac_reset_in_progress_state(struct cdp_soc_t *psoc)
{
	struct dp_soc_umac_reset_ctx *umac_reset_ctx;
	struct dp_soc *soc = (struct dp_soc *)psoc;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
	struct dp_soc_be *be_soc = NULL;
	struct dp_mlo_ctxt *mlo_ctx = NULL;
	enum cdp_umac_reset_state umac_reset_is_inprogress;

	if (!soc) {
		dp_umac_reset_err("DP SOC is null");
		return CDP_UMAC_RESET_INVALID_STATE;
	}

	umac_reset_ctx = &soc->umac_reset_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (be_soc)
		mlo_ctx = be_soc->ml_ctxt;

	if (mlo_ctx) {
		grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
		umac_reset_is_inprogress =
			grp_umac_reset_ctx->umac_reset_in_progress;
	} else {
		umac_reset_is_inprogress = (umac_reset_ctx->current_state !=
					    UMAC_RESET_STATE_WAIT_FOR_TRIGGER);
	}

	if (umac_reset_is_inprogress)
		return CDP_UMAC_RESET_IN_PROGRESS;

	/* Check if the umac reset was in progress during the buffer
	 * window.
	 */
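	/* Illustrative example (values are hypothetical): with a buffer
	 * window of 100 ms, a query made 40000 us after
	 * post_reset_complete_done still reports
	 * CDP_UMAC_RESET_IN_PROGRESS_DURING_BUFFER_WINDOW,
	 * since 40000 <= 100 * 1000.
	 */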
	umac_reset_is_inprogress =
		((qdf_get_log_timestamp_usecs() -
		  umac_reset_ctx->ts.post_reset_complete_done) <=
		 (wlan_cfg_get_umac_reset_buffer_window_ms(soc->wlan_cfg_ctx) *
		  1000));

	return (umac_reset_is_inprogress ?
		CDP_UMAC_RESET_IN_PROGRESS_DURING_BUFFER_WINDOW :
		CDP_UMAC_RESET_NOT_IN_PROGRESS);
}

#endif
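
/**
 * dp_get_soc_by_chip_id_be() - Get dp soc for the given MLO chip id
 * @soc: dp soc handle
 * @chip_id: MLO chip id
 *
 * Return: dp soc of the partner with @chip_id, or @soc itself if MLO is
 * disabled or @chip_id matches the current soc
 */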
struct dp_soc *
dp_get_soc_by_chip_id_be(struct dp_soc *soc, uint8_t chip_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *partner_soc;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	if (be_soc->mlo_chip_id == chip_id)
		return soc;

	partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
	return partner_soc;
}