dp_mlo.c

/*
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <wlan_utility.h>
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_be_api.h>
#include "dp_mlo.h"
#include <dp_be.h>
#include <dp_be_rx.h>
#include <wlan_cfg.h>
#include <wlan_mlo_mgr_cmn.h>

/**
 * dp_mlo_ctxt_attach_wifi3() - Attach DP MLO context
 * @ctrl_ctxt: CDP control context
 *
 * Return: DP MLO context handle on success, NULL on failure
 */
static struct cdp_mlo_ctxt *
dp_mlo_ctxt_attach_wifi3(struct cdp_ctrl_mlo_mgr *ctrl_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt =
		qdf_mem_malloc(sizeof(struct dp_mlo_ctxt));

	if (!mlo_ctxt) {
		dp_err("Failed to allocate DP MLO Context");
		return NULL;
	}

	mlo_ctxt->ctrl_ctxt = ctrl_ctxt;

	if (dp_mlo_peer_find_hash_attach_be
			(mlo_ctxt, DP_MAX_MLO_PEER) != QDF_STATUS_SUCCESS) {
		dp_err("Failed to allocate peer hash");
		qdf_mem_free(mlo_ctxt);
		return NULL;
	}

	qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv4,
			     (sizeof(mlo_ctxt->toeplitz_hash_ipv4[0]) *
			      LRO_IPV4_SEED_ARR_SZ));
	qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv6,
			     (sizeof(mlo_ctxt->toeplitz_hash_ipv6[0]) *
			      LRO_IPV6_SEED_ARR_SZ));

	qdf_spinlock_create(&mlo_ctxt->ml_soc_list_lock);
	return dp_mlo_ctx_to_cdp(mlo_ctxt);
}

/**
 * dp_mlo_ctxt_detach_wifi3() - Detach DP MLO context
 * @cdp_ml_ctxt: pointer to CDP DP MLO context
 *
 * Return: void
 */
static void dp_mlo_ctxt_detach_wifi3(struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);

	if (!cdp_ml_ctxt)
		return;

	qdf_spinlock_destroy(&mlo_ctxt->ml_soc_list_lock);
	dp_mlo_peer_find_hash_detach_be(mlo_ctxt);
	qdf_mem_free(mlo_ctxt);
}

/**
 * dp_mlo_set_soc_by_chip_id() - Add DP soc to the ML context soc list
 * @ml_ctxt: DP ML context handle
 * @soc: DP soc handle
 * @chip_id: MLO chip id
 *
 * Return: void
 */
void dp_mlo_set_soc_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
			       struct dp_soc *soc,
			       uint8_t chip_id)
{
	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
	ml_ctxt->ml_soc_list[chip_id] = soc;

	/* The same API is called during soc_attach and soc_detach;
	 * the soc parameter is non-NULL or NULL accordingly.
	 */
	if (soc)
		ml_ctxt->ml_soc_cnt++;
	else
		ml_ctxt->ml_soc_cnt--;

	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
}

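/*
 * Illustrative pairing (a sketch, not driver logic): the soc setup and
 * teardown paths later in this file register and unregister a soc with the
 * same helper, passing NULL on teardown so the soc count is decremented.
 *
 *	dp_mlo_set_soc_by_chip_id(mlo_ctxt, soc, be_soc->mlo_chip_id);
 *	...
 *	dp_mlo_set_soc_by_chip_id(mlo_ctxt, NULL, be_soc->mlo_chip_id);
 */
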
/**
 * dp_mlo_get_soc_ref_by_chip_id() - Get DP soc from the DP ML context.
 * This API increments the reference count of the DP soc; the caller has to
 * take care of decrementing the reference count.
 * @ml_ctxt: DP ML context handle
 * @chip_id: MLO chip id
 *
 * Return: dp_soc
 */
struct dp_soc *
dp_mlo_get_soc_ref_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
			      uint8_t chip_id)
{
	struct dp_soc *soc = NULL;

	if (!ml_ctxt) {
		dp_warn("MLO context not created, MLO not enabled");
		return NULL;
	}

	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
	soc = ml_ctxt->ml_soc_list[chip_id];

	if (!soc) {
		qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
		return NULL;
	}

	qdf_atomic_inc(&soc->ref_count);
	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);

	return soc;
}

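/*
 * Illustrative caller pattern (a sketch): a soc returned by
 * dp_mlo_get_soc_ref_by_chip_id() carries an extra reference that the
 * caller must drop, as dp_link_peer_hash_find_by_chip_id() below does.
 *
 *	struct dp_soc *ptnr_soc =
 *		dp_mlo_get_soc_ref_by_chip_id(ml_ctxt, chip_id);
 *
 *	if (ptnr_soc) {
 *		... use ptnr_soc ...
 *		qdf_atomic_dec(&ptnr_soc->ref_count);
 *	}
 */
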
static QDF_STATUS dp_partner_soc_rx_hw_cc_init(struct dp_mlo_ctxt *mlo_ctxt,
					       struct dp_soc_be *be_soc)
{
	uint8_t i;
	struct dp_soc *partner_soc;
	struct dp_soc_be *be_partner_soc;
	uint8_t pool_id;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
		if (!partner_soc) {
			dp_err("partner_soc is NULL");
			continue;
		}

		be_partner_soc = dp_get_be_soc_from_dp_soc(partner_soc);

		for (pool_id = 0; pool_id < MAX_RXDESC_POOLS; pool_id++) {
			qdf_status =
				dp_hw_cookie_conversion_init
					(be_soc,
					 &be_partner_soc->rx_cc_ctx[pool_id]);
			if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
				dp_alert("MLO partner soc RX CC init failed");
				return qdf_status;
			}
		}
	}

	return qdf_status;
}

static void dp_mlo_soc_drain_rx_buf(struct dp_soc *soc, void *arg)
{
	uint8_t i = 0;
	uint8_t cpu = 0;
	uint8_t rx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_err_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_wbm_rel_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t reo_status_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};

	/* Save the current interrupt mask and disable the interrupts */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		rx_ring_mask[i] = soc->intr_ctx[i].rx_ring_mask;
		rx_err_ring_mask[i] = soc->intr_ctx[i].rx_err_ring_mask;
		rx_wbm_rel_ring_mask[i] = soc->intr_ctx[i].rx_wbm_rel_ring_mask;
		reo_status_ring_mask[i] = soc->intr_ctx[i].reo_status_ring_mask;

		soc->intr_ctx[i].rx_ring_mask = 0;
		soc->intr_ctx[i].rx_err_ring_mask = 0;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
		soc->intr_ctx[i].reo_status_ring_mask = 0;
	}

	/* make sure dp_service_srngs is not running on any of the CPUs */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		while (qdf_atomic_test_bit(cpu,
					   &soc->service_rings_running))
			;
	}

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		uint8_t ring = 0;
		uint32_t num_entries = 0;
		hal_ring_handle_t hal_ring_hdl = NULL;
		uint8_t rx_mask = wlan_cfg_get_rx_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_err_mask = wlan_cfg_get_rx_err_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_wbm_rel_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
						soc->wlan_cfg_ctx, i);

		if (rx_mask) {
			/* iterate through each reo ring and process the buf */
			for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
				if (!(rx_mask & (1 << ring)))
					continue;

				hal_ring_hdl =
					soc->reo_dest_ring[ring].hal_srng;
				num_entries = hal_srng_get_num_entries(
								soc->hal_soc,
								hal_ring_hdl);
				dp_rx_process_be(&soc->intr_ctx[i],
						 hal_ring_hdl,
						 ring,
						 num_entries);
			}
		}

		/* Process REO Exception ring */
		if (rx_err_mask) {
			hal_ring_hdl = soc->reo_exception_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
						soc->hal_soc,
						hal_ring_hdl);

			dp_rx_err_process(&soc->intr_ctx[i], soc,
					  hal_ring_hdl, num_entries);
		}

		/* Process Rx WBM release ring */
		if (rx_wbm_rel_mask) {
			hal_ring_hdl = soc->rx_rel_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
						soc->hal_soc,
						hal_ring_hdl);

			dp_rx_wbm_err_process(&soc->intr_ctx[i], soc,
					      hal_ring_hdl, num_entries);
		}
	}

	/* restore the interrupt mask */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].rx_ring_mask = rx_ring_mask[i];
		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask[i];
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask[i];
		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask[i];
	}
}

static void dp_mlo_soc_setup(struct cdp_soc_t *soc_hdl,
			     struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	uint8_t pdev_id;

	if (!cdp_ml_ctxt)
		return;

	be_soc->ml_ctxt = mlo_ctxt;

	for (pdev_id = 0; pdev_id < MAX_PDEV_CNT; pdev_id++) {
		if (soc->pdev_list[pdev_id])
			dp_mlo_update_link_to_pdev_map(soc,
						       soc->pdev_list[pdev_id]);
	}

	dp_mlo_set_soc_by_chip_id(mlo_ctxt, soc, be_soc->mlo_chip_id);
}

static void dp_mlo_soc_teardown(struct cdp_soc_t *soc_hdl,
				struct cdp_mlo_ctxt *cdp_ml_ctxt,
				bool is_force_down)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!cdp_ml_ctxt)
		return;

	/* During teardown, drain any Rx buffers still present in the rings */
	dp_mcast_mlo_iter_ptnr_soc(be_soc,
				   dp_mlo_soc_drain_rx_buf,
				   NULL);

	dp_mlo_set_soc_by_chip_id(mlo_ctxt, NULL, be_soc->mlo_chip_id);
	be_soc->ml_ctxt = NULL;
}

static QDF_STATUS dp_mlo_add_ptnr_vdev(struct dp_vdev *vdev1,
				       struct dp_vdev *vdev2,
				       struct dp_soc *soc, uint8_t pdev_id)
{
	struct dp_soc_be *soc_be = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *vdev2_be = dp_get_be_vdev_from_dp_vdev(vdev2);

	/* return when valid entry exists */
	if (vdev2_be->partner_vdev_list[soc_be->mlo_chip_id][pdev_id] !=
	    CDP_INVALID_VDEV_ID)
		return QDF_STATUS_SUCCESS;

	if (dp_vdev_get_ref(soc, vdev1, DP_MOD_ID_RX) !=
	    QDF_STATUS_SUCCESS) {
		qdf_info("%pK: unable to get vdev reference vdev %pK vdev_id %u",
			 soc, vdev1, vdev1->vdev_id);
		return QDF_STATUS_E_FAILURE;
	}

	vdev2_be->partner_vdev_list[soc_be->mlo_chip_id][pdev_id] =
		vdev1->vdev_id;

	mlo_debug("Add vdev%d to vdev%d list, mlo_chip_id = %d pdev_id = %d\n",
		  vdev1->vdev_id, vdev2->vdev_id, soc_be->mlo_chip_id, pdev_id);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_update_mlo_ptnr_list(struct cdp_soc_t *soc_hdl,
				   int8_t partner_vdev_ids[], uint8_t num_vdevs,
				   uint8_t self_vdev_id)
{
	int i, j;
	struct dp_soc *self_soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *self_vdev;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(self_soc);
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;

	if (!dp_mlo)
		return QDF_STATUS_E_FAILURE;

	self_vdev = dp_vdev_get_ref_by_id(self_soc, self_vdev_id, DP_MOD_ID_RX);
	if (!self_vdev)
		return QDF_STATUS_E_FAILURE;

	/* Go through the input vdev id list; for every partner vdev found,
	 * - add the current vdev's id to the partner vdev's list (keyed by
	 *   pdev_id) and increase the reference
	 * - add the partner vdev to the self list and increase the reference
	 */
	for (i = 0; i < num_vdevs; i++) {
		if (partner_vdev_ids[i] == CDP_INVALID_VDEV_ID)
			continue;

		for (j = 0; j < WLAN_MAX_MLO_CHIPS; j++) {
			struct dp_soc *soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, j);
			if (soc) {
				struct dp_vdev *vdev;

				vdev = dp_vdev_get_ref_by_id(soc,
					partner_vdev_ids[i], DP_MOD_ID_RX);
				if (vdev) {
					if (vdev == self_vdev) {
						dp_vdev_unref_delete(soc,
							vdev, DP_MOD_ID_RX);
						/*dp_soc_unref_delete(soc); */
						continue;
					}
					if (qdf_is_macaddr_equal(
						(struct qdf_mac_addr *)self_vdev->mld_mac_addr.raw,
						(struct qdf_mac_addr *)vdev->mld_mac_addr.raw)) {
						if (dp_mlo_add_ptnr_vdev(self_vdev,
							vdev, self_soc,
							self_vdev->pdev->pdev_id) !=
							QDF_STATUS_SUCCESS) {
							dp_err("Unable to add self to partner vdev's list");
							dp_vdev_unref_delete(soc,
								vdev, DP_MOD_ID_RX);
							/* TODO - release soc ref here */
							/* dp_soc_unref_delete(soc);*/
							ret = QDF_STATUS_E_FAILURE;
							goto exit;
						}
						/* add to self list */
						if (dp_mlo_add_ptnr_vdev(vdev, self_vdev, soc,
							vdev->pdev->pdev_id) !=
							QDF_STATUS_SUCCESS) {
							dp_err("Unable to add vdev to self vdev's list");
							dp_vdev_unref_delete(self_soc,
								vdev, DP_MOD_ID_RX);
							/* TODO - release soc ref here */
							/* dp_soc_unref_delete(soc);*/
							ret = QDF_STATUS_E_FAILURE;
							goto exit;
						}
					}
					dp_vdev_unref_delete(soc, vdev,
							     DP_MOD_ID_RX);
				} /* vdev */
				/* TODO - release soc ref here */
				/* dp_soc_unref_delete(soc); */
			} /* soc */
		} /* for */
	} /* for */

exit:
	dp_vdev_unref_delete(self_soc, self_vdev, DP_MOD_ID_RX);
	return ret;
}

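/*
 * Illustrative call (a sketch; the vdev ids below are hypothetical): the
 * caller passes the candidate partner vdev ids and its own vdev id; slots
 * holding CDP_INVALID_VDEV_ID are skipped by the loop above.
 *
 *	int8_t partner_ids[3] = { 3, CDP_INVALID_VDEV_ID, 7 };
 *
 *	dp_update_mlo_ptnr_list(soc_hdl, partner_ids, 3, self_vdev_id);
 */
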
void dp_clr_mlo_ptnr_list(struct dp_soc *soc, struct dp_vdev *vdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev_be *vdev_be = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
	uint8_t soc_id = be_soc->mlo_chip_id;
	uint8_t pdev_id = vdev->pdev->pdev_id;
	int i, j;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *pr_vdev;
			struct dp_soc *pr_soc;
			struct dp_soc_be *pr_soc_be;
			struct dp_pdev *pr_pdev;
			struct dp_vdev_be *pr_vdev_be;

			if (vdev_be->partner_vdev_list[i][j] ==
			    CDP_INVALID_VDEV_ID)
				continue;

			pr_soc = dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);
			if (!pr_soc)
				continue;

			pr_soc_be = dp_get_be_soc_from_dp_soc(pr_soc);
			pr_vdev = dp_vdev_get_ref_by_id(pr_soc,
					vdev_be->partner_vdev_list[i][j],
					DP_MOD_ID_RX);
			if (!pr_vdev)
				continue;

			/* release ref and remove self vdev from partner list */
			pr_vdev_be = dp_get_be_vdev_from_dp_vdev(pr_vdev);
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
			pr_vdev_be->partner_vdev_list[soc_id][pdev_id] =
				CDP_INVALID_VDEV_ID;

			/* release ref and remove partner vdev from self list */
			dp_vdev_unref_delete(pr_soc, pr_vdev, DP_MOD_ID_RX);
			pr_pdev = pr_vdev->pdev;
			vdev_be->partner_vdev_list[pr_soc_be->mlo_chip_id][pr_pdev->pdev_id] =
				CDP_INVALID_VDEV_ID;

			dp_vdev_unref_delete(pr_soc, pr_vdev, DP_MOD_ID_RX);
		}
	}
}

static void dp_mlo_setup_complete(struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	int i;
	struct dp_soc *soc;
	struct dp_soc_be *be_soc;
	QDF_STATUS qdf_status;

	if (!cdp_ml_ctxt)
		return;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
		if (!soc)
			continue;

		be_soc = dp_get_be_soc_from_dp_soc(soc);
		qdf_status = dp_partner_soc_rx_hw_cc_init(mlo_ctxt, be_soc);

		if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
			dp_alert("MLO partner SOC Rx desc CC init failed");
			qdf_assert_always(0);
		}
	}
}

static void dp_mlo_update_delta_tsf2(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id, uint64_t delta_tsf2)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;
	struct dp_pdev_be *be_pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						  pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL for pdev_id %u", pdev_id);
		return;
	}

	be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);

	be_pdev->delta_tsf2 = delta_tsf2;
}

static void dp_mlo_update_delta_tqm(struct cdp_soc_t *soc_hdl,
				    uint64_t delta_tqm)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	be_soc->delta_tqm = delta_tqm;
}

static void dp_mlo_update_mlo_ts_offset(struct cdp_soc_t *soc_hdl,
					uint64_t offset)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	be_soc->mlo_tstamp_offset = offset;
}

static struct cdp_mlo_ops dp_mlo_ops = {
	.mlo_soc_setup = dp_mlo_soc_setup,
	.mlo_soc_teardown = dp_mlo_soc_teardown,
	.update_mlo_ptnr_list = dp_update_mlo_ptnr_list,
	.mlo_setup_complete = dp_mlo_setup_complete,
	.mlo_update_delta_tsf2 = dp_mlo_update_delta_tsf2,
	.mlo_update_delta_tqm = dp_mlo_update_delta_tqm,
	.mlo_update_mlo_ts_offset = dp_mlo_update_mlo_ts_offset,
	.mlo_ctxt_attach = dp_mlo_ctxt_attach_wifi3,
	.mlo_ctxt_detach = dp_mlo_ctxt_detach_wifi3,
};

void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!params->mlo_enabled) {
		dp_warn("MLO not enabled on SOC");
		return;
	}

	be_soc->mlo_chip_id = params->mlo_chip_id;
	be_soc->ml_ctxt = cdp_mlo_ctx_to_dp(params->ml_context);
	be_soc->mlo_enabled = 1;
	soc->cdp_soc.ops->mlo_ops = &dp_mlo_ops;
}

void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	uint8_t link_id;

	if (!be_soc->mlo_enabled)
		return;

	if (!ml_ctxt)
		return;

	link_id = be_pdev->mlo_link_id;

	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC) {
		if (!ml_ctxt->link_to_pdev_map[link_id])
			ml_ctxt->link_to_pdev_map[link_id] = be_pdev;
		else
			dp_alert("Attempt to update existing map for link %u",
				 link_id);
	}
}

void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	uint8_t link_id;

	if (!be_soc->mlo_enabled)
		return;

	if (!ml_ctxt)
		return;

	link_id = be_pdev->mlo_link_id;

	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
		ml_ctxt->link_to_pdev_map[link_id] = NULL;
}

static struct dp_pdev_be *
dp_mlo_get_be_pdev_from_link_id(struct dp_mlo_ctxt *ml_ctxt, uint8_t link_id)
{
	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
		return ml_ctxt->link_to_pdev_map[link_id];

	return NULL;
}

void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(pdev->soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);

	if (!be_soc->mlo_enabled) {
		dp_info("MLO not enabled on SOC");
		return;
	}

	be_pdev->mlo_link_id = params->mlo_link_id;
}

void dp_mlo_partner_chips_map(struct dp_soc *soc,
			      struct dp_peer *peer,
			      uint16_t peer_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = NULL;
	bool is_ml_peer_id =
		HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
	uint8_t chip_id;
	struct dp_soc *temp_soc;

	/* for a non-ML peer, don't map on partner chips */
	if (!is_ml_peer_id)
		return;

	mlo_ctxt = be_soc->ml_ctxt;
	if (!mlo_ctxt)
		return;

	qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);

	for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
		temp_soc = mlo_ctxt->ml_soc_list[chip_id];

		if (!temp_soc)
			continue;

		/* skip if this is the current soc */
		if (temp_soc == soc)
			continue;

		dp_peer_find_id_to_obj_add(temp_soc, peer, peer_id);
	}
	qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
}

qdf_export_symbol(dp_mlo_partner_chips_map);

void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
				uint16_t peer_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	bool is_ml_peer_id =
		HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
	uint8_t chip_id;
	struct dp_soc *temp_soc;

	if (!is_ml_peer_id)
		return;

	if (!mlo_ctxt)
		return;

	qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);

	for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
		temp_soc = mlo_ctxt->ml_soc_list[chip_id];

		if (!temp_soc)
			continue;

		/* skip if this is the current soc */
		if (temp_soc == soc)
			continue;

		dp_peer_find_id_to_obj_remove(temp_soc, peer_id);
	}
	qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
}

qdf_export_symbol(dp_mlo_partner_chips_unmap);

uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return be_soc->mlo_chip_id;
}

qdf_export_symbol(dp_mlo_get_chip_id);

struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *link_peer_soc = NULL;
	struct dp_peer *peer = NULL;

	if (!mlo_ctxt)
		return NULL;

	link_peer_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);

	if (!link_peer_soc)
		return NULL;

	peer = dp_peer_find_hash_find(link_peer_soc, peer_mac_addr,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);

	qdf_atomic_dec(&link_peer_soc->ref_count);
	return peer;
}

qdf_export_symbol(dp_link_peer_hash_find_by_chip_id);

void dp_mlo_get_rx_hash_key(struct dp_soc *soc,
			    struct cdp_lro_hash_config *lro_hash)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !ml_ctxt)
		return dp_get_rx_hash_key_bytes(lro_hash);

	qdf_mem_copy(lro_hash->toeplitz_hash_ipv4, ml_ctxt->toeplitz_hash_ipv4,
		     (sizeof(lro_hash->toeplitz_hash_ipv4[0]) *
		      LRO_IPV4_SEED_ARR_SZ));
	qdf_mem_copy(lro_hash->toeplitz_hash_ipv6, ml_ctxt->toeplitz_hash_ipv6,
		     (sizeof(lro_hash->toeplitz_hash_ipv6[0]) *
		      LRO_IPV6_SEED_ARR_SZ));
}

void dp_mlo_set_rx_fst(struct dp_soc *soc, struct dp_rx_fst *fst)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;

	if (be_soc->mlo_enabled && ml_ctxt)
		ml_ctxt->rx_fst = fst;
}

struct dp_rx_fst *dp_mlo_get_rx_fst(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;

	if (be_soc->mlo_enabled && ml_ctxt)
		return ml_ctxt->rx_fst;

	return NULL;
}

void dp_mlo_rx_fst_ref(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;

	if (be_soc->mlo_enabled && ml_ctxt)
		ml_ctxt->rx_fst_ref_cnt++;
}

uint8_t dp_mlo_rx_fst_deref(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	uint8_t rx_fst_ref_cnt;

	if (be_soc->mlo_enabled && ml_ctxt) {
		rx_fst_ref_cnt = ml_ctxt->rx_fst_ref_cnt;
		ml_ctxt->rx_fst_ref_cnt--;
		return rx_fst_ref_cnt;
	}

	return 1;
}

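/*
 * Illustrative sharing pattern (a sketch, not driver logic): the MLO Rx FST
 * is stored once in the ML context and reference-counted with the helpers
 * above. dp_mlo_rx_fst_deref() returns the count held before the decrement,
 * so a return value of 1 identifies the last user.
 *
 *	dp_mlo_set_rx_fst(soc, fst);
 *	dp_mlo_rx_fst_ref(soc);
 *	...
 *	if (dp_mlo_rx_fst_deref(soc) == 1)
 *		... last user; the caller may tear down the shared FST ...
 */
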
struct dp_soc *
dp_rx_replensih_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *replenish_soc;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	if (be_soc->mlo_chip_id == chip_id)
		return soc;

	replenish_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
	if (qdf_unlikely(!replenish_soc)) {
		dp_alert("replenish SOC is NULL");
		qdf_assert_always(0);
	}

	return replenish_soc;
}

uint8_t dp_soc_get_num_soc_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return 1;

	return mlo_ctxt->ml_soc_cnt;
}

struct dp_soc *
dp_soc_get_by_idle_bm_id(struct dp_soc *soc, uint8_t idle_bm_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *partner_soc = NULL;
	uint8_t chip_id;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);

		if (!partner_soc)
			continue;

		if (partner_soc->idle_link_bm_id == idle_bm_id)
			return partner_soc;
	}

	return NULL;
}

#ifdef WLAN_MCAST_MLO
void dp_mcast_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc,
				dp_ptnr_soc_iter_func func,
				void *arg)
{
	int i = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		struct dp_soc *ptnr_soc =
			dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		(*func)(ptnr_soc, arg);
	}
}

qdf_export_symbol(dp_mcast_mlo_iter_ptnr_soc);

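/*
 * Illustrative usage (a sketch): the iterator invokes the supplied callback
 * once per registered partner soc, exactly as dp_mlo_soc_teardown() above
 * uses it to drain the Rx rings of every partner soc.
 *
 *	dp_mcast_mlo_iter_ptnr_soc(be_soc, dp_mlo_soc_drain_rx_buf, NULL);
 */
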
void dp_mcast_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
				 struct dp_vdev_be *be_vdev,
				 dp_ptnr_vdev_iter_func func,
				 void *arg,
				 enum dp_mod_id mod_id)
{
	int i = 0;
	int j = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		struct dp_soc *ptnr_soc =
			dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;

		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *ptnr_vdev;

			ptnr_vdev = dp_vdev_get_ref_by_id(
					ptnr_soc,
					be_vdev->partner_vdev_list[i][j],
					mod_id);

			if (!ptnr_vdev)
				continue;
			(*func)(be_vdev, ptnr_vdev, arg);
			dp_vdev_unref_delete(ptnr_vdev->pdev->soc,
					     ptnr_vdev,
					     mod_id);
		}
	}
}

qdf_export_symbol(dp_mcast_mlo_iter_ptnr_vdev);

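/*
 * Illustrative usage (a sketch; my_vdev_cb is a hypothetical callback, not a
 * symbol from this driver): the iterator walks every valid entry of
 * be_vdev->partner_vdev_list[][] and holds a vdev reference around each
 * callback invocation. DP_MOD_ID_RX is shown only because it is the module
 * id already used elsewhere in this file.
 *
 *	static void my_vdev_cb(struct dp_vdev_be *be_vdev,
 *			       struct dp_vdev *ptnr_vdev, void *arg)
 *	{
 *		...
 *	}
 *
 *	dp_mcast_mlo_iter_ptnr_vdev(be_soc, be_vdev, my_vdev_cb, NULL,
 *				    DP_MOD_ID_RX);
 */
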
struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
					      struct dp_vdev_be *be_vdev,
					      enum dp_mod_id mod_id)
{
	int i = 0;
	int j = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		struct dp_soc *ptnr_soc =
			dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;

		for (j = 0; j < WLAN_MAX_MLO_LINKS_PER_SOC; j++) {
			struct dp_vdev *ptnr_vdev = NULL;
			struct dp_vdev_be *be_ptnr_vdev = NULL;

			ptnr_vdev = dp_vdev_get_ref_by_id(
					ptnr_soc,
					be_vdev->partner_vdev_list[i][j],
					mod_id);

			if (!ptnr_vdev)
				continue;

			be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
			if (be_ptnr_vdev->mcast_primary)
				return ptnr_vdev;
			dp_vdev_unref_delete(be_ptnr_vdev->vdev.pdev->soc,
					     &be_ptnr_vdev->vdev,
					     mod_id);
		}
	}

	return NULL;
}

qdf_export_symbol(dp_mlo_get_mcast_primary_vdev);
#endif

static inline uint64_t dp_mlo_get_mlo_ts_offset(struct dp_pdev_be *be_pdev)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev;
	struct dp_soc_be *be_soc;
	uint32_t mlo_offset;

	pdev = &be_pdev->pdev;
	soc = pdev->soc;
	be_soc = dp_get_be_soc_from_dp_soc(soc);

	mlo_offset = be_soc->mlo_tstamp_offset;

	return mlo_offset;
}

int32_t dp_mlo_get_delta_tsf2_wrt_mlo_offset(struct dp_soc *soc,
					     uint8_t hw_link_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	struct dp_pdev_be *be_pdev;
	int32_t delta_tsf2_mlo_offset;
	int32_t mlo_offset, delta_tsf2;

	if (!ml_ctxt)
		return 0;

	be_pdev = dp_mlo_get_be_pdev_from_link_id(ml_ctxt, hw_link_id);
	if (!be_pdev)
		return 0;

	mlo_offset = dp_mlo_get_mlo_ts_offset(be_pdev);
	delta_tsf2 = be_pdev->delta_tsf2;

	delta_tsf2_mlo_offset = mlo_offset - delta_tsf2;

	return delta_tsf2_mlo_offset;
}

int32_t dp_mlo_get_delta_tqm_wrt_mlo_offset(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	int32_t delta_tqm_mlo_offset;
	int32_t mlo_offset, delta_tqm;

	mlo_offset = be_soc->mlo_tstamp_offset;
	delta_tqm = be_soc->delta_tqm;

	delta_tqm_mlo_offset = mlo_offset - delta_tqm;

	return delta_tqm_mlo_offset;
}