dp_peer.h 60 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404
  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #ifndef _DP_PEER_H_
  20. #define _DP_PEER_H_
  21. #include <qdf_types.h>
  22. #include <qdf_lock.h>
  23. #include "dp_types.h"
  24. #include "dp_internal.h"
  25. #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
  26. #include "hal_reo.h"
  27. #endif
  28. #define DP_INVALID_PEER_ID 0xffff
  29. #define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
  30. #define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */
  31. #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
  32. #define DP_PEER_HASH_LOAD_MULT 2
  33. #define DP_PEER_HASH_LOAD_SHIFT 0
  34. /* Threshold for peer's cached buf queue beyond which frames are dropped */
  35. #define DP_RX_CACHED_BUFQ_THRESH 64
  36. #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
  37. #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
  38. #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
  39. #define dp_peer_info(params...) \
  40. __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
  41. #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
  42. #ifdef REO_QDESC_HISTORY
/**
 * enum reo_qdesc_event_type - type of REO queue descriptor history event
 * @REO_QDESC_UPDATE_CB: event recorded from the qdesc update callback
 * @REO_QDESC_FREE: event recorded when the qdesc is freed
 */
enum reo_qdesc_event_type {
	REO_QDESC_UPDATE_CB = 0,
	REO_QDESC_FREE,
};

/**
 * struct reo_qdesc_event - one record in the REO qdesc history
 * @qdesc_addr: DMA address of the REO queue descriptor
 * @ts: timestamp at which the event was logged
 * @type: event type (update-callback or free)
 * @peer_mac: MAC address of the peer the descriptor belongs to
 */
struct reo_qdesc_event {
	qdf_dma_addr_t qdesc_addr;
	uint64_t ts;
	enum reo_qdesc_event_type type;
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
};
  53. #endif
/**
 * struct ast_del_ctxt - context passed through AST entry deletion
 * @age: presumably true when deletion is driven by ageout — confirm at callers
 * @del_count: running count of AST entries deleted
 */
struct ast_del_ctxt {
	bool age;
	int del_count;
};
  58. #ifdef QCA_SUPPORT_WDS_EXTENDED
  59. /**
  60. * dp_peer_is_wds_ext_peer() - peer is WDS_EXT peer
  61. *
  62. * @peer: DP peer context
  63. *
  64. * This API checks whether the peer is WDS_EXT peer or not
  65. *
  66. * Return: true in the wds_ext peer else flase
  67. */
  68. static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
  69. {
  70. return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
  71. }
  72. #else
  73. static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
  74. {
  75. return false;
  76. }
  77. #endif
  78. typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
  79. void *arg);
  80. /**
  81. * dp_peer_unref_delete() - unref and delete peer
  82. * @peer: Datapath peer handle
  83. * @id: ID of module releasing reference
  84. *
  85. */
  86. void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
  87. /**
  88. * dp_txrx_peer_unref_delete() - unref and delete peer
  89. * @handle: Datapath txrx ref handle
  90. * @id: Module ID of the caller
  91. *
  92. */
  93. void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
  94. /**
  95. * dp_peer_find_hash_find() - returns legacy or mlo link peer from
  96. * peer_hash_table matching vdev_id and mac_address
  97. * @soc: soc handle
  98. * @peer_mac_addr: peer mac address
  99. * @mac_addr_is_aligned: is mac addr aligned
  100. * @vdev_id: vdev_id
  101. * @mod_id: id of module requesting reference
  102. *
  103. * return: peer in success
  104. * NULL in failure
  105. */
  106. struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
  107. uint8_t *peer_mac_addr,
  108. int mac_addr_is_aligned,
  109. uint8_t vdev_id,
  110. enum dp_mod_id mod_id);
  111. /**
  112. * dp_peer_find_by_id_valid - check if peer exists for given id
  113. * @soc: core DP soc context
  114. * @peer_id: peer id from peer object can be retrieved
  115. *
  116. * Return: true if peer exists of false otherwise
  117. */
  118. bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
  119. /**
  120. * dp_peer_get_ref() - Returns peer object given the peer id
  121. *
  122. * @soc: core DP soc context
  123. * @peer: DP peer
  124. * @mod_id: id of module requesting the reference
  125. *
  126. * Return: QDF_STATUS_SUCCESS if reference held successfully
  127. * else QDF_STATUS_E_INVAL
  128. */
  129. static inline
  130. QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
  131. struct dp_peer *peer,
  132. enum dp_mod_id mod_id)
  133. {
  134. if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
  135. return QDF_STATUS_E_INVAL;
  136. if (mod_id > DP_MOD_ID_RX)
  137. qdf_atomic_inc(&peer->mod_refs[mod_id]);
  138. return QDF_STATUS_SUCCESS;
  139. }
  140. /**
  141. * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
  142. *
  143. * @soc: core DP soc context
  144. * @peer_id: peer id from peer object can be retrieved
  145. * @mod_id: module id
  146. *
  147. * Return: struct dp_peer*: Pointer to DP peer object
  148. */
  149. static inline struct dp_peer *
  150. __dp_peer_get_ref_by_id(struct dp_soc *soc,
  151. uint16_t peer_id,
  152. enum dp_mod_id mod_id)
  153. {
  154. struct dp_peer *peer;
  155. qdf_spin_lock_bh(&soc->peer_map_lock);
  156. peer = (peer_id >= soc->max_peer_id) ? NULL :
  157. soc->peer_id_to_obj_map[peer_id];
  158. if (!peer ||
  159. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  160. qdf_spin_unlock_bh(&soc->peer_map_lock);
  161. return NULL;
  162. }
  163. qdf_spin_unlock_bh(&soc->peer_map_lock);
  164. return peer;
  165. }
  166. /**
  167. * dp_peer_get_ref_by_id() - Returns peer object given the peer id
  168. * if peer state is active
  169. *
  170. * @soc: core DP soc context
  171. * @peer_id: peer id from peer object can be retrieved
  172. * @mod_id: ID of module requesting reference
  173. *
  174. * Return: struct dp_peer*: Pointer to DP peer object
  175. */
  176. static inline
  177. struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
  178. uint16_t peer_id,
  179. enum dp_mod_id mod_id)
  180. {
  181. struct dp_peer *peer;
  182. qdf_spin_lock_bh(&soc->peer_map_lock);
  183. peer = (peer_id >= soc->max_peer_id) ? NULL :
  184. soc->peer_id_to_obj_map[peer_id];
  185. if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
  186. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  187. qdf_spin_unlock_bh(&soc->peer_map_lock);
  188. return NULL;
  189. }
  190. qdf_spin_unlock_bh(&soc->peer_map_lock);
  191. return peer;
  192. }
  193. /**
  194. * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
  195. *
  196. * @soc: core DP soc context
  197. * @peer_id: peer id from peer object can be retrieved
  198. * @handle: reference handle
  199. * @mod_id: ID of module requesting reference
  200. *
  201. * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
  202. */
  203. static inline struct dp_txrx_peer *
  204. dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
  205. uint16_t peer_id,
  206. dp_txrx_ref_handle *handle,
  207. enum dp_mod_id mod_id)
  208. {
  209. struct dp_peer *peer;
  210. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  211. if (!peer)
  212. return NULL;
  213. if (!peer->txrx_peer) {
  214. dp_peer_unref_delete(peer, mod_id);
  215. return NULL;
  216. }
  217. *handle = (dp_txrx_ref_handle)peer;
  218. return peer->txrx_peer;
  219. }
  220. #ifdef PEER_CACHE_RX_PKTS
  221. /**
  222. * dp_rx_flush_rx_cached() - flush cached rx frames
  223. * @peer: peer
  224. * @drop: set flag to drop frames
  225. *
  226. * Return: None
  227. */
  228. void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
  229. #else
/* PEER_CACHE_RX_PKTS disabled: nothing is cached, so nothing to flush */
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
  233. #endif
/**
 * dp_clear_peer_internal() - Move a peer to the disconnect state and
 *			      flush (drop) any rx frames cached for it
 * @soc: DP soc context
 * @peer: DP peer
 *
 * Return: None
 */
static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);
	/* drop = true: cached frames are discarded, not delivered */
	dp_rx_flush_rx_cached(peer, true);
}
  242. /**
  243. * dp_vdev_iterate_peer() - API to iterate through vdev peer list
  244. *
  245. * @vdev: DP vdev context
  246. * @func: function to be called for each peer
  247. * @arg: argument need to be passed to func
  248. * @mod_id: module_id
  249. *
  250. * Return: void
  251. */
  252. static inline void
  253. dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
  254. enum dp_mod_id mod_id)
  255. {
  256. struct dp_peer *peer;
  257. struct dp_peer *tmp_peer;
  258. struct dp_soc *soc = NULL;
  259. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  260. return;
  261. soc = vdev->pdev->soc;
  262. qdf_spin_lock_bh(&vdev->peer_list_lock);
  263. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  264. peer_list_elem,
  265. tmp_peer) {
  266. if (dp_peer_get_ref(soc, peer, mod_id) ==
  267. QDF_STATUS_SUCCESS) {
  268. (*func)(soc, peer, arg);
  269. dp_peer_unref_delete(peer, mod_id);
  270. }
  271. }
  272. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  273. }
  274. /**
  275. * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
  276. *
  277. * @pdev: DP pdev context
  278. * @func: function to be called for each peer
  279. * @arg: argument need to be passed to func
  280. * @mod_id: module_id
  281. *
  282. * Return: void
  283. */
  284. static inline void
  285. dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
  286. enum dp_mod_id mod_id)
  287. {
  288. struct dp_vdev *vdev;
  289. if (!pdev)
  290. return;
  291. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  292. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
  293. dp_vdev_iterate_peer(vdev, func, arg, mod_id);
  294. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  295. }
  296. /**
  297. * dp_soc_iterate_peer() - API to iterate through all peers of soc
  298. *
  299. * @soc: DP soc context
  300. * @func: function to be called for each peer
  301. * @arg: argument need to be passed to func
  302. * @mod_id: module_id
  303. *
  304. * Return: void
  305. */
  306. static inline void
  307. dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
  308. enum dp_mod_id mod_id)
  309. {
  310. struct dp_pdev *pdev;
  311. int i;
  312. if (!soc)
  313. return;
  314. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  315. pdev = soc->pdev_list[i];
  316. dp_pdev_iterate_peer(pdev, func, arg, mod_id);
  317. }
  318. }
  319. /**
  320. * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
  321. *
  322. * This API will cache the peers in local allocated memory and calls
  323. * iterate function outside the lock.
  324. *
  325. * As this API is allocating new memory it is suggested to use this
  326. * only when lock cannot be held
  327. *
  328. * @vdev: DP vdev context
  329. * @func: function to be called for each peer
  330. * @arg: argument need to be passed to func
  331. * @mod_id: module_id
  332. *
  333. * Return: void
  334. */
  335. static inline void
  336. dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
  337. dp_peer_iter_func *func,
  338. void *arg,
  339. enum dp_mod_id mod_id)
  340. {
  341. struct dp_peer *peer;
  342. struct dp_peer *tmp_peer;
  343. struct dp_soc *soc = NULL;
  344. struct dp_peer **peer_array = NULL;
  345. int i = 0;
  346. uint32_t num_peers = 0;
  347. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  348. return;
  349. num_peers = vdev->num_peers;
  350. soc = vdev->pdev->soc;
  351. peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
  352. if (!peer_array)
  353. return;
  354. qdf_spin_lock_bh(&vdev->peer_list_lock);
  355. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  356. peer_list_elem,
  357. tmp_peer) {
  358. if (i >= num_peers)
  359. break;
  360. if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
  361. peer_array[i] = peer;
  362. i = (i + 1);
  363. }
  364. }
  365. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  366. for (i = 0; i < num_peers; i++) {
  367. peer = peer_array[i];
  368. if (!peer)
  369. continue;
  370. (*func)(soc, peer, arg);
  371. dp_peer_unref_delete(peer, mod_id);
  372. }
  373. qdf_mem_free(peer_array);
  374. }
  375. /**
  376. * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
  377. *
  378. * This API will cache the peers in local allocated memory and calls
  379. * iterate function outside the lock.
  380. *
  381. * As this API is allocating new memory it is suggested to use this
  382. * only when lock cannot be held
  383. *
  384. * @pdev: DP pdev context
  385. * @func: function to be called for each peer
  386. * @arg: argument need to be passed to func
  387. * @mod_id: module_id
  388. *
  389. * Return: void
  390. */
  391. static inline void
  392. dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
  393. dp_peer_iter_func *func,
  394. void *arg,
  395. enum dp_mod_id mod_id)
  396. {
  397. struct dp_peer *peer;
  398. struct dp_peer *tmp_peer;
  399. struct dp_soc *soc = NULL;
  400. struct dp_vdev *vdev = NULL;
  401. struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
  402. int i = 0;
  403. int j = 0;
  404. uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
  405. if (!pdev || !pdev->soc)
  406. return;
  407. soc = pdev->soc;
  408. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  409. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  410. num_peers[i] = vdev->num_peers;
  411. peer_array[i] = qdf_mem_malloc(num_peers[i] *
  412. sizeof(struct dp_peer *));
  413. if (!peer_array[i])
  414. break;
  415. qdf_spin_lock_bh(&vdev->peer_list_lock);
  416. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  417. peer_list_elem,
  418. tmp_peer) {
  419. if (j >= num_peers[i])
  420. break;
  421. if (dp_peer_get_ref(soc, peer, mod_id) ==
  422. QDF_STATUS_SUCCESS) {
  423. peer_array[i][j] = peer;
  424. j = (j + 1);
  425. }
  426. }
  427. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  428. i = (i + 1);
  429. }
  430. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  431. for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
  432. if (!peer_array[i])
  433. break;
  434. for (j = 0; j < num_peers[i]; j++) {
  435. peer = peer_array[i][j];
  436. if (!peer)
  437. continue;
  438. (*func)(soc, peer, arg);
  439. dp_peer_unref_delete(peer, mod_id);
  440. }
  441. qdf_mem_free(peer_array[i]);
  442. }
  443. }
  444. /**
  445. * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
  446. *
  447. * This API will cache the peers in local allocated memory and calls
  448. * iterate function outside the lock.
  449. *
  450. * As this API is allocating new memory it is suggested to use this
  451. * only when lock cannot be held
  452. *
  453. * @soc: DP soc context
  454. * @func: function to be called for each peer
  455. * @arg: argument need to be passed to func
  456. * @mod_id: module_id
  457. *
  458. * Return: void
  459. */
  460. static inline void
  461. dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
  462. dp_peer_iter_func *func,
  463. void *arg,
  464. enum dp_mod_id mod_id)
  465. {
  466. struct dp_pdev *pdev;
  467. int i;
  468. if (!soc)
  469. return;
  470. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  471. pdev = soc->pdev_list[i];
  472. dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
  473. }
  474. }
#ifdef DP_PEER_STATE_DEBUG
/*
 * DP_PEER_STATE_ASSERT() - validate a peer state transition.
 * When @_condition is false, log the invalid shift from the peer's current
 * peer_state to @_new_state and assert (debug builds only).
 */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)
#else
/* Non-debug variant: log the invalid transition but do not assert */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif
  497. /**
  498. * dp_peer_state_cmp() - compare dp peer state
  499. *
  500. * @peer: DP peer
  501. * @state: state
  502. *
  503. * Return: true if state matches with peer state
  504. * false if it does not match
  505. */
  506. static inline bool
  507. dp_peer_state_cmp(struct dp_peer *peer,
  508. enum dp_peer_state state)
  509. {
  510. bool is_status_equal = false;
  511. qdf_spin_lock_bh(&peer->peer_state_lock);
  512. is_status_equal = (peer->peer_state == state);
  513. qdf_spin_unlock_bh(&peer->peer_state_lock);
  514. return is_status_equal;
  515. }
  516. /**
  517. * dp_print_ast_stats() - Dump AST table contents
  518. * @soc: Datapath soc handle
  519. *
  520. * Return: void
  521. */
  522. void dp_print_ast_stats(struct dp_soc *soc);
  523. /**
  524. * dp_rx_peer_map_handler() - handle peer map event from firmware
  525. * @soc: generic soc handle
  526. * @peer_id: peer_id from firmware
  527. * @hw_peer_id: ast index for this peer
  528. * @vdev_id: vdev ID
  529. * @peer_mac_addr: mac address of the peer
  530. * @ast_hash: ast hash value
  531. * @is_wds: flag to indicate peer map event for WDS ast entry
  532. *
  533. * associate the peer_id that firmware provided with peer entry
  534. * and update the ast table in the host with the hw_peer_id.
  535. *
  536. * Return: QDF_STATUS code
  537. */
  538. QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  539. uint16_t hw_peer_id, uint8_t vdev_id,
  540. uint8_t *peer_mac_addr, uint16_t ast_hash,
  541. uint8_t is_wds);
  542. /**
  543. * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
  544. * @soc: generic soc handle
  545. * @peer_id: peer_id from firmware
  546. * @vdev_id: vdev ID
  547. * @peer_mac_addr: mac address of the peer or wds entry
  548. * @is_wds: flag to indicate peer map event for WDS ast entry
  549. * @free_wds_count: number of wds entries freed by FW with peer delete
  550. *
  551. * Return: none
  552. */
  553. void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
  554. uint8_t vdev_id, uint8_t *peer_mac_addr,
  555. uint8_t is_wds, uint32_t free_wds_count);
  556. #ifdef DP_RX_UDP_OVER_PEER_ROAM
  557. /**
  558. * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
  559. * @soc: dp soc pointer
  560. * @vdev_id: vdev id
  561. * @peer_mac_addr: mac address of the peer
  562. *
  563. * This function resets the roamed peer auth status and mac address
  564. * after peer map indication of same peer is received from firmware.
  565. *
  566. * Return: None
  567. */
  568. void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
  569. uint8_t *peer_mac_addr);
  570. #else
  571. static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
  572. uint8_t *peer_mac_addr)
  573. {
  574. }
  575. #endif
  576. #ifdef WLAN_FEATURE_11BE_MLO
  577. /**
  578. * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
  579. * @soc: generic soc handle
  580. * @peer_id: ML peer_id from firmware
  581. * @peer_mac_addr: mac address of the peer
  582. * @mlo_flow_info: MLO AST flow info
  583. * @mlo_link_info: MLO link info
  584. *
  585. * associate the ML peer_id that firmware provided with peer entry
  586. * and update the ast table in the host with the hw_peer_id.
  587. *
  588. * Return: QDF_STATUS code
  589. */
  590. QDF_STATUS
  591. dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  592. uint8_t *peer_mac_addr,
  593. struct dp_mlo_flow_override_info *mlo_flow_info,
  594. struct dp_mlo_link_info *mlo_link_info);
  595. /**
  596. * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
  597. * @soc: generic soc handle
  598. * @peer_id: peer_id from firmware
  599. *
  600. * Return: none
  601. */
  602. void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
  603. #endif
  604. void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
  605. enum cdp_sec_type sec_type, int is_unicast,
  606. u_int32_t *michael_key, u_int32_t *rx_pn);
  607. uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
  608. uint16_t peer_id, uint8_t *peer_mac);
  609. /**
  610. * dp_peer_add_ast() - Allocate and add AST entry into peer list
  611. * @soc: SoC handle
  612. * @peer: peer to which ast node belongs
  613. * @mac_addr: MAC address of ast node
  614. * @type: AST entry type
  615. * @flags: AST configuration flags
  616. *
  617. * This API is used by WDS source port learning function to
  618. * add a new AST entry into peer AST list
  619. *
  620. * Return: QDF_STATUS code
  621. */
  622. QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
  623. uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
  624. uint32_t flags);
  625. /**
  626. * dp_peer_del_ast() - Delete and free AST entry
  627. * @soc: SoC handle
  628. * @ast_entry: AST entry of the node
  629. *
  630. * This function removes the AST entry from peer and soc tables
  631. * It assumes caller has taken the ast lock to protect the access to these
  632. * tables
  633. *
  634. * Return: None
  635. */
  636. void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
  637. void dp_peer_ast_unmap_handler(struct dp_soc *soc,
  638. struct dp_ast_entry *ast_entry);
  639. /**
  640. * dp_peer_update_ast() - Delete and free AST entry
  641. * @soc: SoC handle
  642. * @peer: peer to which ast node belongs
  643. * @ast_entry: AST entry of the node
  644. * @flags: wds or hmwds
  645. *
  646. * This function update the AST entry to the roamed peer and soc tables
  647. * It assumes caller has taken the ast lock to protect the access to these
  648. * tables
  649. *
  650. * Return: 0 if ast entry is updated successfully
  651. * -1 failure
  652. */
  653. int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
  654. struct dp_ast_entry *ast_entry, uint32_t flags);
  655. /**
  656. * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
  657. * @soc: SoC handle
  658. * @ast_mac_addr: Mac address
  659. * @pdev_id: pdev Id
  660. *
  661. * It assumes caller has taken the ast lock to protect the access to
  662. * AST hash table
  663. *
  664. * Return: AST entry
  665. */
  666. struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
  667. uint8_t *ast_mac_addr,
  668. uint8_t pdev_id);
  669. /**
  670. * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address
  671. * @soc: SoC handle
  672. * @ast_mac_addr: Mac address
  673. * @vdev_id: vdev Id
  674. *
  675. * It assumes caller has taken the ast lock to protect the access to
  676. * AST hash table
  677. *
  678. * Return: AST entry
  679. */
  680. struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
  681. uint8_t *ast_mac_addr,
  682. uint8_t vdev_id);
  683. /**
  684. * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
  685. * @soc: SoC handle
  686. * @ast_mac_addr: Mac address
  687. *
  688. * It assumes caller has taken the ast lock to protect the access to
  689. * AST hash table
  690. *
  691. * Return: AST entry
  692. */
  693. struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
  694. uint8_t *ast_mac_addr);
  695. /**
  696. * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
  697. * @soc: SoC handle
  698. * @ast_entry: AST entry of the node
  699. *
  700. * This function gets the pdev_id from the ast entry.
  701. *
  702. * Return: (uint8_t) pdev_id
  703. */
  704. uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
  705. struct dp_ast_entry *ast_entry);
  706. /**
  707. * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
  708. * @soc: SoC handle
  709. * @ast_entry: AST entry of the node
  710. *
  711. * This function gets the next hop from the ast entry.
  712. *
  713. * Return: (uint8_t) next_hop
  714. */
  715. uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
  716. struct dp_ast_entry *ast_entry);
  717. /**
  718. * dp_peer_ast_set_type() - set type from the ast entry
  719. * @soc: SoC handle
  720. * @ast_entry: AST entry of the node
  721. * @type: AST entry type
  722. *
  723. * This function sets the type in the ast entry.
  724. *
  725. * Return:
  726. */
  727. void dp_peer_ast_set_type(struct dp_soc *soc,
  728. struct dp_ast_entry *ast_entry,
  729. enum cdp_txrx_ast_entry_type type);
  730. void dp_peer_ast_send_wds_del(struct dp_soc *soc,
  731. struct dp_ast_entry *ast_entry,
  732. struct dp_peer *peer);
  733. #ifdef WLAN_FEATURE_MULTI_AST_DEL
  734. void dp_peer_ast_send_multi_wds_del(
  735. struct dp_soc *soc, uint8_t vdev_id,
  736. struct peer_del_multi_wds_entries *wds_list);
  737. #endif
  738. void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  739. struct cdp_soc *dp_soc,
  740. void *cookie,
  741. enum cdp_ast_free_status status);
  742. /**
  743. * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
  744. * @soc: SoC handle
  745. * @ase: Address search entry
  746. *
  747. * This function removes the AST entry from soc AST hash table
  748. * It assumes caller has taken the ast lock to protect the access to this table
  749. *
  750. * Return: None
  751. */
  752. void dp_peer_ast_hash_remove(struct dp_soc *soc,
  753. struct dp_ast_entry *ase);
  754. /**
  755. * dp_peer_free_ast_entry() - Free up the ast entry memory
  756. * @soc: SoC handle
  757. * @ast_entry: Address search entry
  758. *
  759. * This API is used to free up the memory associated with
  760. * AST entry.
  761. *
  762. * Return: None
  763. */
  764. void dp_peer_free_ast_entry(struct dp_soc *soc,
  765. struct dp_ast_entry *ast_entry);
/**
 * dp_peer_unlink_ast_entry() - Unlink AST entry from peer and hash lists
 * @soc: SoC handle
 * @ast_entry: Address search entry
 * @peer: peer
 *
 * This API is used to remove/unlink AST entry from the peer list
 * and hash list. It does not free the entry itself; see
 * dp_peer_free_ast_entry() for that.
 *
 * Return: None
 */
void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);
  780. /**
  781. * dp_peer_mec_detach_entry() - Detach the MEC entry
  782. * @soc: SoC handle
  783. * @mecentry: MEC entry of the node
  784. * @ptr: pointer to free list
  785. *
  786. * The MEC entry is detached from MEC table and added to free_list
  787. * to free the object outside lock
  788. *
  789. * Return: None
  790. */
  791. void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
  792. void *ptr);
  793. /**
  794. * dp_peer_mec_free_list() - free the MEC entry from free_list
  795. * @soc: SoC handle
  796. * @ptr: pointer to free list
  797. *
  798. * Return: None
  799. */
  800. void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
/**
 * dp_peer_mec_add_entry() - Allocate and add MEC entry to MEC table
 * @soc: SoC handle
 * @vdev: vdev to which mec node belongs
 * @mac_addr: MAC address of mec node
 *
 * This function allocates and adds MEC entry to MEC table.
 * It assumes caller has taken the mec lock to protect the access to these
 * tables
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 uint8_t *mac_addr);
  816. /**
  817. * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by PDEV Id
  818. * within pdev
  819. * @soc: SoC handle
  820. * @pdev_id: pdev Id
  821. * @mec_mac_addr: MAC address of mec node
  822. *
  823. * It assumes caller has taken the mec_lock to protect the access to
  824. * MEC hash table
  825. *
  826. * Return: MEC entry
  827. */
  828. struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
  829. uint8_t pdev_id,
  830. uint8_t *mec_mac_addr);
/*
 * DP_AST_ASSERT() - assert on an AST condition, dumping AST stats first.
 *
 * NOTE(review): the macro body references a variable named 'soc' that is
 * not a macro parameter, so callers must have a 'soc' in scope.
 */
#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc);\
			QDF_BUG(_condition); \
		} \
	} while (0)
  838. /**
  839. * dp_peer_update_inactive_time() - Update inactive time for peer
  840. * @pdev: pdev object
  841. * @tag_type: htt_tlv_tag type
  842. * @tag_buf: buf message
  843. */
  844. void
  845. dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
  846. uint32_t *tag_buf);
  847. #ifndef QCA_MULTIPASS_SUPPORT
  848. static inline
  849. /**
  850. * dp_peer_set_vlan_id() - set vlan_id for this peer
  851. * @cdp_soc: soc handle
  852. * @vdev_id: id of vdev object
  853. * @peer_mac: mac address
  854. * @vlan_id: vlan id for peer
  855. *
  856. * Return: void
  857. */
  858. void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
  859. uint8_t vdev_id, uint8_t *peer_mac,
  860. uint16_t vlan_id)
  861. {
  862. }
  863. /**
  864. * dp_set_vlan_groupkey() - set vlan map for vdev
  865. * @soc_hdl: pointer to soc
  866. * @vdev_id: id of vdev handle
  867. * @vlan_id: vlan_id
  868. * @group_key: group key for vlan
  869. *
  870. * Return: set success/failure
  871. */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	/* Multipass support disabled: nothing to map, report success */
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_peer_multipass_list_init() - initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * No-op when QCA_MULTIPASS_SUPPORT is disabled.
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove() - remove peer from special peer list
 * @peer: peer handle
 *
 * No-op when QCA_MULTIPASS_SUPPORT is disabled.
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
  898. #else
  899. void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
  900. uint8_t vdev_id, uint8_t *peer_mac,
  901. uint16_t vlan_id);
  902. QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
  903. uint16_t vlan_id, uint16_t group_key);
  904. void dp_peer_multipass_list_init(struct dp_vdev *vdev);
  905. void dp_peer_multipass_list_remove(struct dp_peer *peer);
  906. #endif
  907. #ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * No-op when QCA_PEER_MULTIQ_SUPPORT is disabled.
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl: generic soc handle
 * @is_wds: flag to indicate if peer is wds
 * @peer_id: peer_id from htt peer map message
 * @peer_mac_addr: mac address of the peer
 * @ast_info: ast flow override information from peer map
 *
 * No-op when QCA_PEER_MULTIQ_SUPPORT is disabled.
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
}
  934. #else
  935. void dp_peer_reset_flowq_map(struct dp_peer *peer);
  936. void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
  937. bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
  938. struct dp_ast_flow_override_info *ast_info);
  939. #endif
  940. #ifdef QCA_PEER_EXT_STATS
  941. /**
  942. * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay stats content
  943. * @soc: DP SoC context
  944. * @txrx_peer: DP txrx peer context
  945. *
  946. * Allocate the peer delay stats context
  947. *
  948. * Return: QDF_STATUS_SUCCESS if allocation is
  949. * successful
  950. */
  951. QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
  952. struct dp_txrx_peer *txrx_peer);
  953. /**
  954. * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context
  955. * @soc: DP SoC context
  956. * @txrx_peer: txrx DP peer context
  957. *
  958. * Free the peer delay stats context
  959. *
  960. * Return: Void
  961. */
  962. void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
  963. struct dp_txrx_peer *txrx_peer);
  964. /**
  965. * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer
  966. * @txrx_peer: dp_txrx_peer handle
  967. *
  968. * Return: void
  969. */
  970. void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
  971. #else
/* Stubs for builds without QCA_PEER_EXT_STATS: no delay stats context is
 * kept, so alloc reports success and dealloc/clear do nothing.
 */
static inline
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
  987. #endif
  988. #ifdef WLAN_PEER_JITTER
  989. /**
  990. * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer
  991. * @pdev: Datapath pdev handle
  992. * @txrx_peer: dp_txrx_peer handle
  993. *
  994. * Return: QDF_STATUS
  995. */
  996. QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
  997. struct dp_txrx_peer *txrx_peer);
  998. /**
  999. * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context
  1000. * @pdev: Datapath pdev handle
  1001. * @txrx_peer: dp_txrx_peer handle
  1002. *
  1003. * Return: void
  1004. */
  1005. void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
  1006. struct dp_txrx_peer *txrx_peer);
  1007. /**
  1008. * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer
  1009. * @txrx_peer: dp_txrx_peer handle
  1010. *
  1011. * Return: void
  1012. */
  1013. void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
  1014. #else
/* Stubs for builds without WLAN_PEER_JITTER: no jitter stats context is
 * kept, so alloc reports success and dealloc/clear do nothing.
 */
static inline
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
  1030. #endif
  1031. #ifndef CONFIG_SAWF_DEF_QUEUES
/* Stubs for builds without CONFIG_SAWF_DEF_QUEUES: no per-peer SAWF
 * context exists, so alloc/free just report success.
 */
static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
					       struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}
  1042. #endif
  1043. #ifndef CONFIG_SAWF
/* Stubs for builds without CONFIG_SAWF: no per-peer SAWF stats context
 * exists, so alloc/free just report success.
 */
static inline
QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
					struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
				       struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}
  1056. #endif
  1057. /**
  1058. * dp_vdev_bss_peer_ref_n_get: Get bss peer of a vdev
  1059. * @soc: DP soc
  1060. * @vdev: vdev
  1061. * @mod_id: id of module requesting reference
  1062. *
  1063. * Return: VDEV BSS peer
  1064. */
  1065. struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
  1066. struct dp_vdev *vdev,
  1067. enum dp_mod_id mod_id);
  1068. /**
  1069. * dp_sta_vdev_self_peer_ref_n_get: Get self peer of sta vdev
  1070. * @soc: DP soc
  1071. * @vdev: vdev
  1072. * @mod_id: id of module requesting reference
  1073. *
  1074. * Return: VDEV self peer
  1075. */
  1076. struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
  1077. struct dp_vdev *vdev,
  1078. enum dp_mod_id mod_id);
  1079. void dp_peer_ast_table_detach(struct dp_soc *soc);
  1080. /**
  1081. * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
  1082. * @soc: soc handle
  1083. *
  1084. * Return: none
  1085. */
  1086. void dp_peer_find_map_detach(struct dp_soc *soc);
  1087. void dp_soc_wds_detach(struct dp_soc *soc);
  1088. QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
  1089. /**
  1090. * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
  1091. * @soc: SoC handle
  1092. *
  1093. * Return: QDF_STATUS
  1094. */
  1095. QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
  1096. /**
  1097. * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
  1098. * @soc: SoC handle
  1099. *
  1100. * Return: QDF_STATUS
  1101. */
  1102. QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
  1103. /**
  1104. * dp_del_wds_entry_wrapper() - delete a WDS AST entry
  1105. * @soc: DP soc structure pointer
  1106. * @vdev_id: vdev_id
  1107. * @wds_macaddr: MAC address of ast node
  1108. * @type: type from enum cdp_txrx_ast_entry_type
  1109. * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
  1110. *
  1111. * This API is used to delete an AST entry from fw
  1112. *
  1113. * Return: None
  1114. */
  1115. void dp_del_wds_entry_wrapper(struct dp_soc *soc, uint8_t vdev_id,
  1116. uint8_t *wds_macaddr, uint8_t type,
  1117. uint8_t delete_in_fw);
  1118. void dp_soc_wds_attach(struct dp_soc *soc);
  1119. /**
  1120. * dp_peer_mec_hash_detach() - Free MEC Hash table
  1121. * @soc: SoC handle
  1122. *
  1123. * Return: None
  1124. */
  1125. void dp_peer_mec_hash_detach(struct dp_soc *soc);
  1126. /**
  1127. * dp_peer_ast_hash_detach() - Free AST Hash table
  1128. * @soc: SoC handle
  1129. *
  1130. * Return: None
  1131. */
  1132. void dp_peer_ast_hash_detach(struct dp_soc *soc);
  1133. #ifdef FEATURE_AST
/**
 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Delete the AST entries belonging to a peer: first the self AST entry
 * (which may not yet be on peer->ast_list), then every entry on the list.
 *
 * Return: None
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received(for ex in case
	 * of auth disallow due to ACL) in such cases self ast is not added
	 * to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}
	/* two-cursor iteration macro — presumably safe against
	 * dp_peer_del_ast() unlinking the current entry; confirm in
	 * DP_PEER_ITERATE_ASE_LIST definition
	 */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}
  1159. /**
  1160. * dp_print_peer_ast_entries() - Dump AST entries of peer
  1161. * @soc: Datapath soc handle
  1162. * @peer: Datapath peer
  1163. * @arg: argument to iterate function
  1164. *
  1165. * Return: void
  1166. */
  1167. void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
  1168. void *arg);
  1169. #else
/* Stubs for builds without FEATURE_AST: no AST entries exist to print
 * or delete.
 */
static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
					     struct dp_peer *peer, void *arg)
{
}

static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
  1178. #endif
  1179. #ifdef FEATURE_MEC
  1180. /**
  1181. * dp_peer_mec_spinlock_create() - Create the MEC spinlock
  1182. * @soc: SoC handle
  1183. *
  1184. * Return: none
  1185. */
  1186. void dp_peer_mec_spinlock_create(struct dp_soc *soc);
  1187. /**
  1188. * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
  1189. * @soc: SoC handle
  1190. *
  1191. * Return: none
  1192. */
  1193. void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);
  1194. /**
  1195. * dp_peer_mec_flush_entries() - Delete all mec entries in table
  1196. * @soc: Datapath SOC
  1197. *
  1198. * Return: None
  1199. */
  1200. void dp_peer_mec_flush_entries(struct dp_soc *soc);
  1201. #else
/* Stubs for builds without FEATURE_MEC: no MEC table, so the lock and
 * flush operations do nothing.
 */
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
  1211. #endif
  1212. static inline int dp_peer_find_mac_addr_cmp(
  1213. union dp_align_mac_addr *mac_addr1,
  1214. union dp_align_mac_addr *mac_addr2)
  1215. {
  1216. /*
  1217. * Intentionally use & rather than &&.
  1218. * because the operands are binary rather than generic boolean,
  1219. * the functionality is equivalent.
  1220. * Using && has the advantage of short-circuited evaluation,
  1221. * but using & has the advantage of no conditional branching,
  1222. * which is a more significant benefit.
  1223. */
  1224. return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
  1225. & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
  1226. }
/**
 * dp_peer_delete() - delete DP peer
 *
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
void dp_peer_delete(struct dp_soc *soc,
		    struct dp_peer *peer,
		    void *arg);
  1239. /**
  1240. * dp_mlo_peer_delete() - delete MLO DP peer
  1241. *
  1242. * @soc: Datapath soc
  1243. * @peer: Datapath peer
  1244. * @arg: argument to iter function
  1245. *
  1246. * Return: void
  1247. */
  1248. void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);
  1249. #ifdef WLAN_FEATURE_11BE_MLO
/* is MLO connection mld txrx peer — NOTE(review): tests the mld_peer
 * field of a dp_txrx_peer; confirm intended distinction from
 * IS_MLO_DP_MLD_PEER() below, which checks peer_type on a dp_peer
 */
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)
/* set peer type */
#define DP_PEER_SET_TYPE(_peer, _type_val) \
	((_peer)->peer_type = (_type_val))
/* is legacy (non-MLO) peer: link type with no mld peer attached */
#define IS_DP_LEGACY_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))
/* is MLO connection link peer: link type with an mld peer attached */
#define IS_MLO_DP_LINK_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_PEER(_peer) \
	((_peer)->peer_type == CDP_MLD_PEER_TYPE)
/* Get mld peer pointer from a link peer */
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
	((link_peer)->mld_peer)
  1267. #ifdef WLAN_MLO_MULTI_CHIP
  1268. uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);
  1269. struct dp_peer *
  1270. dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
  1271. uint8_t *peer_mac_addr,
  1272. int mac_addr_is_aligned,
  1273. uint8_t vdev_id,
  1274. uint8_t chip_id,
  1275. enum dp_mod_id mod_id);
  1276. #else
/* Single-chip build: the chip id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

/* Single-chip build: chip_id is ignored and the lookup falls back to the
 * regular per-soc peer hash search.
 */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
  1293. #endif
  1294. /**
  1295. * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
  1296. * matching mac_address
  1297. * @soc: soc handle
  1298. * @peer_mac_addr: mld peer mac address
  1299. * @mac_addr_is_aligned: is mac addr aligned
  1300. * @vdev_id: vdev_id
  1301. * @mod_id: id of module requesting reference
  1302. *
  1303. * Return: peer in success
  1304. * NULL in failure
  1305. */
  1306. static inline
  1307. struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
  1308. uint8_t *peer_mac_addr,
  1309. int mac_addr_is_aligned,
  1310. uint8_t vdev_id,
  1311. enum dp_mod_id mod_id)
  1312. {
  1313. if (soc->arch_ops.mlo_peer_find_hash_find)
  1314. return soc->arch_ops.mlo_peer_find_hash_find(soc,
  1315. peer_mac_addr,
  1316. mac_addr_is_aligned,
  1317. mod_id, vdev_id);
  1318. return NULL;
  1319. }
  1320. /**
  1321. * dp_peer_hash_find_wrapper() - find link peer or mld per according to
  1322. * peer_type
  1323. * @soc: DP SOC handle
  1324. * @peer_info: peer information for hash find
  1325. * @mod_id: ID of module requesting reference
  1326. *
  1327. * Return: peer handle
  1328. */
  1329. static inline
  1330. struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
  1331. struct cdp_peer_info *peer_info,
  1332. enum dp_mod_id mod_id)
  1333. {
  1334. struct dp_peer *peer = NULL;
  1335. if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
  1336. peer_info->peer_type == CDP_WILD_PEER_TYPE) {
  1337. peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
  1338. peer_info->mac_addr_is_aligned,
  1339. peer_info->vdev_id,
  1340. mod_id);
  1341. if (peer)
  1342. return peer;
  1343. }
  1344. if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
  1345. peer_info->peer_type == CDP_WILD_PEER_TYPE)
  1346. peer = dp_mld_peer_find_hash_find(
  1347. soc, peer_info->mac_addr,
  1348. peer_info->mac_addr_is_aligned,
  1349. peer_info->vdev_id,
  1350. mod_id);
  1351. return peer;
  1352. }
/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
 *                               increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
			       struct dp_peer *mld_peer)
{
	/* increase mld_peer ref_cnt; soc arg is unused here, hence NULL */
	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = mld_peer;
}
/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
 *                               decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Drops the reference taken by dp_link_peer_add_mld_peer() and clears
 * the back-pointer.
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = NULL;
}
/**
 * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Creates the link_peers_info_lock and marks every link slot invalid.
 *
 * Return: None
 */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
	int i;

	qdf_spinlock_create(&mld_peer->link_peers_info_lock);
	mld_peer->num_links = 0;
	for (i = 0; i < DP_MAX_MLO_LINKS; i++)
		mld_peer->link_peers[i].is_valid = false;
}
/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Destroys the lock created by dp_mld_peer_init_link_peers_info().
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}
/**
 * dp_mld_peer_add_link_peer() - add link peer info to mld peer
 * @mld_peer: mld dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Records the link peer's mac address, vdev_id and chip_id in the first
 * free slot of mld_peer->link_peers[] under link_peers_info_lock, then
 * logs the result outside the lock.
 *
 * Return: None
 */
static inline
void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
			       struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	struct dp_soc *soc = mld_peer->vdev->pdev->soc;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (!link_peer_info->is_valid) {
			qdf_mem_copy(link_peer_info->mac_addr.raw,
				     link_peer->mac_addr.raw,
				     QDF_MAC_ADDR_SIZE);
			link_peer_info->is_valid = true;
			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
			link_peer_info->chip_id =
				dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
			mld_peer->num_links++;
			break;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	/* i == DP_MAX_MLO_LINKS here means no free slot was found */
	dp_peer_info("%s addition of link peer %pK (" QDF_MAC_ADDR_FMT ") "
		     "to MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
		     "idx %u num_links %u",
		     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
		     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
		     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
		     i, mld_peer->num_links);
	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_ADD_LINK,
						mld_peer, link_peer, i,
						(i != DP_MAX_MLO_LINKS) ? 1 : 0);
}
/**
 * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
 * @mld_peer: MLD dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Invalidates the link_peers[] slot whose mac address matches link_peer,
 * under link_peers_info_lock, then logs the result outside the lock.
 *
 * Return: number of links left after deletion
 */
static inline
uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
				  struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	uint8_t num_links;
	struct dp_soc *soc = mld_peer->vdev->pdev->soc;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		/* dp_peer_find_mac_addr_cmp() returns 0 on a match */
		if (link_peer_info->is_valid &&
		    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
					       &link_peer_info->mac_addr)) {
			link_peer_info->is_valid = false;
			mld_peer->num_links--;
			break;
		}
	}
	/* snapshot the count before releasing the lock */
	num_links = mld_peer->num_links;
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	/* i == DP_MAX_MLO_LINKS here means no matching slot was found */
	dp_peer_info("%s deletion of link peer %pK (" QDF_MAC_ADDR_FMT ") "
		     "from MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
		     "idx %u num_links %u",
		     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
		     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
		     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
		     i, mld_peer->num_links);
	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_DEL_LINK,
						mld_peer, link_peer, i,
						(i != DP_MAX_MLO_LINKS) ? 1 : 0);
	return num_links;
}
/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
 *                                         increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that hold links peers pointer array and number
 * @mod_id: id of module requesting reference
 *
 * Walks the mld peer's valid link slots under link_peers_info_lock and
 * looks up each link peer (taking a reference) by mac/vdev/chip id.
 * Callers must release with dp_release_link_peers_ref().
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
		struct dp_soc *soc,
		struct dp_peer *mld_peer,
		struct dp_mld_link_peers *mld_link_peers,
		enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i = 0, j = 0;
	struct dp_peer_link_info *link_peer_info;

	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid) {
			peer = dp_link_peer_hash_find_by_chip_id(
						soc,
						link_peer_info->mac_addr.raw,
						true,
						link_peer_info->vdev_id,
						link_peer_info->chip_id,
						mod_id);
			/* hash find may fail; only store found peers,
			 * so j can be smaller than the valid-slot count
			 */
			if (peer)
				mld_link_peers->link_peers[j++] = peer;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
	mld_link_peers->num_links = j;
}
  1528. /**
  1529. * dp_release_link_peers_ref() - release all link peers reference
  1530. * @mld_link_peers: structure that hold links peers pointer array and number
  1531. * @mod_id: id of module requesting reference
  1532. *
  1533. * Return: None.
  1534. */
  1535. static inline
  1536. void dp_release_link_peers_ref(
  1537. struct dp_mld_link_peers *mld_link_peers,
  1538. enum dp_mod_id mod_id)
  1539. {
  1540. struct dp_peer *peer;
  1541. uint8_t i;
  1542. for (i = 0; i < mld_link_peers->num_links; i++) {
  1543. peer = mld_link_peers->link_peers[i];
  1544. if (peer)
  1545. dp_peer_unref_delete(peer, mod_id);
  1546. mld_link_peers->link_peers[i] = NULL;
  1547. }
  1548. mld_link_peers->num_links = 0;
  1549. }
/**
 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
 * @soc: Datapath soc handle
 * @peer_id: peer id
 * @lmac_id: lmac id to find the link peer on given lmac
 *
 * For an MLD peer, scans its link peers for one on this soc whose pdev
 * matches lmac_id; for a non-MLD peer, the given peer_id is returned
 * unchanged.
 *
 * Return: peer_id of link peer if found
 *         else return HTT_INVALID_PEER
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	uint8_t i;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_soc *link_peer_soc;
	struct dp_mld_link_peers link_peers_info;
	uint16_t link_peer_id = HTT_INVALID_PEER;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
	if (!peer)
		return HTT_INVALID_PEER;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    DP_MOD_ID_CDP);
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			/* match both the soc and the lmac */
			if ((link_peer_soc == soc) &&
			    (link_peer->vdev->pdev->lmac_id == lmac_id)) {
				link_peer_id = link_peer->peer_id;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		link_peer_id = peer_id;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return link_peer_id;
}
  1593. /**
  1594. * dp_peer_get_tgt_peer_hash_find() - get dp_peer handle
  1595. * @soc: soc handle
  1596. * @peer_mac: peer mac address
  1597. * @mac_addr_is_aligned: is mac addr aligned
  1598. * @vdev_id: vdev_id
  1599. * @mod_id: id of module requesting reference
  1600. *
  1601. * for MLO connection, get corresponding MLD peer,
  1602. * otherwise get link peer for non-MLO case.
  1603. *
  1604. * Return: peer in success
  1605. * NULL in failure
  1606. */
  1607. static inline
  1608. struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
  1609. uint8_t *peer_mac,
  1610. int mac_addr_is_aligned,
  1611. uint8_t vdev_id,
  1612. enum dp_mod_id mod_id)
  1613. {
  1614. struct dp_peer *ta_peer = NULL;
  1615. struct dp_peer *peer = dp_peer_find_hash_find(soc,
  1616. peer_mac, 0, vdev_id,
  1617. mod_id);
  1618. if (peer) {
  1619. /* mlo connection link peer, get mld peer with reference */
  1620. if (IS_MLO_DP_LINK_PEER(peer)) {
  1621. /* increase mld peer ref_cnt */
  1622. if (QDF_STATUS_SUCCESS ==
  1623. dp_peer_get_ref(soc, peer->mld_peer, mod_id))
  1624. ta_peer = peer->mld_peer;
  1625. else
  1626. ta_peer = NULL;
  1627. /* release peer reference that added by hash find */
  1628. dp_peer_unref_delete(peer, mod_id);
  1629. } else {
  1630. /* mlo MLD peer or non-mlo link peer */
  1631. ta_peer = peer;
  1632. }
  1633. } else {
  1634. dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT,
  1635. QDF_MAC_ADDR_REF(peer_mac));
  1636. }
  1637. return ta_peer;
  1638. }
  1639. /**
  1640. * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
  1641. * @soc: core DP soc context
  1642. * @peer_id: peer id from peer object can be retrieved
  1643. * @mod_id: ID of module requesting reference
  1644. *
  1645. * for MLO connection, get corresponding MLD peer,
  1646. * otherwise get link peer for non-MLO case.
  1647. *
  1648. * Return: peer in success
  1649. * NULL in failure
  1650. */
  1651. static inline
  1652. struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
  1653. uint16_t peer_id,
  1654. enum dp_mod_id mod_id)
  1655. {
  1656. struct dp_peer *ta_peer = NULL;
  1657. struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1658. if (peer) {
  1659. /* mlo connection link peer, get mld peer with reference */
  1660. if (IS_MLO_DP_LINK_PEER(peer)) {
  1661. /* increase mld peer ref_cnt */
  1662. if (QDF_STATUS_SUCCESS ==
  1663. dp_peer_get_ref(soc, peer->mld_peer, mod_id))
  1664. ta_peer = peer->mld_peer;
  1665. else
  1666. ta_peer = NULL;
  1667. /* release peer reference that added by hash find */
  1668. dp_peer_unref_delete(peer, mod_id);
  1669. } else {
  1670. /* mlo MLD peer or non-mlo link peer */
  1671. ta_peer = peer;
  1672. }
  1673. }
  1674. return ta_peer;
  1675. }
  1676. /**
  1677. * dp_peer_mlo_delete() - peer MLO related delete operation
  1678. * @peer: DP peer handle
  1679. * Return: None
  1680. */
  1681. static inline
  1682. void dp_peer_mlo_delete(struct dp_peer *peer)
  1683. {
  1684. struct dp_peer *ml_peer;
  1685. struct dp_soc *soc;
  1686. dp_info("peer " QDF_MAC_ADDR_FMT " type %d",
  1687. QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->peer_type);
  1688. /* MLO connection link peer */
  1689. if (IS_MLO_DP_LINK_PEER(peer)) {
  1690. ml_peer = peer->mld_peer;
  1691. soc = ml_peer->vdev->pdev->soc;
  1692. /* if last link peer deletion, delete MLD peer */
  1693. if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
  1694. dp_peer_delete(soc, peer->mld_peer, NULL);
  1695. }
  1696. }
/**
 * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
 * @soc: Soc handle
 * @peer: DP peer handle
 * @vdev_id: Vdev ID
 * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mlo_setup(
		struct dp_soc *soc,
		struct dp_peer *peer,
		uint8_t vdev_id,
		struct cdp_peer_setup_info *setup_info);
  1709. /**
  1710. * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
  1711. * @peer: datapath peer
  1712. *
  1713. * Return: MLD peer in case of MLO Link peer
  1714. * Peer itself in other cases
  1715. */
  1716. static inline
  1717. struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
  1718. {
  1719. return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
  1720. }
  1721. /**
  1722. * dp_get_primary_link_peer_by_id(): Get primary link peer from the given
  1723. * peer id
  1724. * @soc: core DP soc context
  1725. * @peer_id: peer id
  1726. * @mod_id: ID of module requesting reference
  1727. *
  1728. * Return: primary link peer for the MLO peer
  1729. * legacy peer itself in case of legacy peer
  1730. */
  1731. static inline
  1732. struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
  1733. uint16_t peer_id,
  1734. enum dp_mod_id mod_id)
  1735. {
  1736. uint8_t i;
  1737. struct dp_mld_link_peers link_peers_info;
  1738. struct dp_peer *peer;
  1739. struct dp_peer *link_peer;
  1740. struct dp_peer *primary_peer = NULL;
  1741. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1742. if (!peer)
  1743. return NULL;
  1744. if (IS_MLO_DP_MLD_PEER(peer)) {
  1745. /* get link peers with reference */
  1746. dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
  1747. mod_id);
  1748. for (i = 0; i < link_peers_info.num_links; i++) {
  1749. link_peer = link_peers_info.link_peers[i];
  1750. if (link_peer->primary_link) {
  1751. primary_peer = link_peer;
  1752. /*
  1753. * Take additional reference over
  1754. * primary link peer.
  1755. */
  1756. dp_peer_get_ref(NULL, primary_peer, mod_id);
  1757. break;
  1758. }
  1759. }
  1760. /* release link peers reference */
  1761. dp_release_link_peers_ref(&link_peers_info, mod_id);
  1762. dp_peer_unref_delete(peer, mod_id);
  1763. } else {
  1764. primary_peer = peer;
  1765. }
  1766. return primary_peer;
  1767. }
  1768. /**
  1769. * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
  1770. * @peer: Datapath peer
  1771. *
  1772. * Return: dp_txrx_peer from MLD peer if peer type is link peer
  1773. * dp_txrx_peer from peer itself for other cases
  1774. */
  1775. static inline
  1776. struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
  1777. {
  1778. return IS_MLO_DP_LINK_PEER(peer) ?
  1779. peer->mld_peer->txrx_peer : peer->txrx_peer;
  1780. }
  1781. /**
  1782. * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
  1783. * @peer: Datapath peer
  1784. *
  1785. * Return: true if peer is primary link peer or legacy peer
  1786. * false otherwise
  1787. */
  1788. static inline
  1789. bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
  1790. {
  1791. if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
  1792. return true;
  1793. else if (IS_DP_LEGACY_PEER(peer))
  1794. return true;
  1795. else
  1796. return false;
  1797. }
  1798. /**
  1799. * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
  1800. *
  1801. * @soc: core DP soc context
  1802. * @peer_id: peer id from peer object can be retrieved
  1803. * @handle: reference handle
  1804. * @mod_id: ID of module requesting reference
  1805. *
  1806. * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
  1807. */
  1808. static inline struct dp_txrx_peer *
  1809. dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
  1810. uint16_t peer_id,
  1811. dp_txrx_ref_handle *handle,
  1812. enum dp_mod_id mod_id)
  1813. {
  1814. struct dp_peer *peer;
  1815. struct dp_txrx_peer *txrx_peer;
  1816. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1817. if (!peer)
  1818. return NULL;
  1819. txrx_peer = dp_get_txrx_peer(peer);
  1820. if (txrx_peer) {
  1821. *handle = (dp_txrx_ref_handle)peer;
  1822. return txrx_peer;
  1823. }
  1824. dp_peer_unref_delete(peer, mod_id);
  1825. return NULL;
  1826. }
/**
 * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
 *
 * @soc: core DP soc context
 *
 * Return: void
 */
void dp_print_mlo_ast_stats_be(struct dp_soc *soc);
#else
/*
 * Non-MLO build (WLAN_FEATURE_11BE_MLO disabled): the peer-type
 * predicates collapse to constants so all MLO-only branches
 * compile out and every peer is treated as a legacy peer.
 */
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false
#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) true
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL
  1843. static inline
  1844. struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
  1845. struct cdp_peer_info *peer_info,
  1846. enum dp_mod_id mod_id)
  1847. {
  1848. return dp_peer_find_hash_find(soc, peer_info->mac_addr,
  1849. peer_info->mac_addr_is_aligned,
  1850. peer_info->vdev_id,
  1851. mod_id);
  1852. }
  1853. static inline
  1854. struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
  1855. uint8_t *peer_mac,
  1856. int mac_addr_is_aligned,
  1857. uint8_t vdev_id,
  1858. enum dp_mod_id mod_id)
  1859. {
  1860. return dp_peer_find_hash_find(soc, peer_mac,
  1861. mac_addr_is_aligned, vdev_id,
  1862. mod_id);
  1863. }
/* Non-MLO variant: the target peer is the peer itself */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}
/* Non-MLO variant: nothing to set up, always succeeds */
static inline
QDF_STATUS dp_peer_mlo_setup(
		struct dp_soc *soc,
		struct dp_peer *peer,
		uint8_t vdev_id,
		struct cdp_peer_setup_info *setup_info)
{
	return QDF_STATUS_SUCCESS;
}
/* Non-MLO variant: no link-peer bookkeeping to initialize */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

/* Non-MLO variant: no link-peer bookkeeping to tear down */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

/* Non-MLO variant: link peers carry no MLD association */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

/* Non-MLO variant: no MLO state to delete */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}
/* Non-MLO variant: no MLO-side authorization required */
static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
}

/* Non-MLO variant: single-chip system, chip id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}
/* Non-MLO variant: chip_id is irrelevant, fall back to hash lookup */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
/* Non-MLO variant: peer is always its own target */
static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	return peer;
}

/* Non-MLO variant: the peer itself is the primary link */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

/* Non-MLO variant: take the txrx context directly from the peer */
static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return peer->txrx_peer;
}

/* Non-MLO variant: every peer counts as a primary link peer */
static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	return true;
}
/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Non-MLO variant: delegates straight to the txrx-peer lookup.
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)
{
	return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}
/* Non-MLO variant: peer id maps to itself regardless of lmac */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	return peer_id;
}

/* Non-MLO variant: no MLO AST stats to print */
static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
{
}
  1966. #endif /* WLAN_FEATURE_11BE_MLO */
  1967. static inline
  1968. void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
  1969. {
  1970. uint8_t i;
  1971. qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
  1972. sizeof(struct dp_rx_tid_defrag));
  1973. for (i = 0; i < DP_MAX_TIDS; i++)
  1974. qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
  1975. }
  1976. static inline
  1977. void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
  1978. {
  1979. uint8_t i;
  1980. for (i = 0; i < DP_MAX_TIDS; i++)
  1981. qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
  1982. }
  1983. #ifdef PEER_CACHE_RX_PKTS
  1984. static inline
  1985. void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
  1986. {
  1987. qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
  1988. txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
  1989. qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
  1990. DP_RX_CACHED_BUFQ_THRESH);
  1991. }
  1992. static inline
  1993. void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
  1994. {
  1995. qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
  1996. qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
  1997. }
  1998. #else
/* PEER_CACHE_RX_PKTS disabled: no bufq resources to create */
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}

/* PEER_CACHE_RX_PKTS disabled: no bufq resources to release */
static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
  2007. #endif
  2008. /**
  2009. * dp_peer_update_state() - update dp peer state
  2010. *
  2011. * @soc: core DP soc context
  2012. * @peer: DP peer
  2013. * @state: new state
  2014. *
  2015. * Return: None
  2016. */
  2017. static inline void
  2018. dp_peer_update_state(struct dp_soc *soc,
  2019. struct dp_peer *peer,
  2020. enum dp_peer_state state)
  2021. {
  2022. uint8_t peer_state;
  2023. qdf_spin_lock_bh(&peer->peer_state_lock);
  2024. peer_state = peer->peer_state;
  2025. switch (state) {
  2026. case DP_PEER_STATE_INIT:
  2027. DP_PEER_STATE_ASSERT
  2028. (peer, state, (peer_state != DP_PEER_STATE_ACTIVE) ||
  2029. (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
  2030. break;
  2031. case DP_PEER_STATE_ACTIVE:
  2032. DP_PEER_STATE_ASSERT(peer, state,
  2033. (peer_state == DP_PEER_STATE_INIT));
  2034. break;
  2035. case DP_PEER_STATE_LOGICAL_DELETE:
  2036. DP_PEER_STATE_ASSERT(peer, state,
  2037. (peer_state == DP_PEER_STATE_ACTIVE) ||
  2038. (peer_state == DP_PEER_STATE_INIT));
  2039. break;
  2040. case DP_PEER_STATE_INACTIVE:
  2041. if (IS_MLO_DP_MLD_PEER(peer))
  2042. DP_PEER_STATE_ASSERT
  2043. (peer, state,
  2044. (peer_state == DP_PEER_STATE_ACTIVE));
  2045. else
  2046. DP_PEER_STATE_ASSERT
  2047. (peer, state,
  2048. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  2049. break;
  2050. case DP_PEER_STATE_FREED:
  2051. if (peer->sta_self_peer)
  2052. DP_PEER_STATE_ASSERT
  2053. (peer, state, (peer_state == DP_PEER_STATE_INIT));
  2054. else
  2055. DP_PEER_STATE_ASSERT
  2056. (peer, state,
  2057. (peer_state == DP_PEER_STATE_INACTIVE) ||
  2058. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  2059. break;
  2060. default:
  2061. qdf_spin_unlock_bh(&peer->peer_state_lock);
  2062. dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
  2063. state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  2064. return;
  2065. }
  2066. peer->peer_state = state;
  2067. qdf_spin_unlock_bh(&peer->peer_state_lock);
  2068. dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
  2069. peer_state, state,
  2070. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  2071. }
/**
 * dp_vdev_iterate_specific_peer_type() - API to iterate through vdev peer
 * list based on type of peer (Legacy or MLD peer)
 *
 * @vdev: DP vdev context
 * @func: function to be called for each peer
 * @arg: argument need to be passed to func
 * @mod_id: module_id
 * @peer_type: type of peer - MLO Link Peer or Legacy Peer
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_specific_peer_type(struct dp_vdev *vdev,
				   dp_peer_iter_func *func,
				   void *arg, enum dp_mod_id mod_id,
				   enum dp_peer_type peer_type)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	/* guard against partially initialized contexts */
	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	/*
	 * SAFE variant of the iteration so the current entry may be
	 * unlinked while the callback runs without breaking the walk.
	 */
	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		/* skip peers whose reference can no longer be taken */
		if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
			/* invoke only for the requested peer flavor */
			if ((peer_type == DP_PEER_TYPE_LEGACY &&
			     (IS_DP_LEGACY_PEER(peer))) ||
			    (peer_type == DP_PEER_TYPE_MLO_LINK &&
			     (IS_MLO_DP_LINK_PEER(peer)))) {
				(*func)(soc, peer, arg);
			}
			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
#ifdef REO_SHARED_QREF_TABLE_EN
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
					struct dp_peer *peer);
#else
/* Shared REO qref table disabled: nothing to delete */
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
						      struct dp_peer *peer) {}
#endif
/**
 * dp_peer_check_wds_ext_peer() - Check WDS ext peer
 *
 * @peer: DP peer
 *
 * Return: True for WDS ext peer, false otherwise
 */
bool dp_peer_check_wds_ext_peer(struct dp_peer *peer);

/**
 * dp_gen_ml_peer_id() - Generate MLD peer id for DP
 *
 * @soc: DP soc context
 * @peer_id: mld peer id
 *
 * Return: DP MLD peer id
 */
uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id);
  2137. #endif /* _DP_PEER_H_ */