dp_peer.h 62 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482
  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #ifndef _DP_PEER_H_
  20. #define _DP_PEER_H_
  21. #include <qdf_types.h>
  22. #include <qdf_lock.h>
  23. #include "dp_types.h"
  24. #include "dp_internal.h"
  25. #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
  26. #include "hal_reo.h"
  27. #endif
  28. #define DP_INVALID_PEER_ID 0xffff
  29. #define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
  30. #define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */
  31. #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
  32. #define DP_PEER_HASH_LOAD_MULT 2
  33. #define DP_PEER_HASH_LOAD_SHIFT 0
  34. /* Threshold for peer's cached buf queue beyond which frames are dropped */
  35. #define DP_RX_CACHED_BUFQ_THRESH 64
  36. #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
  37. #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
  38. #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
  39. #define dp_peer_info(params...) \
  40. __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
  41. #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
  42. void check_free_list_for_invalid_flush(struct dp_soc *soc);
  43. static inline
  44. void add_entry_alloc_list(struct dp_soc *soc, struct dp_rx_tid *rx_tid,
  45. struct dp_peer *peer, void *hw_qdesc_vaddr)
  46. {
  47. uint32_t max_list_size;
  48. unsigned long curr_ts = qdf_get_system_timestamp();
  49. uint32_t qref_index = soc->free_addr_list_idx;
  50. max_list_size = soc->wlan_cfg_ctx->qref_control_size;
  51. if (max_list_size == 0)
  52. return;
  53. soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_paddr =
  54. rx_tid->hw_qdesc_paddr;
  55. soc->list_qdesc_addr_alloc[qref_index].ts_qdesc_mem_hdl = curr_ts;
  56. soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_vaddr_align =
  57. hw_qdesc_vaddr;
  58. soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_vaddr_unalign =
  59. rx_tid->hw_qdesc_vaddr_unaligned;
  60. soc->list_qdesc_addr_alloc[qref_index].peer_id = peer->peer_id;
  61. soc->list_qdesc_addr_alloc[qref_index].tid = rx_tid->tid;
  62. soc->alloc_addr_list_idx++;
  63. if (soc->alloc_addr_list_idx == max_list_size)
  64. soc->alloc_addr_list_idx = 0;
  65. }
  66. static inline
  67. void add_entry_free_list(struct dp_soc *soc, struct dp_rx_tid *rx_tid)
  68. {
  69. uint32_t max_list_size;
  70. unsigned long curr_ts = qdf_get_system_timestamp();
  71. uint32_t qref_index = soc->free_addr_list_idx;
  72. max_list_size = soc->wlan_cfg_ctx->qref_control_size;
  73. if (max_list_size == 0)
  74. return;
  75. soc->list_qdesc_addr_free[qref_index].ts_qdesc_mem_hdl = curr_ts;
  76. soc->list_qdesc_addr_free[qref_index].hw_qdesc_paddr =
  77. rx_tid->hw_qdesc_paddr;
  78. soc->list_qdesc_addr_free[qref_index].hw_qdesc_vaddr_align =
  79. rx_tid->hw_qdesc_vaddr_aligned;
  80. soc->list_qdesc_addr_free[qref_index].hw_qdesc_vaddr_unalign =
  81. rx_tid->hw_qdesc_vaddr_unaligned;
  82. soc->free_addr_list_idx++;
  83. if (soc->free_addr_list_idx == max_list_size)
  84. soc->free_addr_list_idx = 0;
  85. }
  86. static inline
  87. void add_entry_write_list(struct dp_soc *soc, struct dp_peer *peer,
  88. uint32_t tid)
  89. {
  90. uint32_t max_list_size;
  91. unsigned long curr_ts = qdf_get_system_timestamp();
  92. max_list_size = soc->wlan_cfg_ctx->qref_control_size;
  93. if (max_list_size == 0)
  94. return;
  95. soc->reo_write_list[soc->write_paddr_list_idx].ts_qaddr_del = curr_ts;
  96. soc->reo_write_list[soc->write_paddr_list_idx].peer_id = peer->peer_id;
  97. soc->reo_write_list[soc->write_paddr_list_idx].paddr =
  98. peer->rx_tid[tid].hw_qdesc_paddr;
  99. soc->reo_write_list[soc->write_paddr_list_idx].tid = tid;
  100. soc->write_paddr_list_idx++;
  101. if (soc->write_paddr_list_idx == max_list_size)
  102. soc->write_paddr_list_idx = 0;
  103. }
#ifdef REO_QDESC_HISTORY
/* Event types recorded in the REO qdesc history */
enum reo_qdesc_event_type {
	REO_QDESC_UPDATE_CB = 0,	/* queue descriptor update callback */
	REO_QDESC_FREE,			/* queue descriptor freed */
};

/* One record in the REO qdesc history */
struct reo_qdesc_event {
	qdf_dma_addr_t qdesc_addr;	/* DMA address of the queue descriptor */
	uint64_t ts;			/* timestamp of the event */
	enum reo_qdesc_event_type type;	/* what happened to the descriptor */
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];	/* MAC of the owning peer */
};
#endif

/* Context carried through AST delete/age iteration */
struct ast_del_ctxt {
	bool age;	/* true when the deletion is driven by AST aging */
	int del_count;	/* running count of deleted entries */
};
#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_peer_is_wds_ext_peer() - peer is WDS_EXT peer
 *
 * @peer: DP peer context
 *
 * This API checks whether the peer is WDS_EXT peer or not
 *
 * Return: true in the wds_ext peer else false
 */
static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
{
	return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
}
#else
/* WDS_EXT support not compiled in: no peer can be a WDS_EXT peer */
static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
{
	return false;
}
#endif
  140. typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
  141. void *arg);
  142. /**
  143. * dp_peer_unref_delete() - unref and delete peer
  144. * @peer: Datapath peer handle
  145. * @id: ID of module releasing reference
  146. *
  147. */
  148. void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
  149. /**
  150. * dp_txrx_peer_unref_delete() - unref and delete peer
  151. * @handle: Datapath txrx ref handle
  152. * @id: Module ID of the caller
  153. *
  154. */
  155. void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
  156. /**
  157. * dp_peer_find_hash_find() - returns legacy or mlo link peer from
  158. * peer_hash_table matching vdev_id and mac_address
  159. * @soc: soc handle
  160. * @peer_mac_addr: peer mac address
  161. * @mac_addr_is_aligned: is mac addr aligned
  162. * @vdev_id: vdev_id
  163. * @mod_id: id of module requesting reference
  164. *
  165. * return: peer in success
  166. * NULL in failure
  167. */
  168. struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
  169. uint8_t *peer_mac_addr,
  170. int mac_addr_is_aligned,
  171. uint8_t vdev_id,
  172. enum dp_mod_id mod_id);
  173. /**
  174. * dp_peer_find_by_id_valid - check if peer exists for given id
  175. * @soc: core DP soc context
  176. * @peer_id: peer id from peer object can be retrieved
  177. *
 * Return: true if peer exists, false otherwise
  179. */
  180. bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
  181. /**
  182. * dp_peer_get_ref() - Returns peer object given the peer id
  183. *
  184. * @soc: core DP soc context
  185. * @peer: DP peer
  186. * @mod_id: id of module requesting the reference
  187. *
  188. * Return: QDF_STATUS_SUCCESS if reference held successfully
  189. * else QDF_STATUS_E_INVAL
  190. */
  191. static inline
  192. QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
  193. struct dp_peer *peer,
  194. enum dp_mod_id mod_id)
  195. {
  196. if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
  197. return QDF_STATUS_E_INVAL;
  198. if (mod_id > DP_MOD_ID_RX)
  199. qdf_atomic_inc(&peer->mod_refs[mod_id]);
  200. return QDF_STATUS_SUCCESS;
  201. }
  202. /**
  203. * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
  204. *
  205. * @soc: core DP soc context
  206. * @peer_id: peer id from peer object can be retrieved
  207. * @mod_id: module id
  208. *
  209. * Return: struct dp_peer*: Pointer to DP peer object
  210. */
  211. static inline struct dp_peer *
  212. __dp_peer_get_ref_by_id(struct dp_soc *soc,
  213. uint16_t peer_id,
  214. enum dp_mod_id mod_id)
  215. {
  216. struct dp_peer *peer;
  217. qdf_spin_lock_bh(&soc->peer_map_lock);
  218. peer = (peer_id >= soc->max_peer_id) ? NULL :
  219. soc->peer_id_to_obj_map[peer_id];
  220. if (!peer ||
  221. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  222. qdf_spin_unlock_bh(&soc->peer_map_lock);
  223. return NULL;
  224. }
  225. qdf_spin_unlock_bh(&soc->peer_map_lock);
  226. return peer;
  227. }
  228. /**
  229. * dp_peer_get_ref_by_id() - Returns peer object given the peer id
  230. * if peer state is active
  231. *
  232. * @soc: core DP soc context
  233. * @peer_id: peer id from peer object can be retrieved
  234. * @mod_id: ID of module requesting reference
  235. *
  236. * Return: struct dp_peer*: Pointer to DP peer object
  237. */
  238. static inline
  239. struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
  240. uint16_t peer_id,
  241. enum dp_mod_id mod_id)
  242. {
  243. struct dp_peer *peer;
  244. qdf_spin_lock_bh(&soc->peer_map_lock);
  245. peer = (peer_id >= soc->max_peer_id) ? NULL :
  246. soc->peer_id_to_obj_map[peer_id];
  247. if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
  248. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  249. qdf_spin_unlock_bh(&soc->peer_map_lock);
  250. return NULL;
  251. }
  252. qdf_spin_unlock_bh(&soc->peer_map_lock);
  253. return peer;
  254. }
  255. /**
  256. * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
  257. *
  258. * @soc: core DP soc context
  259. * @peer_id: peer id from peer object can be retrieved
  260. * @handle: reference handle
  261. * @mod_id: ID of module requesting reference
  262. *
  263. * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
  264. */
  265. static inline struct dp_txrx_peer *
  266. dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
  267. uint16_t peer_id,
  268. dp_txrx_ref_handle *handle,
  269. enum dp_mod_id mod_id)
  270. {
  271. struct dp_peer *peer;
  272. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  273. if (!peer)
  274. return NULL;
  275. if (!peer->txrx_peer) {
  276. dp_peer_unref_delete(peer, mod_id);
  277. return NULL;
  278. }
  279. *handle = (dp_txrx_ref_handle)peer;
  280. return peer->txrx_peer;
  281. }
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
/* rx packet caching disabled: nothing is cached, so nothing to flush */
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif
/**
 * dp_clear_peer_internal() - move peer to disconnected state and flush
 *			      its cached rx frames
 * @soc: DP soc context
 * @peer: peer being cleared
 *
 * Return: none
 */
static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	/* peer->state is written under peer_info_lock */
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	/* drop (not deliver) any rx frames cached for this peer */
	dp_rx_flush_rx_cached(peer, true);
}
  304. /**
  305. * dp_vdev_iterate_peer() - API to iterate through vdev peer list
  306. *
  307. * @vdev: DP vdev context
  308. * @func: function to be called for each peer
  309. * @arg: argument need to be passed to func
  310. * @mod_id: module_id
  311. *
  312. * Return: void
  313. */
  314. static inline void
  315. dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
  316. enum dp_mod_id mod_id)
  317. {
  318. struct dp_peer *peer;
  319. struct dp_peer *tmp_peer;
  320. struct dp_soc *soc = NULL;
  321. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  322. return;
  323. soc = vdev->pdev->soc;
  324. qdf_spin_lock_bh(&vdev->peer_list_lock);
  325. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  326. peer_list_elem,
  327. tmp_peer) {
  328. if (dp_peer_get_ref(soc, peer, mod_id) ==
  329. QDF_STATUS_SUCCESS) {
  330. (*func)(soc, peer, arg);
  331. dp_peer_unref_delete(peer, mod_id);
  332. }
  333. }
  334. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  335. }
  336. /**
  337. * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
  338. *
  339. * @pdev: DP pdev context
  340. * @func: function to be called for each peer
  341. * @arg: argument need to be passed to func
  342. * @mod_id: module_id
  343. *
  344. * Return: void
  345. */
  346. static inline void
  347. dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
  348. enum dp_mod_id mod_id)
  349. {
  350. struct dp_vdev *vdev;
  351. if (!pdev)
  352. return;
  353. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  354. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
  355. dp_vdev_iterate_peer(vdev, func, arg, mod_id);
  356. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  357. }
  358. /**
  359. * dp_soc_iterate_peer() - API to iterate through all peers of soc
  360. *
  361. * @soc: DP soc context
  362. * @func: function to be called for each peer
  363. * @arg: argument need to be passed to func
  364. * @mod_id: module_id
  365. *
  366. * Return: void
  367. */
  368. static inline void
  369. dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
  370. enum dp_mod_id mod_id)
  371. {
  372. struct dp_pdev *pdev;
  373. int i;
  374. if (!soc)
  375. return;
  376. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  377. pdev = soc->pdev_list[i];
  378. dp_pdev_iterate_peer(pdev, func, arg, mod_id);
  379. }
  380. }
  381. /**
  382. * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
  383. *
  384. * This API will cache the peers in local allocated memory and calls
  385. * iterate function outside the lock.
  386. *
  387. * As this API is allocating new memory it is suggested to use this
  388. * only when lock cannot be held
  389. *
  390. * @vdev: DP vdev context
  391. * @func: function to be called for each peer
  392. * @arg: argument need to be passed to func
  393. * @mod_id: module_id
  394. *
  395. * Return: void
  396. */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_peer **peer_array = NULL;
	int i = 0;
	uint32_t num_peers = 0;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	/* Snapshot of the peer count taken before locking; peers attached
	 * after this point are not visited in this pass.
	 */
	num_peers = vdev->num_peers;

	soc = vdev->pdev->soc;

	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
	if (!peer_array)
		return;

	/* Phase 1: under the list lock, collect referenced peers into the
	 * local array so func can run without holding the lock.
	 */
	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		/* never write past the snapshot-sized array */
		if (i >= num_peers)
			break;

		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
			peer_array[i] = peer;
			i = (i + 1);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

	/* Phase 2: invoke func outside the lock and release each reference.
	 * Tail slots not filled above are skipped via the NULL check
	 * (assumes qdf_mem_malloc zero-fills — NOTE(review): confirm).
	 */
	for (i = 0; i < num_peers; i++) {
		peer = peer_array[i];

		if (!peer)
			continue;

		(*func)(soc, peer, arg);
		dp_peer_unref_delete(peer, mod_id);
	}

	qdf_mem_free(peer_array);
}
  437. /**
  438. * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
  439. *
  440. * This API will cache the peers in local allocated memory and calls
  441. * iterate function outside the lock.
  442. *
  443. * As this API is allocating new memory it is suggested to use this
  444. * only when lock cannot be held
  445. *
  446. * @pdev: DP pdev context
  447. * @func: function to be called for each peer
  448. * @arg: argument need to be passed to func
  449. * @mod_id: module_id
  450. *
  451. * Return: void
  452. */
  453. static inline void
  454. dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
  455. dp_peer_iter_func *func,
  456. void *arg,
  457. enum dp_mod_id mod_id)
  458. {
  459. struct dp_peer *peer;
  460. struct dp_peer *tmp_peer;
  461. struct dp_soc *soc = NULL;
  462. struct dp_vdev *vdev = NULL;
  463. struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
  464. int i = 0;
  465. int j = 0;
  466. uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
  467. if (!pdev || !pdev->soc)
  468. return;
  469. soc = pdev->soc;
  470. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  471. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  472. num_peers[i] = vdev->num_peers;
  473. peer_array[i] = qdf_mem_malloc(num_peers[i] *
  474. sizeof(struct dp_peer *));
  475. if (!peer_array[i])
  476. break;
  477. qdf_spin_lock_bh(&vdev->peer_list_lock);
  478. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  479. peer_list_elem,
  480. tmp_peer) {
  481. if (j >= num_peers[i])
  482. break;
  483. if (dp_peer_get_ref(soc, peer, mod_id) ==
  484. QDF_STATUS_SUCCESS) {
  485. peer_array[i][j] = peer;
  486. j = (j + 1);
  487. }
  488. }
  489. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  490. i = (i + 1);
  491. }
  492. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  493. for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
  494. if (!peer_array[i])
  495. break;
  496. for (j = 0; j < num_peers[i]; j++) {
  497. peer = peer_array[i][j];
  498. if (!peer)
  499. continue;
  500. (*func)(soc, peer, arg);
  501. dp_peer_unref_delete(peer, mod_id);
  502. }
  503. qdf_mem_free(peer_array[i]);
  504. }
  505. }
  506. /**
  507. * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
  508. *
  509. * This API will cache the peers in local allocated memory and calls
  510. * iterate function outside the lock.
  511. *
  512. * As this API is allocating new memory it is suggested to use this
  513. * only when lock cannot be held
  514. *
  515. * @soc: DP soc context
  516. * @func: function to be called for each peer
  517. * @arg: argument need to be passed to func
  518. * @mod_id: module_id
  519. *
  520. * Return: void
  521. */
  522. static inline void
  523. dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
  524. dp_peer_iter_func *func,
  525. void *arg,
  526. enum dp_mod_id mod_id)
  527. {
  528. struct dp_pdev *pdev;
  529. int i;
  530. if (!soc)
  531. return;
  532. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  533. pdev = soc->pdev_list[i];
  534. dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
  535. }
  536. }
#ifdef DP_PEER_STATE_DEBUG
/*
 * DP_PEER_STATE_ASSERT() - validate a peer state transition
 * @_peer: peer whose state is changing
 * @_new_state: state the peer is moving to
 * @_condition: expression that must hold for the transition to be legal
 *
 * Debug build: log the invalid transition and assert.
 */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)

#else
/* Non-debug build: log the invalid transition but do not assert */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif
  559. /**
  560. * dp_peer_state_cmp() - compare dp peer state
  561. *
  562. * @peer: DP peer
  563. * @state: state
  564. *
  565. * Return: true if state matches with peer state
  566. * false if it does not match
  567. */
  568. static inline bool
  569. dp_peer_state_cmp(struct dp_peer *peer,
  570. enum dp_peer_state state)
  571. {
  572. bool is_status_equal = false;
  573. qdf_spin_lock_bh(&peer->peer_state_lock);
  574. is_status_equal = (peer->peer_state == state);
  575. qdf_spin_unlock_bh(&peer->peer_state_lock);
  576. return is_status_equal;
  577. }
  578. /**
  579. * dp_print_ast_stats() - Dump AST table contents
  580. * @soc: Datapath soc handle
  581. *
  582. * Return: void
  583. */
  584. void dp_print_ast_stats(struct dp_soc *soc);
  585. /**
  586. * dp_rx_peer_map_handler() - handle peer map event from firmware
  587. * @soc: generic soc handle
  588. * @peer_id: peer_id from firmware
  589. * @hw_peer_id: ast index for this peer
  590. * @vdev_id: vdev ID
  591. * @peer_mac_addr: mac address of the peer
  592. * @ast_hash: ast hash value
  593. * @is_wds: flag to indicate peer map event for WDS ast entry
  594. *
  595. * associate the peer_id that firmware provided with peer entry
  596. * and update the ast table in the host with the hw_peer_id.
  597. *
  598. * Return: QDF_STATUS code
  599. */
  600. QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  601. uint16_t hw_peer_id, uint8_t vdev_id,
  602. uint8_t *peer_mac_addr, uint16_t ast_hash,
  603. uint8_t is_wds);
  604. /**
  605. * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
  606. * @soc: generic soc handle
  607. * @peer_id: peer_id from firmware
  608. * @vdev_id: vdev ID
  609. * @peer_mac_addr: mac address of the peer or wds entry
  610. * @is_wds: flag to indicate peer map event for WDS ast entry
  611. * @free_wds_count: number of wds entries freed by FW with peer delete
  612. *
  613. * Return: none
  614. */
  615. void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
  616. uint8_t vdev_id, uint8_t *peer_mac_addr,
  617. uint8_t is_wds, uint32_t free_wds_count);
  618. #ifdef DP_RX_UDP_OVER_PEER_ROAM
  619. /**
  620. * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
  621. * @soc: dp soc pointer
  622. * @vdev_id: vdev id
  623. * @peer_mac_addr: mac address of the peer
  624. *
  625. * This function resets the roamed peer auth status and mac address
  626. * after peer map indication of same peer is received from firmware.
  627. *
  628. * Return: None
  629. */
  630. void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
  631. uint8_t *peer_mac_addr);
  632. #else
  633. static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
  634. uint8_t *peer_mac_addr)
  635. {
  636. }
  637. #endif
  638. #ifdef WLAN_FEATURE_11BE_MLO
  639. /**
  640. * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
  641. * @soc: generic soc handle
  642. * @peer_id: ML peer_id from firmware
  643. * @peer_mac_addr: mac address of the peer
  644. * @mlo_flow_info: MLO AST flow info
  645. * @mlo_link_info: MLO link info
  646. *
  647. * associate the ML peer_id that firmware provided with peer entry
  648. * and update the ast table in the host with the hw_peer_id.
  649. *
  650. * Return: QDF_STATUS code
  651. */
  652. QDF_STATUS
  653. dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  654. uint8_t *peer_mac_addr,
  655. struct dp_mlo_flow_override_info *mlo_flow_info,
  656. struct dp_mlo_link_info *mlo_link_info);
  657. /**
  658. * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
  659. * @soc: generic soc handle
  660. * @peer_id: peer_id from firmware
  661. *
  662. * Return: none
  663. */
  664. void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
  665. #endif
/**
 * dp_rx_sec_ind_handler() - handle security indication for a peer
 * @soc: generic soc handle
 * @peer_id: peer_id from firmware
 * @sec_type: security type (enum cdp_sec_type)
 * @is_unicast: 1 for unicast key, 0 for multicast key
 * @michael_key: TKIP michael key (NOTE(review): inferred from name — confirm)
 * @rx_pn: receive packet number(s) (NOTE(review): inferred from name — confirm)
 *
 * Return: none
 */
void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
			   enum cdp_sec_type sec_type, int is_unicast,
			   u_int32_t *michael_key, u_int32_t *rx_pn);

/**
 * dp_get_peer_mac_addr_frm_id() - get peer mac address from peer id
 * @soc_handle: generic soc handle
 * @peer_id: peer id
 * @peer_mac: out buffer for the peer mac address
 *            (NOTE(review): assumed to be QDF_MAC_ADDR_SIZE bytes — confirm)
 *
 * Return: uint8_t (NOTE(review): presumably the peer's vdev_id — verify
 * against the definition before relying on it)
 */
uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
				    uint16_t peer_id, uint8_t *peer_mac);
  671. /**
  672. * dp_peer_add_ast() - Allocate and add AST entry into peer list
  673. * @soc: SoC handle
  674. * @peer: peer to which ast node belongs
  675. * @mac_addr: MAC address of ast node
  676. * @type: AST entry type
  677. * @flags: AST configuration flags
  678. *
  679. * This API is used by WDS source port learning function to
  680. * add a new AST entry into peer AST list
  681. *
  682. * Return: QDF_STATUS code
  683. */
  684. QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
  685. uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
  686. uint32_t flags);
  687. /**
  688. * dp_peer_del_ast() - Delete and free AST entry
  689. * @soc: SoC handle
  690. * @ast_entry: AST entry of the node
  691. *
  692. * This function removes the AST entry from peer and soc tables
  693. * It assumes caller has taken the ast lock to protect the access to these
  694. * tables
  695. *
  696. * Return: None
  697. */
  698. void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
/**
 * dp_peer_ast_unmap_handler() - handle AST unmap indication for an AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * Return: None
 */
void dp_peer_ast_unmap_handler(struct dp_soc *soc,
			       struct dp_ast_entry *ast_entry);
  701. /**
 * dp_peer_update_ast() - Update AST entry to the roamed peer
  703. * @soc: SoC handle
  704. * @peer: peer to which ast node belongs
  705. * @ast_entry: AST entry of the node
  706. * @flags: wds or hmwds
  707. *
  708. * This function update the AST entry to the roamed peer and soc tables
  709. * It assumes caller has taken the ast lock to protect the access to these
  710. * tables
  711. *
  712. * Return: 0 if ast entry is updated successfully
  713. * -1 failure
  714. */
  715. int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
  716. struct dp_ast_entry *ast_entry, uint32_t flags);
  717. /**
  718. * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
  719. * @soc: SoC handle
  720. * @ast_mac_addr: Mac address
  721. * @pdev_id: pdev Id
  722. *
  723. * It assumes caller has taken the ast lock to protect the access to
  724. * AST hash table
  725. *
  726. * Return: AST entry
  727. */
  728. struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
  729. uint8_t *ast_mac_addr,
  730. uint8_t pdev_id);
  731. /**
  732. * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address
  733. * @soc: SoC handle
  734. * @ast_mac_addr: Mac address
  735. * @vdev_id: vdev Id
  736. *
  737. * It assumes caller has taken the ast lock to protect the access to
  738. * AST hash table
  739. *
  740. * Return: AST entry
  741. */
  742. struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
  743. uint8_t *ast_mac_addr,
  744. uint8_t vdev_id);
  745. /**
  746. * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
  747. * @soc: SoC handle
  748. * @ast_mac_addr: Mac address
  749. *
  750. * It assumes caller has taken the ast lock to protect the access to
  751. * AST hash table
  752. *
  753. * Return: AST entry
  754. */
  755. struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
  756. uint8_t *ast_mac_addr);
  757. /**
  758. * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
  759. * @soc: SoC handle
  760. * @ast_entry: AST entry of the node
  761. *
  762. * This function gets the pdev_id from the ast entry.
  763. *
  764. * Return: (uint8_t) pdev_id
  765. */
  766. uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
  767. struct dp_ast_entry *ast_entry);
  768. /**
  769. * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
  770. * @soc: SoC handle
  771. * @ast_entry: AST entry of the node
  772. *
  773. * This function gets the next hop from the ast entry.
  774. *
  775. * Return: (uint8_t) next_hop
  776. */
  777. uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
  778. struct dp_ast_entry *ast_entry);
  779. /**
  780. * dp_peer_ast_set_type() - set type from the ast entry
  781. * @soc: SoC handle
  782. * @ast_entry: AST entry of the node
  783. * @type: AST entry type
  784. *
  785. * This function sets the type in the ast entry.
  786. *
  787. * Return:
  788. */
  789. void dp_peer_ast_set_type(struct dp_soc *soc,
  790. struct dp_ast_entry *ast_entry,
  791. enum cdp_txrx_ast_entry_type type);
/**
 * dp_peer_ast_send_wds_del() - request deletion of a WDS AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node to be deleted
 * @peer: peer to which the ast entry belongs
 *
 * Return: None
 */
void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);
#ifdef WLAN_FEATURE_MULTI_AST_DEL
/**
 * dp_peer_ast_send_multi_wds_del() - request deletion of multiple WDS
 * AST entries
 * @soc: SoC handle
 * @vdev_id: vdev id
 * @wds_list: list of WDS entries to be deleted
 *
 * Return: None
 */
void dp_peer_ast_send_multi_wds_del(
		struct dp_soc *soc, uint8_t vdev_id,
		struct peer_del_multi_wds_entries *wds_list);
#endif
/**
 * dp_peer_free_hmwds_cb() - callback invoked on HMWDS AST free completion
 * @ctrl_psoc: control-path psoc object
 * @dp_soc: DP soc handle
 * @cookie: opaque cookie registered along with the request
 * @status: AST free status (enum cdp_ast_free_status)
 *
 * Return: None
 */
void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status);
  804. /**
  805. * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
  806. * @soc: SoC handle
  807. * @ase: Address search entry
  808. *
  809. * This function removes the AST entry from soc AST hash table
  810. * It assumes caller has taken the ast lock to protect the access to this table
  811. *
  812. * Return: None
  813. */
  814. void dp_peer_ast_hash_remove(struct dp_soc *soc,
  815. struct dp_ast_entry *ase);
  816. /**
  817. * dp_peer_free_ast_entry() - Free up the ast entry memory
  818. * @soc: SoC handle
  819. * @ast_entry: Address search entry
  820. *
  821. * This API is used to free up the memory associated with
  822. * AST entry.
  823. *
  824. * Return: None
  825. */
  826. void dp_peer_free_ast_entry(struct dp_soc *soc,
  827. struct dp_ast_entry *ast_entry);
  828. /**
  829. * dp_peer_unlink_ast_entry() - Free up the ast entry memory
  830. * @soc: SoC handle
  831. * @ast_entry: Address search entry
  832. * @peer: peer
  833. *
  834. * This API is used to remove/unlink AST entry from the peer list
  835. * and hash list.
  836. *
  837. * Return: None
  838. */
  839. void dp_peer_unlink_ast_entry(struct dp_soc *soc,
  840. struct dp_ast_entry *ast_entry,
  841. struct dp_peer *peer);
  842. /**
  843. * dp_peer_mec_detach_entry() - Detach the MEC entry
  844. * @soc: SoC handle
  845. * @mecentry: MEC entry of the node
  846. * @ptr: pointer to free list
  847. *
  848. * The MEC entry is detached from MEC table and added to free_list
  849. * to free the object outside lock
  850. *
  851. * Return: None
  852. */
  853. void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
  854. void *ptr);
  855. /**
  856. * dp_peer_mec_free_list() - free the MEC entry from free_list
  857. * @soc: SoC handle
  858. * @ptr: pointer to free list
  859. *
  860. * Return: None
  861. */
  862. void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
  863. /**
 * dp_peer_mec_add_entry() - Allocate and add MEC entry into MEC table
  865. * @soc: SoC handle
  866. * @vdev: vdev to which mec node belongs
  867. * @mac_addr: MAC address of mec node
  868. *
  869. * This function allocates and adds MEC entry to MEC table.
  870. * It assumes caller has taken the mec lock to protect the access to these
  871. * tables
  872. *
  873. * Return: QDF_STATUS
  874. */
  875. QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
  876. struct dp_vdev *vdev,
  877. uint8_t *mac_addr);
  878. /**
  879. * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by PDEV Id
  880. * within pdev
  881. * @soc: SoC handle
  882. * @pdev_id: pdev Id
  883. * @mec_mac_addr: MAC address of mec node
  884. *
  885. * It assumes caller has taken the mec_lock to protect the access to
  886. * MEC hash table
  887. *
  888. * Return: MEC entry
  889. */
  890. struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
  891. uint8_t pdev_id,
  892. uint8_t *mec_mac_addr);
/*
 * DP_AST_ASSERT() - assert @_condition; on failure dump AST stats first,
 * then trigger QDF_BUG().
 *
 * NOTE: the expansion references a variable named 'soc' (struct dp_soc *)
 * that is NOT a macro parameter — a 'soc' must be in scope at every call
 * site.
 */
#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc);\
			QDF_BUG(_condition); \
		} \
	} while (0)
  900. /**
  901. * dp_peer_update_inactive_time() - Update inactive time for peer
  902. * @pdev: pdev object
  903. * @tag_type: htt_tlv_tag type
  904. * @tag_buf: buf message
  905. */
  906. void
  907. dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
  908. uint32_t *tag_buf);
  909. #ifndef QCA_MULTIPASS_SUPPORT
  910. static inline
  911. /**
  912. * dp_peer_set_vlan_id() - set vlan_id for this peer
  913. * @cdp_soc: soc handle
  914. * @vdev_id: id of vdev object
  915. * @peer_mac: mac address
  916. * @vlan_id: vlan id for peer
  917. *
  918. * Return: void
  919. */
  920. void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
  921. uint8_t vdev_id, uint8_t *peer_mac,
  922. uint16_t vlan_id)
  923. {
  924. }
  925. /**
  926. * dp_set_vlan_groupkey() - set vlan map for vdev
  927. * @soc_hdl: pointer to soc
  928. * @vdev_id: id of vdev handle
  929. * @vlan_id: vlan_id
  930. * @group_key: group key for vlan
  931. *
  932. * Return: set success/failure
  933. */
  934. static inline
  935. QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  936. uint16_t vlan_id, uint16_t group_key)
  937. {
  938. return QDF_STATUS_SUCCESS;
  939. }
  940. /**
  941. * dp_peer_multipass_list_init() - initialize multipass peer list
  942. * @vdev: pointer to vdev
  943. *
  944. * Return: void
  945. */
  946. static inline
  947. void dp_peer_multipass_list_init(struct dp_vdev *vdev)
  948. {
  949. }
  950. /**
  951. * dp_peer_multipass_list_remove() - remove peer from special peer list
  952. * @peer: peer handle
  953. *
  954. * Return: void
  955. */
  956. static inline
  957. void dp_peer_multipass_list_remove(struct dp_peer *peer)
  958. {
  959. }
  960. #else
  961. void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
  962. uint8_t vdev_id, uint8_t *peer_mac,
  963. uint16_t vlan_id);
  964. QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
  965. uint16_t vlan_id, uint16_t group_key);
  966. void dp_peer_multipass_list_init(struct dp_vdev *vdev);
  967. void dp_peer_multipass_list_remove(struct dp_peer *peer);
  968. #endif
  969. #ifndef QCA_PEER_MULTIQ_SUPPORT
  970. /**
  971. * dp_peer_reset_flowq_map() - reset peer flowq map table
  972. * @peer: dp peer handle
  973. *
  974. * Return: none
  975. */
  976. static inline
  977. void dp_peer_reset_flowq_map(struct dp_peer *peer)
  978. {
  979. }
  980. /**
  981. * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
  982. * @soc_hdl: generic soc handle
  983. * @is_wds: flag to indicate if peer is wds
  984. * @peer_id: peer_id from htt peer map message
  985. * @peer_mac_addr: mac address of the peer
  986. * @ast_info: ast flow override information from peer map
  987. *
  988. * Return: none
  989. */
  990. static inline
  991. void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
  992. bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
  993. struct dp_ast_flow_override_info *ast_info)
  994. {
  995. }
  996. #else
  997. void dp_peer_reset_flowq_map(struct dp_peer *peer);
  998. void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
  999. bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
  1000. struct dp_ast_flow_override_info *ast_info);
  1001. #endif
  1002. #ifdef QCA_PEER_EXT_STATS
  1003. /**
  1004. * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay stats content
  1005. * @soc: DP SoC context
  1006. * @txrx_peer: DP txrx peer context
  1007. *
  1008. * Allocate the peer delay stats context
  1009. *
  1010. * Return: QDF_STATUS_SUCCESS if allocation is
  1011. * successful
  1012. */
  1013. QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
  1014. struct dp_txrx_peer *txrx_peer);
  1015. /**
  1016. * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context
  1017. * @soc: DP SoC context
  1018. * @txrx_peer: txrx DP peer context
  1019. *
  1020. * Free the peer delay stats context
  1021. *
  1022. * Return: Void
  1023. */
  1024. void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
  1025. struct dp_txrx_peer *txrx_peer);
  1026. /**
  1027. * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer
  1028. * @txrx_peer: dp_txrx_peer handle
  1029. *
  1030. * Return: void
  1031. */
  1032. void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
  1033. #else
/* Stubs when QCA_PEER_EXT_STATS is disabled: no delay stats context exists. */
static inline
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer)
{
	/* Nothing to allocate; report success so peer attach proceeds. */
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
  1049. #endif
  1050. #ifdef WLAN_PEER_JITTER
  1051. /**
  1052. * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer
  1053. * @pdev: Datapath pdev handle
  1054. * @txrx_peer: dp_txrx_peer handle
  1055. *
  1056. * Return: QDF_STATUS
  1057. */
  1058. QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
  1059. struct dp_txrx_peer *txrx_peer);
  1060. /**
  1061. * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context
  1062. * @pdev: Datapath pdev handle
  1063. * @txrx_peer: dp_txrx_peer handle
  1064. *
  1065. * Return: void
  1066. */
  1067. void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
  1068. struct dp_txrx_peer *txrx_peer);
  1069. /**
  1070. * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer
  1071. * @txrx_peer: dp_txrx_peer handle
  1072. *
  1073. * Return: void
  1074. */
  1075. void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
  1076. #else
/* Stubs when WLAN_PEER_JITTER is disabled: no jitter stats context exists. */
static inline
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer)
{
	/* Nothing to allocate; report success so peer attach proceeds. */
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
  1092. #endif
  1093. #ifndef CONFIG_SAWF_DEF_QUEUES
/* Stubs when CONFIG_SAWF_DEF_QUEUES is disabled: no SAWF context on peer. */
static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
						struct dp_peer *peer)
{
	/* Nothing to allocate; report success so peer attach proceeds. */
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
					       struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}
  1104. #endif
  1105. #ifndef CONFIG_SAWF
/* Stubs when CONFIG_SAWF is disabled: no SAWF stats context on txrx peer. */
static inline
QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
					struct dp_txrx_peer *txrx_peer)
{
	/* Nothing to allocate; report success so peer attach proceeds. */
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
				       struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}
  1118. #endif
  1119. /**
 * dp_vdev_bss_peer_ref_n_get() - Get bss peer of a vdev
  1121. * @soc: DP soc
  1122. * @vdev: vdev
  1123. * @mod_id: id of module requesting reference
  1124. *
  1125. * Return: VDEV BSS peer
  1126. */
  1127. struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
  1128. struct dp_vdev *vdev,
  1129. enum dp_mod_id mod_id);
  1130. /**
 * dp_sta_vdev_self_peer_ref_n_get() - Get self peer of sta vdev
  1132. * @soc: DP soc
  1133. * @vdev: vdev
  1134. * @mod_id: id of module requesting reference
  1135. *
  1136. * Return: VDEV self peer
  1137. */
  1138. struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
  1139. struct dp_vdev *vdev,
  1140. enum dp_mod_id mod_id);
/**
 * dp_peer_ast_table_detach() - detach (free) the soc AST table
 * @soc: soc handle
 *
 * Return: none
 */
void dp_peer_ast_table_detach(struct dp_soc *soc);

/**
 * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
 * @soc: soc handle
 *
 * Return: none
 */
void dp_peer_find_map_detach(struct dp_soc *soc);

/**
 * dp_soc_wds_detach() - detach WDS support from the soc
 * @soc: soc handle
 *
 * Counterpart of dp_soc_wds_attach().
 *
 * Return: none
 */
void dp_soc_wds_detach(struct dp_soc *soc);

/**
 * dp_peer_ast_table_attach() - attach (allocate) the soc AST table
 * @soc: soc handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
  1151. /**
  1152. * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
  1153. * @soc: SoC handle
  1154. *
  1155. * Return: QDF_STATUS
  1156. */
  1157. QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
  1158. /**
  1159. * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
  1160. * @soc: SoC handle
  1161. *
  1162. * Return: QDF_STATUS
  1163. */
  1164. QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
  1165. /**
  1166. * dp_del_wds_entry_wrapper() - delete a WDS AST entry
  1167. * @soc: DP soc structure pointer
  1168. * @vdev_id: vdev_id
  1169. * @wds_macaddr: MAC address of ast node
  1170. * @type: type from enum cdp_txrx_ast_entry_type
  1171. * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
  1172. *
  1173. * This API is used to delete an AST entry from fw
  1174. *
  1175. * Return: None
  1176. */
  1177. void dp_del_wds_entry_wrapper(struct dp_soc *soc, uint8_t vdev_id,
  1178. uint8_t *wds_macaddr, uint8_t type,
  1179. uint8_t delete_in_fw);
  1180. void dp_soc_wds_attach(struct dp_soc *soc);
  1181. /**
  1182. * dp_peer_mec_hash_detach() - Free MEC Hash table
  1183. * @soc: SoC handle
  1184. *
  1185. * Return: None
  1186. */
  1187. void dp_peer_mec_hash_detach(struct dp_soc *soc);
  1188. /**
  1189. * dp_peer_ast_hash_detach() - Free AST Hash table
  1190. * @soc: SoC handle
  1191. *
  1192. * Return: None
  1193. */
  1194. void dp_peer_ast_hash_detach(struct dp_soc *soc);
  1195. #ifdef FEATURE_AST
/**
 * dp_peer_delete_ast_entries() - Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Delete the AST entries belonging to a peer: the self AST entry first,
 * then every entry on peer->ast_list.
 *
 * Return: None
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received(for ex in case
	 * of auth disallow due to ACL) in such cases self ast is not added
	 * to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	/*
	 * Two-variable iteration macro: temp_ast_entry holds the next node
	 * so deleting the current entry during traversal is safe.
	 */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}
  1221. /**
  1222. * dp_print_peer_ast_entries() - Dump AST entries of peer
  1223. * @soc: Datapath soc handle
  1224. * @peer: Datapath peer
  1225. * @arg: argument to iterate function
  1226. *
  1227. * Return: void
  1228. */
  1229. void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
  1230. void *arg);
  1231. #else
/* Stubs when FEATURE_AST is disabled: no AST entries exist to print/delete. */
static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
					     struct dp_peer *peer, void *arg)
{
}

static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
  1240. #endif
  1241. #ifdef FEATURE_MEC
  1242. /**
  1243. * dp_peer_mec_spinlock_create() - Create the MEC spinlock
  1244. * @soc: SoC handle
  1245. *
  1246. * Return: none
  1247. */
  1248. void dp_peer_mec_spinlock_create(struct dp_soc *soc);
  1249. /**
  1250. * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
  1251. * @soc: SoC handle
  1252. *
  1253. * Return: none
  1254. */
  1255. void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);
  1256. /**
  1257. * dp_peer_mec_flush_entries() - Delete all mec entries in table
  1258. * @soc: Datapath SOC
  1259. *
  1260. * Return: None
  1261. */
  1262. void dp_peer_mec_flush_entries(struct dp_soc *soc);
  1263. #else
/* Stubs when FEATURE_MEC is disabled: no MEC table, lock, or entries. */
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
  1273. #endif
  1274. static inline int dp_peer_find_mac_addr_cmp(
  1275. union dp_align_mac_addr *mac_addr1,
  1276. union dp_align_mac_addr *mac_addr2)
  1277. {
  1278. /*
  1279. * Intentionally use & rather than &&.
  1280. * because the operands are binary rather than generic boolean,
  1281. * the functionality is equivalent.
  1282. * Using && has the advantage of short-circuited evaluation,
  1283. * but using & has the advantage of no conditional branching,
  1284. * which is a more significant benefit.
  1285. */
  1286. return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
  1287. & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
  1288. }
  1289. /**
  1290. * dp_peer_delete() - delete DP peer
  1291. *
 * @soc: Datapath soc
  1293. * @peer: Datapath peer
  1294. * @arg: argument to iter function
  1295. *
  1296. * Return: void
  1297. */
  1298. void dp_peer_delete(struct dp_soc *soc,
  1299. struct dp_peer *peer,
  1300. void *arg);
  1301. /**
  1302. * dp_mlo_peer_delete() - delete MLO DP peer
  1303. *
  1304. * @soc: Datapath soc
  1305. * @peer: Datapath peer
  1306. * @arg: argument to iter function
  1307. *
  1308. * Return: void
  1309. */
  1310. void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);
  1311. #ifdef WLAN_FEATURE_11BE_MLO
  1312. /* is MLO connection mld peer */
  1313. #define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)
  1314. /* set peer type */
  1315. #define DP_PEER_SET_TYPE(_peer, _type_val) \
  1316. ((_peer)->peer_type = (_type_val))
  1317. /* is legacy peer */
  1318. #define IS_DP_LEGACY_PEER(_peer) \
  1319. ((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))
  1320. /* is MLO connection link peer */
  1321. #define IS_MLO_DP_LINK_PEER(_peer) \
  1322. ((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
  1323. /* is MLO connection mld peer */
  1324. #define IS_MLO_DP_MLD_PEER(_peer) \
  1325. ((_peer)->peer_type == CDP_MLD_PEER_TYPE)
  1326. /* Get Mld peer from link peer */
  1327. #define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
  1328. ((link_peer)->mld_peer)
  1329. #ifdef WLAN_MLO_MULTI_CHIP
  1330. uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);
  1331. struct dp_peer *
  1332. dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
  1333. uint8_t *peer_mac_addr,
  1334. int mac_addr_is_aligned,
  1335. uint8_t vdev_id,
  1336. uint8_t chip_id,
  1337. enum dp_mod_id mod_id);
  1338. #else
/* Single-chip build: there is only one chip, so the chip id is always 0. */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

/*
 * Single-chip build: @chip_id is irrelevant, so this falls through to the
 * regular per-soc peer hash lookup.
 */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
  1355. #endif
  1356. /**
  1357. * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
  1358. * matching mac_address
  1359. * @soc: soc handle
  1360. * @peer_mac_addr: mld peer mac address
  1361. * @mac_addr_is_aligned: is mac addr aligned
  1362. * @vdev_id: vdev_id
  1363. * @mod_id: id of module requesting reference
  1364. *
  1365. * Return: peer in success
  1366. * NULL in failure
  1367. */
  1368. static inline
  1369. struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
  1370. uint8_t *peer_mac_addr,
  1371. int mac_addr_is_aligned,
  1372. uint8_t vdev_id,
  1373. enum dp_mod_id mod_id)
  1374. {
  1375. if (soc->arch_ops.mlo_peer_find_hash_find)
  1376. return soc->arch_ops.mlo_peer_find_hash_find(soc,
  1377. peer_mac_addr,
  1378. mac_addr_is_aligned,
  1379. mod_id, vdev_id);
  1380. return NULL;
  1381. }
  1382. /**
  1383. * dp_peer_hash_find_wrapper() - find link peer or mld per according to
  1384. * peer_type
  1385. * @soc: DP SOC handle
  1386. * @peer_info: peer information for hash find
  1387. * @mod_id: ID of module requesting reference
  1388. *
  1389. * Return: peer handle
  1390. */
  1391. static inline
  1392. struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
  1393. struct cdp_peer_info *peer_info,
  1394. enum dp_mod_id mod_id)
  1395. {
  1396. struct dp_peer *peer = NULL;
  1397. if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
  1398. peer_info->peer_type == CDP_WILD_PEER_TYPE) {
  1399. peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
  1400. peer_info->mac_addr_is_aligned,
  1401. peer_info->vdev_id,
  1402. mod_id);
  1403. if (peer)
  1404. return peer;
  1405. }
  1406. if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
  1407. peer_info->peer_type == CDP_WILD_PEER_TYPE)
  1408. peer = dp_mld_peer_find_hash_find(
  1409. soc, peer_info->mac_addr,
  1410. peer_info->mac_addr_is_aligned,
  1411. peer_info->vdev_id,
  1412. mod_id);
  1413. return peer;
  1414. }
/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
 * increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * The reference is taken with DP_MOD_ID_CDP and is released by
 * dp_link_peer_del_mld_peer().
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
			       struct dp_peer *mld_peer)
{
	/* increase mld_peer ref_cnt */
	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = mld_peer;
}
/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
 * decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Releases the DP_MOD_ID_CDP reference taken by
 * dp_link_peer_add_mld_peer() and clears the back-pointer.
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = NULL;
}
  1444. /**
  1445. * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
  1446. * @mld_peer: mld peer pointer
  1447. *
  1448. * Return: None
  1449. */
  1450. static inline
  1451. void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
  1452. {
  1453. int i;
  1454. qdf_spinlock_create(&mld_peer->link_peers_info_lock);
  1455. mld_peer->num_links = 0;
  1456. for (i = 0; i < DP_MAX_MLO_LINKS; i++)
  1457. mld_peer->link_peers[i].is_valid = false;
  1458. }
/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Destroys the lock created by dp_mld_peer_init_link_peers_info().
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}
/**
 * dp_mld_peer_add_link_peer() - add link peer info to mld peer
 * @mld_peer: mld dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Records the link peer's MAC address, vdev_id and chip_id in the first
 * free slot of @mld_peer's link_peers[] array under link_peers_info_lock.
 * If all DP_MAX_MLO_LINKS slots are taken the addition fails and only
 * the log/event below records the failure.
 *
 * Return: None
 */
static inline
void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
			       struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	struct dp_soc *soc = mld_peer->vdev->pdev->soc;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (!link_peer_info->is_valid) {
			qdf_mem_copy(link_peer_info->mac_addr.raw,
				     link_peer->mac_addr.raw,
				     QDF_MAC_ADDR_SIZE);
			link_peer_info->is_valid = true;
			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
			link_peer_info->chip_id =
				dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
			mld_peer->num_links++;
			break;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	/* i == DP_MAX_MLO_LINKS here means no free slot was found. */
	dp_peer_info("%s addition of link peer %pK (" QDF_MAC_ADDR_FMT ") "
		     "to MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
		     "idx %u num_links %u",
		     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
		     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
		     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
		     i, mld_peer->num_links);
	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_ADD_LINK,
						mld_peer, link_peer, i,
						(i != DP_MAX_MLO_LINKS) ? 1 : 0);
}
/**
 * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
 * @mld_peer: MLD dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Looks up @link_peer by MAC address in @mld_peer's link_peers[] array
 * (under link_peers_info_lock) and invalidates the matching slot.
 *
 * Return: number of links left after deletion
 */
static inline
uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
				  struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	uint8_t num_links;
	struct dp_soc *soc = mld_peer->vdev->pdev->soc;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		/* dp_peer_find_mac_addr_cmp() returns 0 on a match */
		if (link_peer_info->is_valid &&
		    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
					       &link_peer_info->mac_addr)) {
			link_peer_info->is_valid = false;
			mld_peer->num_links--;
			break;
		}
	}
	num_links = mld_peer->num_links;
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	/* i == DP_MAX_MLO_LINKS here means no matching link was found. */
	dp_peer_info("%s deletion of link peer %pK (" QDF_MAC_ADDR_FMT ") "
		     "from MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
		     "idx %u num_links %u",
		     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
		     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
		     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
		     i, mld_peer->num_links);
	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_DEL_LINK,
						mld_peer, link_peer, i,
						(i != DP_MAX_MLO_LINKS) ? 1 : 0);

	return num_links;
}
/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
 * increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that hold links peers pointer array and number
 * @mod_id: id of module requesting reference
 *
 * For every valid slot in @mld_peer's link_peers[] array, looks up the
 * link peer (taking a @mod_id reference) and stores it in
 * @mld_link_peers->link_peers[]. The caller must release the references
 * with dp_release_link_peers_ref().
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
		struct dp_soc *soc,
		struct dp_peer *mld_peer,
		struct dp_mld_link_peers *mld_link_peers,
		enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i = 0, j = 0;
	struct dp_peer_link_info *link_peer_info;

	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid) {
			peer = dp_link_peer_hash_find_by_chip_id(
						soc,
						link_peer_info->mac_addr.raw,
						true,
						link_peer_info->vdev_id,
						link_peer_info->chip_id,
						mod_id);
			/* lookup can fail; only store peers actually found */
			if (peer)
				mld_link_peers->link_peers[j++] = peer;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	mld_link_peers->num_links = j;
}
  1590. /**
  1591. * dp_release_link_peers_ref() - release all link peers reference
  1592. * @mld_link_peers: structure that hold links peers pointer array and number
  1593. * @mod_id: id of module requesting reference
  1594. *
  1595. * Return: None.
  1596. */
  1597. static inline
  1598. void dp_release_link_peers_ref(
  1599. struct dp_mld_link_peers *mld_link_peers,
  1600. enum dp_mod_id mod_id)
  1601. {
  1602. struct dp_peer *peer;
  1603. uint8_t i;
  1604. for (i = 0; i < mld_link_peers->num_links; i++) {
  1605. peer = mld_link_peers->link_peers[i];
  1606. if (peer)
  1607. dp_peer_unref_delete(peer, mod_id);
  1608. mld_link_peers->link_peers[i] = NULL;
  1609. }
  1610. mld_link_peers->num_links = 0;
  1611. }
  1612. /**
  1613. * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
  1614. * @soc: Datapath soc handle
  1615. * @peer_id: peer id
  1616. * @lmac_id: lmac id to find the link peer on given lmac
  1617. *
  1618. * Return: peer_id of link peer if found
  1619. * else return HTT_INVALID_PEER
  1620. */
  1621. static inline
  1622. uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
  1623. uint8_t lmac_id)
  1624. {
  1625. uint8_t i;
  1626. struct dp_peer *peer;
  1627. struct dp_peer *link_peer;
  1628. struct dp_soc *link_peer_soc;
  1629. struct dp_mld_link_peers link_peers_info;
  1630. uint16_t link_peer_id = HTT_INVALID_PEER;
  1631. peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
  1632. if (!peer)
  1633. return HTT_INVALID_PEER;
  1634. if (IS_MLO_DP_MLD_PEER(peer)) {
  1635. /* get link peers with reference */
  1636. dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
  1637. DP_MOD_ID_CDP);
  1638. for (i = 0; i < link_peers_info.num_links; i++) {
  1639. link_peer = link_peers_info.link_peers[i];
  1640. link_peer_soc = link_peer->vdev->pdev->soc;
  1641. if ((link_peer_soc == soc) &&
  1642. (link_peer->vdev->pdev->lmac_id == lmac_id)) {
  1643. link_peer_id = link_peer->peer_id;
  1644. break;
  1645. }
  1646. }
  1647. /* release link peers reference */
  1648. dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
  1649. } else {
  1650. link_peer_id = peer_id;
  1651. }
  1652. dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
  1653. return link_peer_id;
  1654. }
  1655. /**
  1656. * dp_peer_get_tgt_peer_hash_find() - get dp_peer handle
  1657. * @soc: soc handle
  1658. * @peer_mac: peer mac address
  1659. * @mac_addr_is_aligned: is mac addr aligned
  1660. * @vdev_id: vdev_id
  1661. * @mod_id: id of module requesting reference
  1662. *
  1663. * for MLO connection, get corresponding MLD peer,
  1664. * otherwise get link peer for non-MLO case.
  1665. *
  1666. * Return: peer in success
  1667. * NULL in failure
  1668. */
  1669. static inline
  1670. struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
  1671. uint8_t *peer_mac,
  1672. int mac_addr_is_aligned,
  1673. uint8_t vdev_id,
  1674. enum dp_mod_id mod_id)
  1675. {
  1676. struct dp_peer *ta_peer = NULL;
  1677. struct dp_peer *peer = dp_peer_find_hash_find(soc,
  1678. peer_mac, 0, vdev_id,
  1679. mod_id);
  1680. if (peer) {
  1681. /* mlo connection link peer, get mld peer with reference */
  1682. if (IS_MLO_DP_LINK_PEER(peer)) {
  1683. /* increase mld peer ref_cnt */
  1684. if (QDF_STATUS_SUCCESS ==
  1685. dp_peer_get_ref(soc, peer->mld_peer, mod_id))
  1686. ta_peer = peer->mld_peer;
  1687. else
  1688. ta_peer = NULL;
  1689. /* release peer reference that added by hash find */
  1690. dp_peer_unref_delete(peer, mod_id);
  1691. } else {
  1692. /* mlo MLD peer or non-mlo link peer */
  1693. ta_peer = peer;
  1694. }
  1695. } else {
  1696. dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT,
  1697. QDF_MAC_ADDR_REF(peer_mac));
  1698. }
  1699. return ta_peer;
  1700. }
  1701. /**
  1702. * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
  1703. * @soc: core DP soc context
  1704. * @peer_id: peer id from peer object can be retrieved
  1705. * @mod_id: ID of module requesting reference
  1706. *
  1707. * for MLO connection, get corresponding MLD peer,
  1708. * otherwise get link peer for non-MLO case.
  1709. *
  1710. * Return: peer in success
  1711. * NULL in failure
  1712. */
  1713. static inline
  1714. struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
  1715. uint16_t peer_id,
  1716. enum dp_mod_id mod_id)
  1717. {
  1718. struct dp_peer *ta_peer = NULL;
  1719. struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1720. if (peer) {
  1721. /* mlo connection link peer, get mld peer with reference */
  1722. if (IS_MLO_DP_LINK_PEER(peer)) {
  1723. /* increase mld peer ref_cnt */
  1724. if (QDF_STATUS_SUCCESS ==
  1725. dp_peer_get_ref(soc, peer->mld_peer, mod_id))
  1726. ta_peer = peer->mld_peer;
  1727. else
  1728. ta_peer = NULL;
  1729. /* release peer reference that added by hash find */
  1730. dp_peer_unref_delete(peer, mod_id);
  1731. } else {
  1732. /* mlo MLD peer or non-mlo link peer */
  1733. ta_peer = peer;
  1734. }
  1735. }
  1736. return ta_peer;
  1737. }
  1738. /**
  1739. * dp_peer_mlo_delete() - peer MLO related delete operation
  1740. * @peer: DP peer handle
  1741. * Return: None
  1742. */
  1743. static inline
  1744. void dp_peer_mlo_delete(struct dp_peer *peer)
  1745. {
  1746. struct dp_peer *ml_peer;
  1747. struct dp_soc *soc;
  1748. dp_info("peer " QDF_MAC_ADDR_FMT " type %d",
  1749. QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->peer_type);
  1750. /* MLO connection link peer */
  1751. if (IS_MLO_DP_LINK_PEER(peer)) {
  1752. ml_peer = peer->mld_peer;
  1753. soc = ml_peer->vdev->pdev->soc;
  1754. /* if last link peer deletion, delete MLD peer */
  1755. if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
  1756. dp_peer_delete(soc, peer->mld_peer, NULL);
  1757. }
  1758. }
/**
 * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
 * @soc: Soc handle
 * @peer: DP peer handle
 * @vdev_id: Vdev ID
 * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mlo_setup(
		struct dp_soc *soc,
		struct dp_peer *peer,
		uint8_t vdev_id,
		struct cdp_peer_setup_info *setup_info);
  1771. /**
  1772. * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
  1773. * @peer: datapath peer
  1774. *
  1775. * Return: MLD peer in case of MLO Link peer
  1776. * Peer itself in other cases
  1777. */
  1778. static inline
  1779. struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
  1780. {
  1781. return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
  1782. }
  1783. /**
  1784. * dp_get_primary_link_peer_by_id(): Get primary link peer from the given
  1785. * peer id
  1786. * @soc: core DP soc context
  1787. * @peer_id: peer id
  1788. * @mod_id: ID of module requesting reference
  1789. *
  1790. * Return: primary link peer for the MLO peer
  1791. * legacy peer itself in case of legacy peer
  1792. */
  1793. static inline
  1794. struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
  1795. uint16_t peer_id,
  1796. enum dp_mod_id mod_id)
  1797. {
  1798. uint8_t i;
  1799. struct dp_mld_link_peers link_peers_info;
  1800. struct dp_peer *peer;
  1801. struct dp_peer *link_peer;
  1802. struct dp_peer *primary_peer = NULL;
  1803. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1804. if (!peer)
  1805. return NULL;
  1806. if (IS_MLO_DP_MLD_PEER(peer)) {
  1807. /* get link peers with reference */
  1808. dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
  1809. mod_id);
  1810. for (i = 0; i < link_peers_info.num_links; i++) {
  1811. link_peer = link_peers_info.link_peers[i];
  1812. if (link_peer->primary_link) {
  1813. primary_peer = link_peer;
  1814. /*
  1815. * Take additional reference over
  1816. * primary link peer.
  1817. */
  1818. dp_peer_get_ref(NULL, primary_peer, mod_id);
  1819. break;
  1820. }
  1821. }
  1822. /* release link peers reference */
  1823. dp_release_link_peers_ref(&link_peers_info, mod_id);
  1824. dp_peer_unref_delete(peer, mod_id);
  1825. } else {
  1826. primary_peer = peer;
  1827. }
  1828. return primary_peer;
  1829. }
  1830. /**
  1831. * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
  1832. * @peer: Datapath peer
  1833. *
  1834. * Return: dp_txrx_peer from MLD peer if peer type is link peer
  1835. * dp_txrx_peer from peer itself for other cases
  1836. */
  1837. static inline
  1838. struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
  1839. {
  1840. return IS_MLO_DP_LINK_PEER(peer) ?
  1841. peer->mld_peer->txrx_peer : peer->txrx_peer;
  1842. }
  1843. /**
  1844. * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
  1845. * @peer: Datapath peer
  1846. *
  1847. * Return: true if peer is primary link peer or legacy peer
  1848. * false otherwise
  1849. */
  1850. static inline
  1851. bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
  1852. {
  1853. if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
  1854. return true;
  1855. else if (IS_DP_LEGACY_PEER(peer))
  1856. return true;
  1857. else
  1858. return false;
  1859. }
  1860. /**
  1861. * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
  1862. *
  1863. * @soc: core DP soc context
  1864. * @peer_id: peer id from peer object can be retrieved
  1865. * @handle: reference handle
  1866. * @mod_id: ID of module requesting reference
  1867. *
  1868. * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
  1869. */
  1870. static inline struct dp_txrx_peer *
  1871. dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
  1872. uint16_t peer_id,
  1873. dp_txrx_ref_handle *handle,
  1874. enum dp_mod_id mod_id)
  1875. {
  1876. struct dp_peer *peer;
  1877. struct dp_txrx_peer *txrx_peer;
  1878. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1879. if (!peer)
  1880. return NULL;
  1881. txrx_peer = dp_get_txrx_peer(peer);
  1882. if (txrx_peer) {
  1883. *handle = (dp_txrx_ref_handle)peer;
  1884. return txrx_peer;
  1885. }
  1886. dp_peer_unref_delete(peer, mod_id);
  1887. return NULL;
  1888. }
/**
 * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
 *
 * @soc: core DP soc context
 *
 * Return: void
 */
void dp_print_mlo_ast_stats_be(struct dp_soc *soc);

#else
/* Non-MLO build (WLAN_FEATURE_11BE_MLO disabled): every peer is a plain
 * legacy peer, so the MLO classification macros collapse to constants.
 */
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false

#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) true
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL
/* Non-MLO stub: peer_info lookup degenerates to a plain hash find. */
static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
					  struct cdp_peer_info *peer_info,
					  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_info->mac_addr,
				      peer_info->mac_addr_is_aligned,
				      peer_info->vdev_id,
				      mod_id);
}

/* Non-MLO stub: the target peer is simply the hash-found peer itself. */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
}

/* Non-MLO stub: the target peer is the peer referenced by the id. */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}
/* Non-MLO stub: nothing to set up; report success. */
static inline
QDF_STATUS dp_peer_mlo_setup(
		struct dp_soc *soc,
		struct dp_peer *peer,
		uint8_t vdev_id,
		struct cdp_peer_setup_info *setup_info)
{
	return QDF_STATUS_SUCCESS;
}

/* Non-MLO stubs: MLD link bookkeeping does not exist in this build. */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
}

/* Non-MLO stub: single chip, always id 0. */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

/* Non-MLO stub: chip id is ignored; fall back to a plain hash find. */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
/* Non-MLO stub: no MLD indirection; the peer is its own target. */
static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	return peer;
}

/* Non-MLO stub: the referenced peer is the (only) "primary link" peer. */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

/* Non-MLO stub: the txrx peer always hangs off the peer directly. */
static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return peer->txrx_peer;
}

/* Non-MLO stub: every peer counts as a primary link peer. */
static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	return true;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)
{
	return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}

/* Non-MLO stub: there are no link peers; the id maps to itself. */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	return peer_id;
}

/* Non-MLO stub: no MLO AST stats to print. */
static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
{
}
#endif /* WLAN_FEATURE_11BE_MLO */
  2029. static inline
  2030. void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
  2031. {
  2032. uint8_t i;
  2033. qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
  2034. sizeof(struct dp_rx_tid_defrag));
  2035. for (i = 0; i < DP_MAX_TIDS; i++)
  2036. qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
  2037. }
  2038. static inline
  2039. void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
  2040. {
  2041. uint8_t i;
  2042. for (i = 0; i < DP_MAX_TIDS; i++)
  2043. qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
  2044. }
#ifdef PEER_CACHE_RX_PKTS
/* Set up the cached-rx buffer queue: its lock, threshold and list. */
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
	qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
	txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
			DP_RX_CACHED_BUFQ_THRESH);
}

/* Tear down the cached-rx buffer queue in reverse creation order. */
static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
	qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
#else
/* Rx packet caching disabled: bufq init/deinit are no-ops. */
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
#endif
  2070. /**
  2071. * dp_peer_update_state() - update dp peer state
  2072. *
  2073. * @soc: core DP soc context
  2074. * @peer: DP peer
  2075. * @state: new state
  2076. *
  2077. * Return: None
  2078. */
  2079. static inline void
  2080. dp_peer_update_state(struct dp_soc *soc,
  2081. struct dp_peer *peer,
  2082. enum dp_peer_state state)
  2083. {
  2084. uint8_t peer_state;
  2085. qdf_spin_lock_bh(&peer->peer_state_lock);
  2086. peer_state = peer->peer_state;
  2087. switch (state) {
  2088. case DP_PEER_STATE_INIT:
  2089. DP_PEER_STATE_ASSERT
  2090. (peer, state, (peer_state != DP_PEER_STATE_ACTIVE) ||
  2091. (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
  2092. break;
  2093. case DP_PEER_STATE_ACTIVE:
  2094. DP_PEER_STATE_ASSERT(peer, state,
  2095. (peer_state == DP_PEER_STATE_INIT));
  2096. break;
  2097. case DP_PEER_STATE_LOGICAL_DELETE:
  2098. DP_PEER_STATE_ASSERT(peer, state,
  2099. (peer_state == DP_PEER_STATE_ACTIVE) ||
  2100. (peer_state == DP_PEER_STATE_INIT));
  2101. break;
  2102. case DP_PEER_STATE_INACTIVE:
  2103. if (IS_MLO_DP_MLD_PEER(peer))
  2104. DP_PEER_STATE_ASSERT
  2105. (peer, state,
  2106. (peer_state == DP_PEER_STATE_ACTIVE));
  2107. else
  2108. DP_PEER_STATE_ASSERT
  2109. (peer, state,
  2110. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  2111. break;
  2112. case DP_PEER_STATE_FREED:
  2113. if (peer->sta_self_peer)
  2114. DP_PEER_STATE_ASSERT
  2115. (peer, state, (peer_state == DP_PEER_STATE_INIT));
  2116. else
  2117. DP_PEER_STATE_ASSERT
  2118. (peer, state,
  2119. (peer_state == DP_PEER_STATE_INACTIVE) ||
  2120. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  2121. break;
  2122. default:
  2123. qdf_spin_unlock_bh(&peer->peer_state_lock);
  2124. dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
  2125. state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  2126. return;
  2127. }
  2128. peer->peer_state = state;
  2129. qdf_spin_unlock_bh(&peer->peer_state_lock);
  2130. dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
  2131. peer_state, state,
  2132. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  2133. }
  2134. /**
  2135. * dp_vdev_iterate_specific_peer_type() - API to iterate through vdev peer
  2136. * list based on type of peer (Legacy or MLD peer)
  2137. *
  2138. * @vdev: DP vdev context
  2139. * @func: function to be called for each peer
  2140. * @arg: argument need to be passed to func
  2141. * @mod_id: module_id
  2142. * @peer_type: type of peer - MLO Link Peer or Legacy Peer
  2143. *
  2144. * Return: void
  2145. */
  2146. static inline void
  2147. dp_vdev_iterate_specific_peer_type(struct dp_vdev *vdev,
  2148. dp_peer_iter_func *func,
  2149. void *arg, enum dp_mod_id mod_id,
  2150. enum dp_peer_type peer_type)
  2151. {
  2152. struct dp_peer *peer;
  2153. struct dp_peer *tmp_peer;
  2154. struct dp_soc *soc = NULL;
  2155. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  2156. return;
  2157. soc = vdev->pdev->soc;
  2158. qdf_spin_lock_bh(&vdev->peer_list_lock);
  2159. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  2160. peer_list_elem,
  2161. tmp_peer) {
  2162. if (dp_peer_get_ref(soc, peer, mod_id) ==
  2163. QDF_STATUS_SUCCESS) {
  2164. if ((peer_type == DP_PEER_TYPE_LEGACY &&
  2165. (IS_DP_LEGACY_PEER(peer))) ||
  2166. (peer_type == DP_PEER_TYPE_MLO_LINK &&
  2167. (IS_MLO_DP_LINK_PEER(peer)))) {
  2168. (*func)(soc, peer, arg);
  2169. }
  2170. dp_peer_unref_delete(peer, mod_id);
  2171. }
  2172. }
  2173. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  2174. }
#ifdef REO_SHARED_QREF_TABLE_EN
/* Remove this peer's rx queue address from the shared REO qref table. */
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
					struct dp_peer *peer);
#else
/* Shared qref table disabled: nothing to delete. */
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
						      struct dp_peer *peer) {}
#endif

/**
 * dp_peer_check_wds_ext_peer() - Check WDS ext peer
 *
 * @peer: DP peer
 *
 * Return: True for WDS ext peer, false otherwise
 */
bool dp_peer_check_wds_ext_peer(struct dp_peer *peer);

/**
 * dp_gen_ml_peer_id() - Generate MLD peer id for DP
 *
 * @soc: DP soc context
 * @peer_id: mld peer id
 *
 * Return: DP MLD peer id
 */
uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id);
#endif /* _DP_PEER_H_ */