dp_peer.h

  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #ifndef _DP_PEER_H_
  20. #define _DP_PEER_H_
  21. #include <qdf_types.h>
  22. #include <qdf_lock.h>
  23. #include "dp_types.h"
  24. #include "dp_internal.h"
  25. #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
  26. #include "hal_reo.h"
  27. #endif
  28. #define DP_INVALID_PEER_ID 0xffff
  29. #define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
  30. #define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */
  31. #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
  32. #define DP_PEER_HASH_LOAD_MULT 2
  33. #define DP_PEER_HASH_LOAD_SHIFT 0
  34. /* Threshold for peer's cached buf queue beyond which frames are dropped */
  35. #define DP_RX_CACHED_BUFQ_THRESH 64
  36. #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
  37. #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
  38. #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
  39. #define dp_peer_info(params...) \
  40. __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
  41. #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
  42. #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
  43. /**
  44. * enum dp_bands - WiFi Band
  45. *
  46. * @DP_BAND_INVALID: Invalid band
  47. * @DP_BAND_2GHZ: 2GHz link
  48. * @DP_BAND_5GHZ: 5GHz link
  49. * @DP_BAND_6GHZ: 6GHz link
  50. * @DP_BAND_UNKNOWN: Unknown band
  51. */
  52. enum dp_bands {
  53. DP_BAND_INVALID = 0,
  54. DP_BAND_2GHZ = 1,
  55. DP_BAND_5GHZ = 2,
  56. DP_BAND_6GHZ = 3,
  57. DP_BAND_UNKNOWN = 4,
  58. };
  59. #endif
  60. void check_free_list_for_invalid_flush(struct dp_soc *soc);
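/**
 * add_entry_alloc_list() - log a REO queue descriptor allocation
 * @soc: DP soc context
 * @rx_tid: rx TID whose hardware queue descriptor was allocated
 * @peer: peer owning the rx TID
 * @hw_qdesc_vaddr: aligned virtual address of the allocated descriptor
 *
 * Records the descriptor addresses, timestamp, peer_id and tid at the
 * current index of soc->list_qdesc_addr_alloc and advances the alloc index
 * as a circular list of wlan_cfg_ctx->qref_control_size entries. A size of
 * zero disables this logging.
 */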
  61. static inline
  62. void add_entry_alloc_list(struct dp_soc *soc, struct dp_rx_tid *rx_tid,
  63. struct dp_peer *peer, void *hw_qdesc_vaddr)
  64. {
  65. uint32_t max_list_size;
  66. unsigned long curr_ts = qdf_get_system_timestamp();
  67. uint32_t qref_index = soc->alloc_addr_list_idx;
  68. max_list_size = soc->wlan_cfg_ctx->qref_control_size;
  69. if (max_list_size == 0)
  70. return;
  71. soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_paddr =
  72. rx_tid->hw_qdesc_paddr;
  73. soc->list_qdesc_addr_alloc[qref_index].ts_qdesc_mem_hdl = curr_ts;
  74. soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_vaddr_align =
  75. hw_qdesc_vaddr;
  76. soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_vaddr_unalign =
  77. rx_tid->hw_qdesc_vaddr_unaligned;
  78. soc->list_qdesc_addr_alloc[qref_index].peer_id = peer->peer_id;
  79. soc->list_qdesc_addr_alloc[qref_index].tid = rx_tid->tid;
  80. soc->alloc_addr_list_idx++;
  81. if (soc->alloc_addr_list_idx == max_list_size)
  82. soc->alloc_addr_list_idx = 0;
  83. }
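/**
 * add_entry_free_list() - log a REO queue descriptor free
 * @soc: DP soc context
 * @rx_tid: rx TID whose hardware queue descriptor is being freed
 *
 * Records the descriptor addresses and timestamp at the current index of
 * soc->list_qdesc_addr_free and advances the free index as a circular list
 * of wlan_cfg_ctx->qref_control_size entries. A size of zero disables this
 * logging.
 */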
  84. static inline
  85. void add_entry_free_list(struct dp_soc *soc, struct dp_rx_tid *rx_tid)
  86. {
  87. uint32_t max_list_size;
  88. unsigned long curr_ts = qdf_get_system_timestamp();
  89. uint32_t qref_index = soc->free_addr_list_idx;
  90. max_list_size = soc->wlan_cfg_ctx->qref_control_size;
  91. if (max_list_size == 0)
  92. return;
  93. soc->list_qdesc_addr_free[qref_index].ts_qdesc_mem_hdl = curr_ts;
  94. soc->list_qdesc_addr_free[qref_index].hw_qdesc_paddr =
  95. rx_tid->hw_qdesc_paddr;
  96. soc->list_qdesc_addr_free[qref_index].hw_qdesc_vaddr_align =
  97. rx_tid->hw_qdesc_vaddr_aligned;
  98. soc->list_qdesc_addr_free[qref_index].hw_qdesc_vaddr_unalign =
  99. rx_tid->hw_qdesc_vaddr_unaligned;
  100. soc->free_addr_list_idx++;
  101. if (soc->free_addr_list_idx == max_list_size)
  102. soc->free_addr_list_idx = 0;
  103. }
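/**
 * add_entry_write_list() - log a REO queue reference write for a peer TID
 * @soc: DP soc context
 * @peer: peer whose TID queue reference is being written
 * @tid: TID number
 *
 * Records the descriptor physical address, timestamp, peer_id and tid at
 * the current index of soc->reo_write_list and advances the write index as
 * a circular list of wlan_cfg_ctx->qref_control_size entries.
 */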
  104. static inline
  105. void add_entry_write_list(struct dp_soc *soc, struct dp_peer *peer,
  106. uint32_t tid)
  107. {
  108. uint32_t max_list_size;
  109. unsigned long curr_ts = qdf_get_system_timestamp();
  110. max_list_size = soc->wlan_cfg_ctx->qref_control_size;
  111. if (max_list_size == 0)
  112. return;
  113. soc->reo_write_list[soc->write_paddr_list_idx].ts_qaddr_del = curr_ts;
  114. soc->reo_write_list[soc->write_paddr_list_idx].peer_id = peer->peer_id;
  115. soc->reo_write_list[soc->write_paddr_list_idx].paddr =
  116. peer->rx_tid[tid].hw_qdesc_paddr;
  117. soc->reo_write_list[soc->write_paddr_list_idx].tid = tid;
  118. soc->write_paddr_list_idx++;
  119. if (soc->write_paddr_list_idx == max_list_size)
  120. soc->write_paddr_list_idx = 0;
  121. }
  122. #ifdef REO_QDESC_HISTORY
  123. enum reo_qdesc_event_type {
  124. REO_QDESC_UPDATE_CB = 0,
  125. REO_QDESC_FREE,
  126. };
  127. struct reo_qdesc_event {
  128. qdf_dma_addr_t qdesc_addr;
  129. uint64_t ts;
  130. enum reo_qdesc_event_type type;
  131. uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
  132. };
  133. #endif
  134. struct ast_del_ctxt {
  135. bool age;
  136. int del_count;
  137. };
  138. #ifdef QCA_SUPPORT_WDS_EXTENDED
  139. /**
  140. * dp_peer_is_wds_ext_peer() - peer is WDS_EXT peer
  141. *
  142. * @peer: DP txrx peer context
  143. *
  144. * This API checks whether the peer is a WDS_EXT peer.
  145. *
  146. * Return: true if the peer is a wds_ext peer, false otherwise
  147. */
  148. static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
  149. {
  150. return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
  151. }
  152. #else
  153. static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
  154. {
  155. return false;
  156. }
  157. #endif
  158. typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
  159. void *arg);
  160. /**
  161. * dp_peer_unref_delete() - unref and delete peer
  162. * @peer: Datapath peer handle
  163. * @id: ID of module releasing reference
  164. *
  165. */
  166. void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
  167. /**
  168. * dp_txrx_peer_unref_delete() - unref and delete peer
  169. * @handle: Datapath txrx ref handle
  170. * @id: Module ID of the caller
  171. *
  172. */
  173. void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
  174. /**
  175. * dp_peer_find_hash_find() - returns legacy or mlo link peer from
  176. * peer_hash_table matching vdev_id and mac_address
  177. * @soc: soc handle
  178. * @peer_mac_addr: peer mac address
  179. * @mac_addr_is_aligned: is mac addr aligned
  180. * @vdev_id: vdev_id
  181. * @mod_id: id of module requesting reference
  182. *
  183. * Return: peer on success
  184. * NULL on failure
  185. */
  186. struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
  187. uint8_t *peer_mac_addr,
  188. int mac_addr_is_aligned,
  189. uint8_t vdev_id,
  190. enum dp_mod_id mod_id);
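/*
 * Usage sketch for dp_peer_find_hash_find() (illustrative only; the module
 * id DP_MOD_ID_CDP and the surrounding caller are assumptions, not part of
 * this header). The reference taken by the lookup must be dropped with
 * dp_peer_unref_delete() using the same module id:
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_find_hash_find(soc, mac_addr, 0, vdev_id,
 *				      DP_MOD_ID_CDP);
 *	if (peer) {
 *		... use peer ...
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *	}
 */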
  191. /**
  192. * dp_peer_find_by_id_valid() - check if peer exists for given id
  193. * @soc: core DP soc context
  194. * @peer_id: peer id from which the peer object can be retrieved
  195. *
  196. * Return: true if peer exists, false otherwise
  197. */
  198. bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
  199. /**
  200. * dp_peer_get_ref() - Acquire a reference on the given peer
  201. *
  202. * @soc: core DP soc context
  203. * @peer: DP peer
  204. * @mod_id: id of module requesting the reference
  205. *
  206. * Return: QDF_STATUS_SUCCESS if reference held successfully
  207. * else QDF_STATUS_E_INVAL
  208. */
  209. static inline
  210. QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
  211. struct dp_peer *peer,
  212. enum dp_mod_id mod_id)
  213. {
  214. if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
  215. return QDF_STATUS_E_INVAL;
  216. if (mod_id > DP_MOD_ID_RX)
  217. qdf_atomic_inc(&peer->mod_refs[mod_id]);
  218. return QDF_STATUS_SUCCESS;
  219. }
  220. /**
  221. * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
  222. *
  223. * @soc: core DP soc context
  224. * @peer_id: peer id from which the peer object can be retrieved
  225. * @mod_id: module id
  226. *
  227. * Return: struct dp_peer*: Pointer to DP peer object
  228. */
  229. static inline struct dp_peer *
  230. __dp_peer_get_ref_by_id(struct dp_soc *soc,
  231. uint16_t peer_id,
  232. enum dp_mod_id mod_id)
  233. {
  234. struct dp_peer *peer;
  235. qdf_spin_lock_bh(&soc->peer_map_lock);
  236. peer = (peer_id >= soc->max_peer_id) ? NULL :
  237. soc->peer_id_to_obj_map[peer_id];
  238. if (!peer ||
  239. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  240. qdf_spin_unlock_bh(&soc->peer_map_lock);
  241. return NULL;
  242. }
  243. qdf_spin_unlock_bh(&soc->peer_map_lock);
  244. return peer;
  245. }
  246. /**
  247. * dp_peer_get_ref_by_id() - Returns peer object given the peer id
  248. * if peer state is active
  249. *
  250. * @soc: core DP soc context
  251. * @peer_id: peer id from which the peer object can be retrieved
  252. * @mod_id: ID of module requesting reference
  253. *
  254. * Return: struct dp_peer*: Pointer to DP peer object
  255. */
  256. static inline
  257. struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
  258. uint16_t peer_id,
  259. enum dp_mod_id mod_id)
  260. {
  261. struct dp_peer *peer;
  262. qdf_spin_lock_bh(&soc->peer_map_lock);
  263. peer = (peer_id >= soc->max_peer_id) ? NULL :
  264. soc->peer_id_to_obj_map[peer_id];
  265. if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
  266. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  267. qdf_spin_unlock_bh(&soc->peer_map_lock);
  268. return NULL;
  269. }
  270. qdf_spin_unlock_bh(&soc->peer_map_lock);
  271. return peer;
  272. }
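/*
 * Usage sketch for the peer-id based lookup (illustrative only; the module
 * id and error handling are assumptions). A NULL return means the peer id
 * is out of range, unmapped, or the peer is already in logical delete:
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
 *	if (!peer)
 *		return;
 *	... use peer under the held reference ...
 *	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */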
  273. /**
  274. * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
  275. *
  276. * @soc: core DP soc context
  277. * @peer_id: peer id from which the peer object can be retrieved
  278. * @handle: reference handle
  279. * @mod_id: ID of module requesting reference
  280. *
  281. * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
  282. */
  283. static inline struct dp_txrx_peer *
  284. dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
  285. uint16_t peer_id,
  286. dp_txrx_ref_handle *handle,
  287. enum dp_mod_id mod_id)
  288. {
  289. struct dp_peer *peer;
  290. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  291. if (!peer)
  292. return NULL;
  293. if (!peer->txrx_peer) {
  294. dp_peer_unref_delete(peer, mod_id);
  295. return NULL;
  296. }
  297. *handle = (dp_txrx_ref_handle)peer;
  298. return peer->txrx_peer;
  299. }
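/*
 * Usage sketch for dp_txrx_peer_get_ref_by_id() (illustrative only; the
 * module id is an assumption). The txrx peer is returned along with a
 * reference handle, and the reference is released through
 * dp_txrx_peer_unref_delete() using that handle:
 *
 *	struct dp_txrx_peer *txrx_peer;
 *	dp_txrx_ref_handle ref_handle = NULL;
 *
 *	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, &ref_handle,
 *					       DP_MOD_ID_CDP);
 *	if (!txrx_peer)
 *		return;
 *	... update txrx_peer ...
 *	dp_txrx_peer_unref_delete(ref_handle, DP_MOD_ID_CDP);
 */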
  300. #ifdef PEER_CACHE_RX_PKTS
  301. /**
  302. * dp_rx_flush_rx_cached() - flush cached rx frames
  303. * @peer: peer
  304. * @drop: set flag to drop frames
  305. *
  306. * Return: None
  307. */
  308. void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
  309. #else
  310. static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
  311. {
  312. }
  313. #endif
  314. static inline void
  315. dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
  316. {
  317. qdf_spin_lock_bh(&peer->peer_info_lock);
  318. peer->state = OL_TXRX_PEER_STATE_DISC;
  319. qdf_spin_unlock_bh(&peer->peer_info_lock);
  320. dp_rx_flush_rx_cached(peer, true);
  321. }
  322. /**
  323. * dp_vdev_iterate_peer() - API to iterate through vdev peer list
  324. *
  325. * @vdev: DP vdev context
  326. * @func: function to be called for each peer
  327. * @arg: argument need to be passed to func
  328. * @mod_id: module_id
  329. *
  330. * Return: void
  331. */
  332. static inline void
  333. dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
  334. enum dp_mod_id mod_id)
  335. {
  336. struct dp_peer *peer;
  337. struct dp_peer *tmp_peer;
  338. struct dp_soc *soc = NULL;
  339. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  340. return;
  341. soc = vdev->pdev->soc;
  342. qdf_spin_lock_bh(&vdev->peer_list_lock);
  343. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  344. peer_list_elem,
  345. tmp_peer) {
  346. if (dp_peer_get_ref(soc, peer, mod_id) ==
  347. QDF_STATUS_SUCCESS) {
  348. (*func)(soc, peer, arg);
  349. dp_peer_unref_delete(peer, mod_id);
  350. }
  351. }
  352. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  353. }
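/*
 * Usage sketch for the iterator APIs (illustrative only; the callback name,
 * counter and module id are assumptions). The callback must match
 * dp_peer_iter_func; the iterator takes and drops the peer reference around
 * each call, so the callback must not release it:
 *
 *	static void example_count_peers(struct dp_soc *soc,
 *					struct dp_peer *peer, void *arg)
 *	{
 *		uint32_t *count = (uint32_t *)arg;
 *
 *		(*count)++;
 *	}
 *
 *	uint32_t count = 0;
 *
 *	dp_vdev_iterate_peer(vdev, example_count_peers, &count,
 *			     DP_MOD_ID_CDP);
 */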
  354. /**
  355. * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
  356. *
  357. * @pdev: DP pdev context
  358. * @func: function to be called for each peer
  359. * @arg: argument need to be passed to func
  360. * @mod_id: module_id
  361. *
  362. * Return: void
  363. */
  364. static inline void
  365. dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
  366. enum dp_mod_id mod_id)
  367. {
  368. struct dp_vdev *vdev;
  369. if (!pdev)
  370. return;
  371. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  372. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
  373. dp_vdev_iterate_peer(vdev, func, arg, mod_id);
  374. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  375. }
  376. /**
  377. * dp_soc_iterate_peer() - API to iterate through all peers of soc
  378. *
  379. * @soc: DP soc context
  380. * @func: function to be called for each peer
  381. * @arg: argument need to be passed to func
  382. * @mod_id: module_id
  383. *
  384. * Return: void
  385. */
  386. static inline void
  387. dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
  388. enum dp_mod_id mod_id)
  389. {
  390. struct dp_pdev *pdev;
  391. int i;
  392. if (!soc)
  393. return;
  394. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  395. pdev = soc->pdev_list[i];
  396. dp_pdev_iterate_peer(pdev, func, arg, mod_id);
  397. }
  398. }
  399. /**
  400. * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
  401. *
  402. * This API will cache the peers in local allocated memory and calls
  403. * iterate function outside the lock.
  404. *
  405. * As this API is allocating new memory it is suggested to use this
  406. * only when lock cannot be held
  407. *
  408. * @vdev: DP vdev context
  409. * @func: function to be called for each peer
  410. * @arg: argument need to be passed to func
  411. * @mod_id: module_id
  412. *
  413. * Return: void
  414. */
  415. static inline void
  416. dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
  417. dp_peer_iter_func *func,
  418. void *arg,
  419. enum dp_mod_id mod_id)
  420. {
  421. struct dp_peer *peer;
  422. struct dp_peer *tmp_peer;
  423. struct dp_soc *soc = NULL;
  424. struct dp_peer **peer_array = NULL;
  425. int i = 0;
  426. uint32_t num_peers = 0;
  427. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  428. return;
  429. num_peers = vdev->num_peers;
  430. soc = vdev->pdev->soc;
  431. peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
  432. if (!peer_array)
  433. return;
  434. qdf_spin_lock_bh(&vdev->peer_list_lock);
  435. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  436. peer_list_elem,
  437. tmp_peer) {
  438. if (i >= num_peers)
  439. break;
  440. if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
  441. peer_array[i] = peer;
  442. i = (i + 1);
  443. }
  444. }
  445. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  446. for (i = 0; i < num_peers; i++) {
  447. peer = peer_array[i];
  448. if (!peer)
  449. continue;
  450. (*func)(soc, peer, arg);
  451. dp_peer_unref_delete(peer, mod_id);
  452. }
  453. qdf_mem_free(peer_array);
  454. }
  455. /**
  456. * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
  457. *
  458. * This API will cache the peers in local allocated memory and calls
  459. * iterate function outside the lock.
  460. *
  461. * As this API is allocating new memory it is suggested to use this
  462. * only when lock cannot be held
  463. *
  464. * @pdev: DP pdev context
  465. * @func: function to be called for each peer
  466. * @arg: argument need to be passed to func
  467. * @mod_id: module_id
  468. *
  469. * Return: void
  470. */
  471. static inline void
  472. dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
  473. dp_peer_iter_func *func,
  474. void *arg,
  475. enum dp_mod_id mod_id)
  476. {
  477. struct dp_peer *peer;
  478. struct dp_peer *tmp_peer;
  479. struct dp_soc *soc = NULL;
  480. struct dp_vdev *vdev = NULL;
  481. struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
  482. int i = 0;
  483. int j = 0;
  484. uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
  485. if (!pdev || !pdev->soc)
  486. return;
  487. soc = pdev->soc;
  488. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  489. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  490. num_peers[i] = vdev->num_peers;
  491. peer_array[i] = qdf_mem_malloc(num_peers[i] *
  492. sizeof(struct dp_peer *));
  493. if (!peer_array[i])
  494. break;
  495. qdf_spin_lock_bh(&vdev->peer_list_lock);
  496. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  497. peer_list_elem,
  498. tmp_peer) {
  499. if (j >= num_peers[i])
  500. break;
  501. if (dp_peer_get_ref(soc, peer, mod_id) ==
  502. QDF_STATUS_SUCCESS) {
  503. peer_array[i][j] = peer;
  504. j = (j + 1);
  505. }
  506. }
  507. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  508. i = (i + 1);
  509. }
  510. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  511. for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
  512. if (!peer_array[i])
  513. break;
  514. for (j = 0; j < num_peers[i]; j++) {
  515. peer = peer_array[i][j];
  516. if (!peer)
  517. continue;
  518. (*func)(soc, peer, arg);
  519. dp_peer_unref_delete(peer, mod_id);
  520. }
  521. qdf_mem_free(peer_array[i]);
  522. }
  523. }
  524. /**
  525. * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
  526. *
  527. * This API will cache the peers in local allocated memory and calls
  528. * iterate function outside the lock.
  529. *
  530. * As this API is allocating new memory it is suggested to use this
  531. * only when lock cannot be held
  532. *
  533. * @soc: DP soc context
  534. * @func: function to be called for each peer
  535. * @arg: argument need to be passed to func
  536. * @mod_id: module_id
  537. *
  538. * Return: void
  539. */
  540. static inline void
  541. dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
  542. dp_peer_iter_func *func,
  543. void *arg,
  544. enum dp_mod_id mod_id)
  545. {
  546. struct dp_pdev *pdev;
  547. int i;
  548. if (!soc)
  549. return;
  550. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  551. pdev = soc->pdev_list[i];
  552. dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
  553. }
  554. }
  555. #ifdef DP_PEER_STATE_DEBUG
  556. #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
  557. do { \
  558. if (!(_condition)) { \
  559. dp_alert("Invalid state shift from %u to %u peer " \
  560. QDF_MAC_ADDR_FMT, \
  561. (_peer)->peer_state, (_new_state), \
  562. QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
  563. QDF_ASSERT(0); \
  564. } \
  565. } while (0)
  566. #else
  567. #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
  568. do { \
  569. if (!(_condition)) { \
  570. dp_alert("Invalid state shift from %u to %u peer " \
  571. QDF_MAC_ADDR_FMT, \
  572. (_peer)->peer_state, (_new_state), \
  573. QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
  574. } \
  575. } while (0)
  576. #endif
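/*
 * Usage sketch for DP_PEER_STATE_ASSERT (illustrative only; the chosen
 * legal-transition check is an assumption). A state update validates the
 * transition before applying it under peer_state_lock:
 *
 *	qdf_spin_lock_bh(&peer->peer_state_lock);
 *	DP_PEER_STATE_ASSERT(peer, new_state,
 *			     peer->peer_state == DP_PEER_STATE_INIT);
 *	peer->peer_state = new_state;
 *	qdf_spin_unlock_bh(&peer->peer_state_lock);
 */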
  577. /**
  578. * dp_peer_state_cmp() - compare dp peer state
  579. *
  580. * @peer: DP peer
  581. * @state: state
  582. *
  583. * Return: true if state matches with peer state
  584. * false if it does not match
  585. */
  586. static inline bool
  587. dp_peer_state_cmp(struct dp_peer *peer,
  588. enum dp_peer_state state)
  589. {
  590. bool is_status_equal = false;
  591. qdf_spin_lock_bh(&peer->peer_state_lock);
  592. is_status_equal = (peer->peer_state == state);
  593. qdf_spin_unlock_bh(&peer->peer_state_lock);
  594. return is_status_equal;
  595. }
  596. /**
  597. * dp_print_ast_stats() - Dump AST table contents
  598. * @soc: Datapath soc handle
  599. *
  600. * Return: void
  601. */
  602. void dp_print_ast_stats(struct dp_soc *soc);
  603. /**
  604. * dp_rx_peer_map_handler() - handle peer map event from firmware
  605. * @soc: generic soc handle
  606. * @peer_id: peer_id from firmware
  607. * @hw_peer_id: ast index for this peer
  608. * @vdev_id: vdev ID
  609. * @peer_mac_addr: mac address of the peer
  610. * @ast_hash: ast hash value
  611. * @is_wds: flag to indicate peer map event for WDS ast entry
  612. *
  613. * associate the peer_id that firmware provided with peer entry
  614. * and update the ast table in the host with the hw_peer_id.
  615. *
  616. * Return: QDF_STATUS code
  617. */
  618. QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  619. uint16_t hw_peer_id, uint8_t vdev_id,
  620. uint8_t *peer_mac_addr, uint16_t ast_hash,
  621. uint8_t is_wds);
  622. /**
  623. * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
  624. * @soc: generic soc handle
  625. * @peer_id: peer_id from firmware
  626. * @vdev_id: vdev ID
  627. * @peer_mac_addr: mac address of the peer or wds entry
  628. * @is_wds: flag to indicate peer map event for WDS ast entry
  629. * @free_wds_count: number of wds entries freed by FW with peer delete
  630. *
  631. * Return: none
  632. */
  633. void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
  634. uint8_t vdev_id, uint8_t *peer_mac_addr,
  635. uint8_t is_wds, uint32_t free_wds_count);
  636. #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
  637. /**
  638. * dp_rx_peer_ext_evt() - handle peer extended event from firmware
  639. * @soc: DP soc handle
  640. * @info: extended evt info
  641. *
  642. *
  643. * Return: QDF_STATUS
  644. */
  645. QDF_STATUS
  646. dp_rx_peer_ext_evt(struct dp_soc *soc, struct dp_peer_ext_evt_info *info);
  647. #endif
  648. #ifdef DP_RX_UDP_OVER_PEER_ROAM
  649. /**
  650. * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
  651. * @soc: dp soc pointer
  652. * @vdev_id: vdev id
  653. * @peer_mac_addr: mac address of the peer
  654. *
  655. * This function resets the roamed peer auth status and mac address
  656. * after peer map indication of same peer is received from firmware.
  657. *
  658. * Return: None
  659. */
  660. void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
  661. uint8_t *peer_mac_addr);
  662. #else
  663. static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
  664. uint8_t *peer_mac_addr)
  665. {
  666. }
  667. #endif
  668. #ifdef WLAN_FEATURE_11BE_MLO
  669. /**
  670. * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
  671. * @soc: generic soc handle
  672. * @peer_id: ML peer_id from firmware
  673. * @peer_mac_addr: mac address of the peer
  674. * @mlo_flow_info: MLO AST flow info
  675. * @mlo_link_info: MLO link info
  676. *
  677. * associate the ML peer_id that firmware provided with peer entry
  678. * and update the ast table in the host with the hw_peer_id.
  679. *
  680. * Return: QDF_STATUS code
  681. */
  682. QDF_STATUS
  683. dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  684. uint8_t *peer_mac_addr,
  685. struct dp_mlo_flow_override_info *mlo_flow_info,
  686. struct dp_mlo_link_info *mlo_link_info);
  687. /**
  688. * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
  689. * @soc: generic soc handle
  690. * @peer_id: peer_id from firmware
  691. *
  692. * Return: none
  693. */
  694. void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
  695. #endif
  696. void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
  697. enum cdp_sec_type sec_type, int is_unicast,
  698. u_int32_t *michael_key, u_int32_t *rx_pn);
  699. uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
  700. uint16_t peer_id, uint8_t *peer_mac);
  701. /**
  702. * dp_peer_add_ast() - Allocate and add AST entry into peer list
  703. * @soc: SoC handle
  704. * @peer: peer to which ast node belongs
  705. * @mac_addr: MAC address of ast node
  706. * @type: AST entry type
  707. * @flags: AST configuration flags
  708. *
  709. * This API is used by WDS source port learning function to
  710. * add a new AST entry into peer AST list
  711. *
  712. * Return: QDF_STATUS code
  713. */
  714. QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
  715. uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
  716. uint32_t flags);
  717. /**
  718. * dp_peer_del_ast() - Delete and free AST entry
  719. * @soc: SoC handle
  720. * @ast_entry: AST entry of the node
  721. *
  722. * This function removes the AST entry from peer and soc tables
  723. * It assumes caller has taken the ast lock to protect the access to these
  724. * tables
  725. *
  726. * Return: None
  727. */
  728. void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
  729. void dp_peer_ast_unmap_handler(struct dp_soc *soc,
  730. struct dp_ast_entry *ast_entry);
  731. /**
  732. * dp_peer_update_ast() - Update AST entry
  733. * @soc: SoC handle
  734. * @peer: peer to which ast node belongs
  735. * @ast_entry: AST entry of the node
  736. * @flags: wds or hmwds
  737. *
  738. * This function updates the AST entry to the roamed peer and soc tables
  739. * It assumes caller has taken the ast lock to protect the access to these
  740. * tables
  741. *
  742. * Return: 0 if ast entry is updated successfully
  743. * -1 on failure
  744. */
  745. int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
  746. struct dp_ast_entry *ast_entry, uint32_t flags);
  747. /**
  748. * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
  749. * @soc: SoC handle
  750. * @ast_mac_addr: Mac address
  751. * @pdev_id: pdev Id
  752. *
  753. * It assumes caller has taken the ast lock to protect the access to
  754. * AST hash table
  755. *
  756. * Return: AST entry
  757. */
  758. struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
  759. uint8_t *ast_mac_addr,
  760. uint8_t pdev_id);
  761. /**
  762. * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address
  763. * @soc: SoC handle
  764. * @ast_mac_addr: Mac address
  765. * @vdev_id: vdev Id
  766. *
  767. * It assumes caller has taken the ast lock to protect the access to
  768. * AST hash table
  769. *
  770. * Return: AST entry
  771. */
  772. struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
  773. uint8_t *ast_mac_addr,
  774. uint8_t vdev_id);
  775. /**
  776. * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
  777. * @soc: SoC handle
  778. * @ast_mac_addr: Mac address
  779. *
  780. * It assumes caller has taken the ast lock to protect the access to
  781. * AST hash table
  782. *
  783. * Return: AST entry
  784. */
  785. struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
  786. uint8_t *ast_mac_addr);
  787. /**
  788. * dp_peer_ast_hash_find_soc_by_type() - Find AST entry by MAC address
  789. * and AST type
  790. * @soc: SoC handle
  791. * @ast_mac_addr: Mac address
  792. * @type: AST entry type
  793. *
  794. * It assumes caller has taken the ast lock to protect the access to
  795. * AST hash table
  796. *
  797. * Return: AST entry
  798. */
  799. struct dp_ast_entry *dp_peer_ast_hash_find_soc_by_type(
  800. struct dp_soc *soc,
  801. uint8_t *ast_mac_addr,
  802. enum cdp_txrx_ast_entry_type type);
  803. /**
  804. * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
  805. * @soc: SoC handle
  806. * @ast_entry: AST entry of the node
  807. *
  808. * This function gets the pdev_id from the ast entry.
  809. *
  810. * Return: (uint8_t) pdev_id
  811. */
  812. uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
  813. struct dp_ast_entry *ast_entry);
  814. /**
  815. * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
  816. * @soc: SoC handle
  817. * @ast_entry: AST entry of the node
  818. *
  819. * This function gets the next hop from the ast entry.
  820. *
  821. * Return: (uint8_t) next_hop
  822. */
  823. uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
  824. struct dp_ast_entry *ast_entry);
  825. /**
  826. * dp_peer_ast_set_type() - set type from the ast entry
  827. * @soc: SoC handle
  828. * @ast_entry: AST entry of the node
  829. * @type: AST entry type
  830. *
  831. * This function sets the type in the ast entry.
  832. *
  833. * Return: None
  834. */
  835. void dp_peer_ast_set_type(struct dp_soc *soc,
  836. struct dp_ast_entry *ast_entry,
  837. enum cdp_txrx_ast_entry_type type);
  838. void dp_peer_ast_send_wds_del(struct dp_soc *soc,
  839. struct dp_ast_entry *ast_entry,
  840. struct dp_peer *peer);
  841. #ifdef WLAN_FEATURE_MULTI_AST_DEL
  842. void dp_peer_ast_send_multi_wds_del(
  843. struct dp_soc *soc, uint8_t vdev_id,
  844. struct peer_del_multi_wds_entries *wds_list);
  845. #endif
  846. void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  847. struct cdp_soc *dp_soc,
  848. void *cookie,
  849. enum cdp_ast_free_status status);
  850. /**
  851. * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
  852. * @soc: SoC handle
  853. * @ase: Address search entry
  854. *
  855. * This function removes the AST entry from soc AST hash table
  856. * It assumes caller has taken the ast lock to protect the access to this table
  857. *
  858. * Return: None
  859. */
  860. void dp_peer_ast_hash_remove(struct dp_soc *soc,
  861. struct dp_ast_entry *ase);
  862. /**
  863. * dp_peer_free_ast_entry() - Free up the ast entry memory
  864. * @soc: SoC handle
  865. * @ast_entry: Address search entry
  866. *
  867. * This API is used to free up the memory associated with
  868. * AST entry.
  869. *
  870. * Return: None
  871. */
  872. void dp_peer_free_ast_entry(struct dp_soc *soc,
  873. struct dp_ast_entry *ast_entry);
  874. /**
  875. * dp_peer_unlink_ast_entry() - Unlink AST entry from peer and hash lists
  876. * @soc: SoC handle
  877. * @ast_entry: Address search entry
  878. * @peer: peer
  879. *
  880. * This API is used to remove/unlink AST entry from the peer list
  881. * and hash list.
  882. *
  883. * Return: None
  884. */
  885. void dp_peer_unlink_ast_entry(struct dp_soc *soc,
  886. struct dp_ast_entry *ast_entry,
  887. struct dp_peer *peer);
  888. /**
  889. * dp_peer_mec_detach_entry() - Detach the MEC entry
  890. * @soc: SoC handle
  891. * @mecentry: MEC entry of the node
  892. * @ptr: pointer to free list
  893. *
  894. * The MEC entry is detached from MEC table and added to free_list
  895. * to free the object outside lock
  896. *
  897. * Return: None
  898. */
  899. void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
  900. void *ptr);
  901. /**
  902. * dp_peer_mec_free_list() - free the MEC entry from free_list
  903. * @soc: SoC handle
  904. * @ptr: pointer to free list
  905. *
  906. * Return: None
  907. */
  908. void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
  909. /**
  910. * dp_peer_mec_add_entry() - Allocate and add MEC entry into MEC table
  911. * @soc: SoC handle
  912. * @vdev: vdev to which mec node belongs
  913. * @mac_addr: MAC address of mec node
  914. *
  915. * This function allocates and adds MEC entry to MEC table.
  916. * It assumes caller has taken the mec lock to protect the access to these
  917. * tables
  918. *
  919. * Return: QDF_STATUS
  920. */
  921. QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
  922. struct dp_vdev *vdev,
  923. uint8_t *mac_addr);
  924. /**
  925. * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
  926. * within pdev
  927. * @soc: SoC handle
  928. * @pdev_id: pdev Id
  929. * @mec_mac_addr: MAC address of mec node
  930. *
  931. * It assumes caller has taken the mec_lock to protect the access to
  932. * MEC hash table
  933. *
  934. * Return: MEC entry
  935. */
  936. struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
  937. uint8_t pdev_id,
  938. uint8_t *mec_mac_addr);
  939. #define DP_AST_ASSERT(_condition) \
  940. do { \
  941. if (!(_condition)) { \
  942. dp_print_ast_stats(soc);\
  943. QDF_BUG(_condition); \
  944. } \
  945. } while (0)
  946. /**
  947. * dp_peer_update_inactive_time() - Update inactive time for peer
  948. * @pdev: pdev object
  949. * @tag_type: htt_tlv_tag type
  950. * @tag_buf: buf message
  951. */
  952. void
  953. dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
  954. uint32_t *tag_buf);
  955. #ifndef QCA_MULTIPASS_SUPPORT
  956. /**
  957. * dp_peer_set_vlan_id() - set vlan_id for this peer
  958. * @cdp_soc: soc handle
  959. * @vdev_id: id of vdev object
  960. * @peer_mac: mac address
  961. * @vlan_id: vlan id for peer
  962. *
  963. * Return: void
  964. */
  965. static inline
  966. void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
  967. uint8_t vdev_id, uint8_t *peer_mac,
  968. uint16_t vlan_id)
  969. {
  970. }
  971. /**
  972. * dp_set_vlan_groupkey() - set vlan map for vdev
  973. * @soc_hdl: pointer to soc
  974. * @vdev_id: id of vdev handle
  975. * @vlan_id: vlan_id
  976. * @group_key: group key for vlan
  977. *
  978. * Return: set success/failure
  979. */
  980. static inline
  981. QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  982. uint16_t vlan_id, uint16_t group_key)
  983. {
  984. return QDF_STATUS_SUCCESS;
  985. }
  986. /**
  987. * dp_peer_multipass_list_init() - initialize multipass peer list
  988. * @vdev: pointer to vdev
  989. *
  990. * Return: void
  991. */
  992. static inline
  993. void dp_peer_multipass_list_init(struct dp_vdev *vdev)
  994. {
  995. }
  996. /**
  997. * dp_peer_multipass_list_remove() - remove peer from special peer list
  998. * @peer: peer handle
  999. *
  1000. * Return: void
  1001. */
  1002. static inline
  1003. void dp_peer_multipass_list_remove(struct dp_peer *peer)
  1004. {
  1005. }
  1006. #else
  1007. void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
  1008. uint8_t vdev_id, uint8_t *peer_mac,
  1009. uint16_t vlan_id);
  1010. QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
  1011. uint16_t vlan_id, uint16_t group_key);
  1012. void dp_peer_multipass_list_init(struct dp_vdev *vdev);
  1013. void dp_peer_multipass_list_remove(struct dp_peer *peer);
  1014. #endif
  1015. #ifndef QCA_PEER_MULTIQ_SUPPORT
  1016. /**
  1017. * dp_peer_reset_flowq_map() - reset peer flowq map table
  1018. * @peer: dp peer handle
  1019. *
  1020. * Return: none
  1021. */
  1022. static inline
  1023. void dp_peer_reset_flowq_map(struct dp_peer *peer)
  1024. {
  1025. }
  1026. /**
  1027. * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
  1028. * @soc_hdl: generic soc handle
  1029. * @is_wds: flag to indicate if peer is wds
  1030. * @peer_id: peer_id from htt peer map message
  1031. * @peer_mac_addr: mac address of the peer
  1032. * @ast_info: ast flow override information from peer map
  1033. *
  1034. * Return: none
  1035. */
  1036. static inline
  1037. void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
  1038. bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
  1039. struct dp_ast_flow_override_info *ast_info)
  1040. {
  1041. }
  1042. #else
  1043. void dp_peer_reset_flowq_map(struct dp_peer *peer);
  1044. void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
  1045. bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
  1046. struct dp_ast_flow_override_info *ast_info);
  1047. #endif
  1048. #ifdef QCA_PEER_EXT_STATS
  1049. /**
  1050. * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay stats content
  1051. * @soc: DP SoC context
  1052. * @txrx_peer: DP txrx peer context
  1053. *
  1054. * Allocate the peer delay stats context
  1055. *
  1056. * Return: QDF_STATUS_SUCCESS if allocation is
  1057. * successful
  1058. */
  1059. QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
  1060. struct dp_txrx_peer *txrx_peer);
  1061. /**
  1062. * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context
  1063. * @soc: DP SoC context
  1064. * @txrx_peer: txrx DP peer context
  1065. *
  1066. * Free the peer delay stats context
  1067. *
  1068. * Return: Void
  1069. */
  1070. void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
  1071. struct dp_txrx_peer *txrx_peer);
  1072. /**
  1073. * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer
  1074. * @txrx_peer: dp_txrx_peer handle
  1075. *
  1076. * Return: void
  1077. */
  1078. void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
  1079. #else
  1080. static inline
  1081. QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
  1082. struct dp_txrx_peer *txrx_peer)
  1083. {
  1084. return QDF_STATUS_SUCCESS;
  1085. }
  1086. static inline
  1087. void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
  1088. struct dp_txrx_peer *txrx_peer)
  1089. {
  1090. }
  1091. static inline
  1092. void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
  1093. {
  1094. }
  1095. #endif
  1096. #ifdef WLAN_PEER_JITTER
  1097. /**
  1098. * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer
  1099. * @pdev: Datapath pdev handle
  1100. * @txrx_peer: dp_txrx_peer handle
  1101. *
  1102. * Return: QDF_STATUS
  1103. */
  1104. QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
  1105. struct dp_txrx_peer *txrx_peer);
  1106. /**
  1107. * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context
  1108. * @pdev: Datapath pdev handle
  1109. * @txrx_peer: dp_txrx_peer handle
  1110. *
  1111. * Return: void
  1112. */
  1113. void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
  1114. struct dp_txrx_peer *txrx_peer);
  1115. /**
  1116. * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer
  1117. * @txrx_peer: dp_txrx_peer handle
  1118. *
  1119. * Return: void
  1120. */
  1121. void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
  1122. #else
  1123. static inline
  1124. QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
  1125. struct dp_txrx_peer *txrx_peer)
  1126. {
  1127. return QDF_STATUS_SUCCESS;
  1128. }
  1129. static inline
  1130. void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
  1131. struct dp_txrx_peer *txrx_peer)
  1132. {
  1133. }
  1134. static inline
  1135. void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
  1136. {
  1137. }
  1138. #endif
  1139. #ifndef CONFIG_SAWF_DEF_QUEUES
  1140. static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
  1141. struct dp_peer *peer)
  1142. {
  1143. return QDF_STATUS_SUCCESS;
  1144. }
  1145. static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
  1146. struct dp_peer *peer)
  1147. {
  1148. return QDF_STATUS_SUCCESS;
  1149. }
  1150. #endif
  1151. #ifndef CONFIG_SAWF
  1152. static inline
  1153. QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
  1154. struct dp_txrx_peer *txrx_peer)
  1155. {
  1156. return QDF_STATUS_SUCCESS;
  1157. }
  1158. static inline
  1159. QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
  1160. struct dp_txrx_peer *txrx_peer)
  1161. {
  1162. return QDF_STATUS_SUCCESS;
  1163. }
  1164. #endif
  1165. /**
  1166. * dp_vdev_bss_peer_ref_n_get() - Get bss peer of a vdev
  1167. * @soc: DP soc
  1168. * @vdev: vdev
  1169. * @mod_id: id of module requesting reference
  1170. *
  1171. * Return: VDEV BSS peer
  1172. */
  1173. struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
  1174. struct dp_vdev *vdev,
  1175. enum dp_mod_id mod_id);
  1176. /**
  1177. * dp_sta_vdev_self_peer_ref_n_get() - Get self peer of sta vdev
  1178. * @soc: DP soc
  1179. * @vdev: vdev
  1180. * @mod_id: id of module requesting reference
  1181. *
  1182. * Return: VDEV self peer
  1183. */
  1184. struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
  1185. struct dp_vdev *vdev,
  1186. enum dp_mod_id mod_id);
  1187. void dp_peer_ast_table_detach(struct dp_soc *soc);
  1188. /**
  1189. * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
  1190. * @soc: soc handle
  1191. *
  1192. * Return: none
  1193. */
  1194. void dp_peer_find_map_detach(struct dp_soc *soc);
  1195. void dp_soc_wds_detach(struct dp_soc *soc);
  1196. QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
  1197. /**
  1198. * dp_find_peer_by_macaddr() - Find the peer matching the given mac address
  1199. * @soc: soc handle
  1200. * @mac_addr: MAC address to be used to find peer
  1201. * @vdev_id: VDEV id
  1202. * @mod_id: MODULE ID
  1203. *
  1204. * Return: struct dp_peer
  1205. */
  1206. struct dp_peer *dp_find_peer_by_macaddr(struct dp_soc *soc, uint8_t *mac_addr,
  1207. uint8_t vdev_id, enum dp_mod_id mod_id);
  1208. /**
  1209. * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
  1210. * @soc: SoC handle
  1211. *
  1212. * Return: QDF_STATUS
  1213. */
  1214. QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
  1215. /**
  1216. * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
  1217. * @soc: SoC handle
  1218. *
  1219. * Return: QDF_STATUS
  1220. */
  1221. QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
  1222. /**
  1223. * dp_del_wds_entry_wrapper() - delete a WDS AST entry
  1224. * @soc: DP soc structure pointer
  1225. * @vdev_id: vdev_id
  1226. * @wds_macaddr: MAC address of ast node
  1227. * @type: type from enum cdp_txrx_ast_entry_type
  1228. * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
  1229. *
  1230. * This API is used to delete an AST entry from fw
  1231. *
  1232. * Return: None
  1233. */
  1234. void dp_del_wds_entry_wrapper(struct dp_soc *soc, uint8_t vdev_id,
  1235. uint8_t *wds_macaddr, uint8_t type,
  1236. uint8_t delete_in_fw);
  1237. void dp_soc_wds_attach(struct dp_soc *soc);
  1238. /**
  1239. * dp_peer_mec_hash_detach() - Free MEC Hash table
  1240. * @soc: SoC handle
  1241. *
  1242. * Return: None
  1243. */
  1244. void dp_peer_mec_hash_detach(struct dp_soc *soc);
  1245. /**
  1246. * dp_peer_ast_hash_detach() - Free AST Hash table
  1247. * @soc: SoC handle
  1248. *
  1249. * Return: None
  1250. */
  1251. void dp_peer_ast_hash_detach(struct dp_soc *soc);
  1252. #ifdef FEATURE_AST
  1253. /**
  1254. * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
  1255. * @soc: datapath soc handle
  1256. * @peer: datapath peer handle
  1257. *
  1258. * Delete the AST entries belonging to a peer
  1259. */
  1260. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  1261. struct dp_peer *peer)
  1262. {
  1263. struct dp_ast_entry *ast_entry, *temp_ast_entry;
  1264. dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
  1265. /*
  1266. * Delete peer self ast entry. This is done to handle scenarios
  1267. * where the peer is freed before the peer map is received (for example,
  1268. * in case of auth disallow due to ACL); in such cases the self ast is
  1269. * not added to peer->ast_list.
  1270. */
  1271. if (peer->self_ast_entry) {
  1272. dp_peer_del_ast(soc, peer->self_ast_entry);
  1273. peer->self_ast_entry = NULL;
  1274. }
  1275. DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
  1276. dp_peer_del_ast(soc, ast_entry);
  1277. }
  1278. /**
  1279. * dp_print_peer_ast_entries() - Dump AST entries of peer
  1280. * @soc: Datapath soc handle
  1281. * @peer: Datapath peer
  1282. * @arg: argument to iterate function
  1283. *
  1284. * Return: void
  1285. */
  1286. void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
  1287. void *arg);
  1288. #else
  1289. static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
  1290. struct dp_peer *peer, void *arg)
  1291. {
  1292. }
  1293. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  1294. struct dp_peer *peer)
  1295. {
  1296. }
  1297. #endif
  1298. #ifdef FEATURE_MEC
  1299. /**
  1300. * dp_peer_mec_spinlock_create() - Create the MEC spinlock
  1301. * @soc: SoC handle
  1302. *
  1303. * Return: none
  1304. */
  1305. void dp_peer_mec_spinlock_create(struct dp_soc *soc);
  1306. /**
  1307. * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
  1308. * @soc: SoC handle
  1309. *
  1310. * Return: none
  1311. */
  1312. void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);
  1313. /**
  1314. * dp_peer_mec_flush_entries() - Delete all mec entries in table
  1315. * @soc: Datapath SOC
  1316. *
  1317. * Return: None
  1318. */
  1319. void dp_peer_mec_flush_entries(struct dp_soc *soc);
  1320. #else
  1321. static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
  1322. {
  1323. }
  1324. static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
  1325. {
  1326. }
  1327. static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
  1328. {
  1329. }
  1330. #endif
  1331. static inline int dp_peer_find_mac_addr_cmp(
  1332. union dp_align_mac_addr *mac_addr1,
  1333. union dp_align_mac_addr *mac_addr2)
  1334. {
  1335. /*
  1336. * Intentionally use & rather than &&.
  1337. * because the operands are binary rather than generic boolean,
  1338. * the functionality is equivalent.
  1339. * Using && has the advantage of short-circuited evaluation,
  1340. * but using & has the advantage of no conditional branching,
  1341. * which is a more significant benefit.
  1342. */
  1343. return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
  1344. & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
  1345. }
  1346. /**
  1347. * dp_peer_delete() - delete DP peer
  1348. *
  1349. * @soc: Datapath soc
  1350. * @peer: Datapath peer
  1351. * @arg: argument to iter function
  1352. *
  1353. * Return: void
  1354. */
  1355. void dp_peer_delete(struct dp_soc *soc,
  1356. struct dp_peer *peer,
  1357. void *arg);
  1358. /**
  1359. * dp_mlo_peer_delete() - delete MLO DP peer
  1360. *
  1361. * @soc: Datapath soc
  1362. * @peer: Datapath peer
  1363. * @arg: argument to iter function
  1364. *
  1365. * Return: void
  1366. */
  1367. void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);
  1368. #ifdef WLAN_FEATURE_11BE_MLO
  1369. /* is MLO connection mld txrx peer */
  1370. #define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)
  1371. /* set peer type */
  1372. #define DP_PEER_SET_TYPE(_peer, _type_val) \
  1373. ((_peer)->peer_type = (_type_val))
  1374. /* is legacy peer */
  1375. #define IS_DP_LEGACY_PEER(_peer) \
  1376. ((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))
  1377. /* is MLO connection link peer */
  1378. #define IS_MLO_DP_LINK_PEER(_peer) \
  1379. ((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
  1380. /* is MLO connection mld peer */
  1381. #define IS_MLO_DP_MLD_PEER(_peer) \
  1382. ((_peer)->peer_type == CDP_MLD_PEER_TYPE)
  1383. /* Get Mld peer from link peer */
  1384. #define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
  1385. ((link_peer)->mld_peer)
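
/*
 * Illustrative note (not part of the API): the macros above classify a
 * dp_peer purely from its peer_type and mld_peer fields, e.g.:
 *
 *	if (IS_DP_LEGACY_PEER(peer))
 *		... non-MLO client: CDP_LINK_PEER_TYPE with no mld_peer ...
 *	else if (IS_MLO_DP_LINK_PEER(peer))
 *		... per-link peer of an MLO client;
 *		... DP_GET_MLD_PEER_FROM_PEER() returns its MLD peer ...
 *	else if (IS_MLO_DP_MLD_PEER(peer))
 *		... the MLD (association-level) peer itself ...
 */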

#ifdef WLAN_MLO_MULTI_CHIP
static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
{
	if (soc->arch_ops.mlo_get_chip_id)
		return soc->arch_ops.mlo_get_chip_id(soc);

	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	if (soc->arch_ops.mlo_link_peer_find_hash_find_by_chip_id)
		return soc->arch_ops.mlo_link_peer_find_hash_find_by_chip_id
				(soc, peer_mac_addr,
				 mac_addr_is_aligned,
				 vdev_id, chip_id,
				 mod_id);

	return NULL;
}
#else
static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
#endif

/**
 * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
 *				  matching mac_address
 * @soc: soc handle
 * @peer_mac_addr: mld peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * Return: peer on success
 *	   NULL on failure
 */
static inline
struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
					   uint8_t *peer_mac_addr,
					   int mac_addr_is_aligned,
					   uint8_t vdev_id,
					   enum dp_mod_id mod_id)
{
	if (soc->arch_ops.mlo_peer_find_hash_find)
		return soc->arch_ops.mlo_peer_find_hash_find(soc,
							     peer_mac_addr,
							     mac_addr_is_aligned,
							     mod_id, vdev_id);
	return NULL;
}

/**
 * dp_peer_hash_find_wrapper() - find link peer or MLD peer according to
 *				 peer_type
 * @soc: DP SOC handle
 * @peer_info: peer information for hash find
 * @mod_id: ID of module requesting reference
 *
 * Return: peer handle
 */
static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
					  struct cdp_peer_info *peer_info,
					  enum dp_mod_id mod_id)
{
	struct dp_peer *peer = NULL;

	if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
	    peer_info->peer_type == CDP_WILD_PEER_TYPE) {
		peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
					      peer_info->mac_addr_is_aligned,
					      peer_info->vdev_id,
					      mod_id);
		if (peer)
			return peer;
	}

	if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
	    peer_info->peer_type == CDP_WILD_PEER_TYPE)
		peer = dp_mld_peer_find_hash_find(
					soc, peer_info->mac_addr,
					peer_info->mac_addr_is_aligned,
					peer_info->vdev_id,
					mod_id);
	return peer;
}
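
/*
 * Usage sketch (illustrative only; field values are examples): callers fill
 * a struct cdp_peer_info and release the returned reference with
 * dp_peer_unref_delete() when done:
 *
 *	struct cdp_peer_info peer_info;
 *	struct dp_peer *peer;
 *
 *	peer_info.vdev_id = vdev_id;
 *	peer_info.mac_addr = peer_mac;
 *	peer_info.mac_addr_is_aligned = false;
 *	peer_info.peer_type = CDP_WILD_PEER_TYPE;
 *
 *	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
 *	if (peer) {
 *		...
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *	}
 */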

/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
 *				 increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
			       struct dp_peer *mld_peer)
{
	/* increase mld_peer ref_cnt */
	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = mld_peer;
}

/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
 *				 decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = NULL;
}
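
/*
 * Illustrative sketch (not part of the API): the two helpers above are used
 * as a pair around the lifetime of an MLO link peer, so the link peer's
 * mld_peer pointer always carries a DP_MOD_ID_CDP reference:
 *
 *	dp_link_peer_add_mld_peer(link_peer, mld_peer);	(on link setup)
 *	...
 *	dp_link_peer_del_mld_peer(link_peer);		(on link teardown)
 */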

/**
 * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
	int i;

	qdf_spinlock_create(&mld_peer->link_peers_info_lock);
	mld_peer->num_links = 0;
	for (i = 0; i < DP_MAX_MLO_LINKS; i++)
		mld_peer->link_peers[i].is_valid = false;
}

/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}

/**
 * dp_mld_peer_add_link_peer() - add link peer info to mld peer
 * @mld_peer: mld dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
			       struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	struct dp_soc *soc = mld_peer->vdev->pdev->soc;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (!link_peer_info->is_valid) {
			qdf_mem_copy(link_peer_info->mac_addr.raw,
				     link_peer->mac_addr.raw,
				     QDF_MAC_ADDR_SIZE);
			link_peer_info->is_valid = true;
			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
			link_peer_info->chip_id =
				dp_get_chip_id(link_peer->vdev->pdev->soc);
			mld_peer->num_links++;
			break;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	dp_peer_info("%s addition of link peer %pK (" QDF_MAC_ADDR_FMT ") "
		     "to MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
		     "idx %u num_links %u",
		     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
		     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
		     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
		     i, mld_peer->num_links);

	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_ADD_LINK,
						mld_peer, link_peer, i,
						(i != DP_MAX_MLO_LINKS) ? 1 : 0);
}

/**
 * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
 * @mld_peer: MLD dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: number of links left after deletion
 */
static inline
uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
				  struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	uint8_t num_links;
	struct dp_soc *soc = mld_peer->vdev->pdev->soc;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid &&
		    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
					       &link_peer_info->mac_addr)) {
			link_peer_info->is_valid = false;
			mld_peer->num_links--;
			break;
		}
	}
	num_links = mld_peer->num_links;
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	dp_peer_info("%s deletion of link peer %pK (" QDF_MAC_ADDR_FMT ") "
		     "from MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
		     "idx %u num_links %u",
		     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
		     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
		     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
		     i, mld_peer->num_links);

	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_DEL_LINK,
						mld_peer, link_peer, i,
						(i != DP_MAX_MLO_LINKS) ? 1 : 0);

	return num_links;
}
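
/*
 * Illustrative sketch (not part of the API): dp_mld_peer_add_link_peer() and
 * dp_mld_peer_del_link_peer() maintain the MLD peer's link table; the delete
 * helper's return value tells the caller whether any links remain, e.g.
 * (mirroring dp_peer_mlo_delete() further below):
 *
 *	if (dp_mld_peer_del_link_peer(mld_peer, link_peer) == 0)
 *		dp_peer_delete(soc, mld_peer, NULL);	(last link removed)
 */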

/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
 *					   increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that holds the link peers pointer array and number
 * @mod_id: id of module requesting reference
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
		struct dp_soc *soc,
		struct dp_peer *mld_peer,
		struct dp_mld_link_peers *mld_link_peers,
		enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i = 0, j = 0;
	struct dp_peer_link_info *link_peer_info;

	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid) {
			peer = dp_link_peer_hash_find_by_chip_id(
						soc,
						link_peer_info->mac_addr.raw,
						true,
						link_peer_info->vdev_id,
						link_peer_info->chip_id,
						mod_id);
			if (peer)
				mld_link_peers->link_peers[j++] = peer;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	mld_link_peers->num_links = j;
}

/**
 * dp_release_link_peers_ref() - release all link peers' references
 * @mld_link_peers: structure that holds the link peers pointer array and number
 * @mod_id: id of module requesting reference
 *
 * Return: None.
 */
static inline
void dp_release_link_peers_ref(
		struct dp_mld_link_peers *mld_link_peers,
		enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i;

	for (i = 0; i < mld_link_peers->num_links; i++) {
		peer = mld_link_peers->link_peers[i];
		if (peer)
			dp_peer_unref_delete(peer, mod_id);
		mld_link_peers->link_peers[i] = NULL;
	}

	mld_link_peers->num_links = 0;
}

/**
 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
 * @soc: Datapath soc handle
 * @peer_id: peer id
 * @lmac_id: lmac id to find the link peer on given lmac
 *
 * Return: peer_id of link peer if found
 *	   else return HTT_INVALID_PEER
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	uint8_t i;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_soc *link_peer_soc;
	struct dp_mld_link_peers link_peers_info;
	uint16_t link_peer_id = HTT_INVALID_PEER;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
	if (!peer)
		return HTT_INVALID_PEER;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    DP_MOD_ID_CDP);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			if ((link_peer_soc == soc) &&
			    (link_peer->vdev->pdev->lmac_id == lmac_id)) {
				link_peer_id = link_peer->peer_id;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		link_peer_id = peer_id;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return link_peer_id;
}

/**
 * dp_peer_get_tgt_peer_hash_find() - get dp_peer handle
 * @soc: soc handle
 * @peer_mac: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * For an MLO connection this returns the corresponding MLD peer;
 * otherwise it returns the link peer (non-MLO case).
 *
 * Return: peer on success
 *	   NULL on failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find(soc,
						      peer_mac, 0, vdev_id,
						      mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;

			/* release peer reference that was added by hash find */
			dp_peer_unref_delete(peer, mod_id);
		} else {
			/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	} else {
		dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT " vdev_id: %u",
			    QDF_MAC_ADDR_REF(peer_mac), vdev_id);
	}

	return ta_peer;
}
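
/*
 * Usage sketch (illustrative only): the returned target peer carries a
 * reference for @mod_id and must be released by the caller:
 *
 *	struct dp_peer *ta_peer;
 *
 *	ta_peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0, vdev_id,
 *						 DP_MOD_ID_CDP);
 *	if (ta_peer) {
 *		... use MLD peer for MLO, link peer otherwise ...
 *		dp_peer_unref_delete(ta_peer, DP_MOD_ID_CDP);
 *	}
 */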

/**
 * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @mod_id: ID of module requesting reference
 *
 * For an MLO connection this returns the corresponding MLD peer;
 * otherwise it returns the link peer (non-MLO case).
 *
 * Return: peer on success
 *	   NULL on failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;

			/* release peer reference that was added by ref_by_id */
			dp_peer_unref_delete(peer, mod_id);
		} else {
			/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	}

	return ta_peer;
}

/**
 * dp_peer_mlo_delete() - peer MLO related delete operation
 * @peer: DP peer handle
 *
 * Return: None
 */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
	struct dp_peer *ml_peer;
	struct dp_soc *soc;

	dp_info("peer " QDF_MAC_ADDR_FMT " type %d",
		QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->peer_type);

	/* MLO connection link peer */
	if (IS_MLO_DP_LINK_PEER(peer)) {
		ml_peer = peer->mld_peer;
		soc = ml_peer->vdev->pdev->soc;

		/* if last link peer deletion, delete MLD peer */
		if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
			dp_peer_delete(soc, peer->mld_peer, NULL);
	}
}

/**
 * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
 * @soc: Soc handle
 * @peer: DP peer handle
 * @vdev_id: Vdev ID
 * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mlo_setup(
		struct dp_soc *soc,
		struct dp_peer *peer,
		uint8_t vdev_id,
		struct cdp_peer_setup_info *setup_info);

/**
 * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
 * @peer: datapath peer
 *
 * Return: MLD peer in case of MLO Link peer
 *	   Peer itself in other cases
 */
static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
}

/**
 * dp_get_primary_link_peer_by_id() - Get primary link peer from the given
 *				      peer id
 * @soc: core DP soc context
 * @peer_id: peer id
 * @mod_id: ID of module requesting reference
 *
 * Return: primary link peer for the MLO peer
 *	   legacy peer itself in case of legacy peer
 */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	uint8_t i;
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_peer *primary_peer = NULL;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	if (!peer)
		return NULL;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    mod_id);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			if (link_peer->primary_link) {
				/*
				 * Take additional reference over
				 * primary link peer.
				 */
				if (QDF_STATUS_SUCCESS ==
				    dp_peer_get_ref(NULL, link_peer, mod_id))
					primary_peer = link_peer;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, mod_id);
		dp_peer_unref_delete(peer, mod_id);
	} else {
		primary_peer = peer;
	}

	return primary_peer;
}
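
/*
 * Usage sketch (illustrative only): as with the other lookup helpers, the
 * primary link peer is returned with a reference for @mod_id that the caller
 * must drop:
 *
 *	struct dp_peer *primary_peer;
 *
 *	primary_peer = dp_get_primary_link_peer_by_id(soc, peer_id,
 *						      DP_MOD_ID_CDP);
 *	if (primary_peer) {
 *		... work tied to the primary link ...
 *		dp_peer_unref_delete(primary_peer, DP_MOD_ID_CDP);
 *	}
 */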

/**
 * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
 * @peer: Datapath peer
 *
 * Return: dp_txrx_peer from MLD peer if peer type is link peer
 *	   dp_txrx_peer from peer itself for other cases
 */
static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return IS_MLO_DP_LINK_PEER(peer) ?
				peer->mld_peer->txrx_peer : peer->txrx_peer;
}

/**
 * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
 * @peer: Datapath peer
 *
 * Return: true if peer is primary link peer or legacy peer
 *	   false otherwise
 */
static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
		return true;
	else if (IS_DP_LEGACY_PEER(peer))
		return true;
	else
		return false;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for the given peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the txrx peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_txrx_peer *txrx_peer;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	if (!peer)
		return NULL;

	txrx_peer = dp_get_txrx_peer(peer);
	if (txrx_peer) {
		*handle = (dp_txrx_ref_handle)peer;
		return txrx_peer;
	}

	dp_peer_unref_delete(peer, mod_id);
	return NULL;
}
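
/*
 * Usage sketch (illustrative only; assumes the matching release helper
 * dp_txrx_peer_unref_delete() declared elsewhere in this driver): on success
 * @handle holds the dp_peer reference backing the returned txrx peer and
 * must be released when the caller is done:
 *
 *	dp_txrx_ref_handle ref_handle = NULL;
 *	struct dp_txrx_peer *txrx_peer;
 *
 *	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id, &ref_handle,
 *						   DP_MOD_ID_CDP);
 *	if (txrx_peer) {
 *		... update per-peer stats ...
 *		dp_txrx_peer_unref_delete(ref_handle, DP_MOD_ID_CDP);
 *	}
 */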

/**
 * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
 *
 * @soc: core DP soc context
 *
 * Return: void
 */
void dp_print_mlo_ast_stats_be(struct dp_soc *soc);

/**
 * dp_get_peer_link_id() - Get Link peer Link ID
 * @peer: Datapath peer
 *
 * Return: Link peer Link ID
 */
uint8_t dp_get_peer_link_id(struct dp_peer *peer);
#else
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false

#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */

/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) true

#define IS_MLO_DP_LINK_PEER(_peer) false

#define IS_MLO_DP_MLD_PEER(_peer) false

#define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL

static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
					  struct cdp_peer_info *peer_info,
					  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_info->mac_addr,
				      peer_info->mac_addr_is_aligned,
				      peer_info->vdev_id,
				      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
QDF_STATUS dp_peer_mlo_setup(
		struct dp_soc *soc,
		struct dp_peer *peer,
		uint8_t vdev_id,
		struct cdp_peer_setup_info *setup_info)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
}

static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}

static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	return peer;
}

static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return peer->txrx_peer;
}

static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	return true;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for the given peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the txrx peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)
{
	return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}

static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	return peer_id;
}

static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
{
}

static inline uint8_t dp_get_peer_link_id(struct dp_peer *peer)
{
	return 0;
}
#endif /* WLAN_FEATURE_11BE_MLO */

static inline
void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
{
	uint8_t i;

	qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
		     sizeof(struct dp_rx_tid_defrag));

	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
}

static inline
void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
{
	uint8_t i;

	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
}

#ifdef PEER_CACHE_RX_PKTS
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
	qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
	txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
			DP_RX_CACHED_BUFQ_THRESH);
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
	qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
#else
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
#endif

/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc: core DP soc context
 * @peer: DP peer
 * @state: new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
		     struct dp_peer *peer,
		     enum dp_peer_state state)
{
	uint8_t peer_state;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	peer_state = peer->peer_state;

	switch (state) {
	case DP_PEER_STATE_INIT:
		DP_PEER_STATE_ASSERT
			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
		break;
	case DP_PEER_STATE_ACTIVE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_INIT));
		break;
	case DP_PEER_STATE_LOGICAL_DELETE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_ACTIVE) ||
				     (peer_state == DP_PEER_STATE_INIT));
		break;
	case DP_PEER_STATE_INACTIVE:
		if (IS_MLO_DP_MLD_PEER(peer))
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_ACTIVE));
		else
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;
	case DP_PEER_STATE_FREED:
		if (peer->sta_self_peer)
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INIT));
		else
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INACTIVE) ||
				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;
	default:
		qdf_spin_unlock_bh(&peer->peer_state_lock);
		dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return;
	}

	peer->peer_state = state;
	qdf_spin_unlock_bh(&peer->peer_state_lock);

	dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
		peer_state, state,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}
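
/*
 * Illustrative summary (derived from the state checks above, not exhaustive):
 * the expected peer lifecycle is
 *
 *	INIT -> ACTIVE -> LOGICAL_DELETE -> INACTIVE -> FREED
 *
 * with a few shortcuts visible in the checks: an MLD peer goes ACTIVE ->
 * INACTIVE directly, FREED may also follow LOGICAL_DELETE, and an STA self
 * peer may be freed straight from INIT. A typical call looks like
 *
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
 */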

/**
 * dp_vdev_iterate_specific_peer_type() - API to iterate through vdev peer
 * list based on type of peer (Legacy or MLD peer)
 *
 * @vdev: DP vdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 * @peer_type: type of peer - MLO Link Peer or Legacy Peer
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_specific_peer_type(struct dp_vdev *vdev,
				   dp_peer_iter_func *func,
				   void *arg, enum dp_mod_id mod_id,
				   enum dp_peer_type peer_type)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
			if ((peer_type == DP_PEER_TYPE_LEGACY &&
			     (IS_DP_LEGACY_PEER(peer))) ||
			    (peer_type == DP_PEER_TYPE_MLO_LINK &&
			     (IS_MLO_DP_LINK_PEER(peer)))) {
				(*func)(soc, peer, arg);
			}

			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
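
/*
 * Usage sketch (illustrative only; dp_print_peer_ast_entries() is simply an
 * example of a dp_peer_iter_func-compatible callback declared above):
 *
 *	dp_vdev_iterate_specific_peer_type(vdev, dp_print_peer_ast_entries,
 *					   NULL, DP_MOD_ID_CDP,
 *					   DP_PEER_TYPE_LEGACY);
 *
 * The iterator holds the vdev peer list lock and takes/drops a @mod_id
 * reference around each callback, so @func must not sleep and must not
 * release that reference itself.
 */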

#ifdef REO_SHARED_QREF_TABLE_EN
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
					struct dp_peer *peer);
#else
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
						      struct dp_peer *peer) {}
#endif

/**
 * dp_peer_check_wds_ext_peer() - Check WDS ext peer
 *
 * @peer: DP peer
 *
 * Return: True for WDS ext peer, false otherwise
 */
bool dp_peer_check_wds_ext_peer(struct dp_peer *peer);

/**
 * dp_gen_ml_peer_id() - Generate MLD peer id for DP
 *
 * @soc: DP soc context
 * @peer_id: mld peer id
 *
 * Return: DP MLD peer id
 */
uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id);

#ifdef FEATURE_AST
/**
 * dp_peer_host_add_map_ast() - Add ast entry with HW AST Index
 * @soc: SoC handle
 * @peer_id: peer id from firmware
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id of the VAP to which the peer belongs
 * @ast_hash: ast hash value in HW
 * @is_wds: flag to indicate peer map event for WDS ast entry
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
				    uint8_t *mac_addr, uint16_t hw_peer_id,
				    uint8_t vdev_id, uint16_t ast_hash,
				    uint8_t is_wds);
#endif

#if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
/**
 * dp_map_link_id_band() - Set link id to band mapping in txrx_peer
 * @peer: dp peer pointer
 *
 * Return: None
 */
void dp_map_link_id_band(struct dp_peer *peer);
#else
static inline
void dp_map_link_id_band(struct dp_peer *peer)
{
}
#endif
#endif /* _DP_PEER_H_ */