dp_peer.h 65 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616
  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #ifndef _DP_PEER_H_
  20. #define _DP_PEER_H_
  21. #include <qdf_types.h>
  22. #include <qdf_lock.h>
  23. #include "dp_types.h"
  24. #include "dp_internal.h"
  25. #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
  26. #include "hal_reo.h"
  27. #endif
  28. #define DP_INVALID_PEER_ID 0xffff
  29. #define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
  30. #define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */
  31. #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
  32. #define DP_PEER_HASH_LOAD_MULT 2
  33. #define DP_PEER_HASH_LOAD_SHIFT 0
  34. /* Threshold for peer's cached buf queue beyond which frames are dropped */
  35. #define DP_RX_CACHED_BUFQ_THRESH 64
  36. #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
  37. #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
  38. #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
  39. #define dp_peer_info(params...) \
  40. __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
  41. #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
  42. #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
  43. /**
  44. * enum dp_bands - WiFi Band
  45. *
  46. * @DP_BAND_INVALID: Invalid band
  47. * @DP_BAND_2GHZ: 2GHz link
  48. * @DP_BAND_5GHZ: 5GHz link
  49. * @DP_BAND_6GHZ: 6GHz link
  50. * @DP_BAND_UNKNOWN: Unknown band
  51. */
  52. enum dp_bands {
  53. DP_BAND_INVALID = 0,
  54. DP_BAND_2GHZ = 1,
  55. DP_BAND_5GHZ = 2,
  56. DP_BAND_6GHZ = 3,
  57. DP_BAND_UNKNOWN = 4,
  58. };
  59. /**
  60. * dp_freq_to_band() - Convert frequency to band
  61. * @freq: peer frequency
  62. *
  63. * Return: band for input frequency
  64. */
  65. enum dp_bands dp_freq_to_band(qdf_freq_t freq);
  66. #endif
  67. void check_free_list_for_invalid_flush(struct dp_soc *soc);
  68. static inline
  69. void add_entry_alloc_list(struct dp_soc *soc, struct dp_rx_tid *rx_tid,
  70. struct dp_peer *peer, void *hw_qdesc_vaddr)
  71. {
  72. uint32_t max_list_size;
  73. unsigned long curr_ts = qdf_get_system_timestamp();
  74. uint32_t qref_index = soc->free_addr_list_idx;
  75. max_list_size = soc->wlan_cfg_ctx->qref_control_size;
  76. if (max_list_size == 0)
  77. return;
  78. soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_paddr =
  79. rx_tid->hw_qdesc_paddr;
  80. soc->list_qdesc_addr_alloc[qref_index].ts_qdesc_mem_hdl = curr_ts;
  81. soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_vaddr_align =
  82. hw_qdesc_vaddr;
  83. soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_vaddr_unalign =
  84. rx_tid->hw_qdesc_vaddr_unaligned;
  85. soc->list_qdesc_addr_alloc[qref_index].peer_id = peer->peer_id;
  86. soc->list_qdesc_addr_alloc[qref_index].tid = rx_tid->tid;
  87. soc->alloc_addr_list_idx++;
  88. if (soc->alloc_addr_list_idx == max_list_size)
  89. soc->alloc_addr_list_idx = 0;
  90. }
  91. static inline
  92. void add_entry_free_list(struct dp_soc *soc, struct dp_rx_tid *rx_tid)
  93. {
  94. uint32_t max_list_size;
  95. unsigned long curr_ts = qdf_get_system_timestamp();
  96. uint32_t qref_index = soc->free_addr_list_idx;
  97. max_list_size = soc->wlan_cfg_ctx->qref_control_size;
  98. if (max_list_size == 0)
  99. return;
  100. soc->list_qdesc_addr_free[qref_index].ts_qdesc_mem_hdl = curr_ts;
  101. soc->list_qdesc_addr_free[qref_index].hw_qdesc_paddr =
  102. rx_tid->hw_qdesc_paddr;
  103. soc->list_qdesc_addr_free[qref_index].hw_qdesc_vaddr_align =
  104. rx_tid->hw_qdesc_vaddr_aligned;
  105. soc->list_qdesc_addr_free[qref_index].hw_qdesc_vaddr_unalign =
  106. rx_tid->hw_qdesc_vaddr_unaligned;
  107. soc->free_addr_list_idx++;
  108. if (soc->free_addr_list_idx == max_list_size)
  109. soc->free_addr_list_idx = 0;
  110. }
  111. static inline
  112. void add_entry_write_list(struct dp_soc *soc, struct dp_peer *peer,
  113. uint32_t tid)
  114. {
  115. uint32_t max_list_size;
  116. unsigned long curr_ts = qdf_get_system_timestamp();
  117. max_list_size = soc->wlan_cfg_ctx->qref_control_size;
  118. if (max_list_size == 0)
  119. return;
  120. soc->reo_write_list[soc->write_paddr_list_idx].ts_qaddr_del = curr_ts;
  121. soc->reo_write_list[soc->write_paddr_list_idx].peer_id = peer->peer_id;
  122. soc->reo_write_list[soc->write_paddr_list_idx].paddr =
  123. peer->rx_tid[tid].hw_qdesc_paddr;
  124. soc->reo_write_list[soc->write_paddr_list_idx].tid = tid;
  125. soc->write_paddr_list_idx++;
  126. if (soc->write_paddr_list_idx == max_list_size)
  127. soc->write_paddr_list_idx = 0;
  128. }
  129. #ifdef REO_QDESC_HISTORY
  130. enum reo_qdesc_event_type {
  131. REO_QDESC_UPDATE_CB = 0,
  132. REO_QDESC_FREE,
  133. };
  134. struct reo_qdesc_event {
  135. qdf_dma_addr_t qdesc_addr;
  136. uint64_t ts;
  137. enum reo_qdesc_event_type type;
  138. uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
  139. };
  140. #endif
  141. struct ast_del_ctxt {
  142. bool age;
  143. int del_count;
  144. };
  145. #ifdef QCA_SUPPORT_WDS_EXTENDED
  146. /**
  147. * dp_peer_is_wds_ext_peer() - peer is WDS_EXT peer
  148. *
  149. * @peer: DP peer context
  150. *
  151. * This API checks whether the peer is WDS_EXT peer or not
  152. *
  153. * Return: true in the wds_ext peer else flase
  154. */
  155. static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
  156. {
  157. return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
  158. }
  159. #else
  160. static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
  161. {
  162. return false;
  163. }
  164. #endif
  165. typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
  166. void *arg);
  167. /**
  168. * dp_peer_unref_delete() - unref and delete peer
  169. * @peer: Datapath peer handle
  170. * @id: ID of module releasing reference
  171. *
  172. */
  173. void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
  174. /**
  175. * dp_txrx_peer_unref_delete() - unref and delete peer
  176. * @handle: Datapath txrx ref handle
  177. * @id: Module ID of the caller
  178. *
  179. */
  180. void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
  181. /**
  182. * dp_peer_find_hash_find() - returns legacy or mlo link peer from
  183. * peer_hash_table matching vdev_id and mac_address
  184. * @soc: soc handle
  185. * @peer_mac_addr: peer mac address
  186. * @mac_addr_is_aligned: is mac addr aligned
  187. * @vdev_id: vdev_id
  188. * @mod_id: id of module requesting reference
  189. *
  190. * return: peer in success
  191. * NULL in failure
  192. */
  193. struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
  194. uint8_t *peer_mac_addr,
  195. int mac_addr_is_aligned,
  196. uint8_t vdev_id,
  197. enum dp_mod_id mod_id);
  198. /**
  199. * dp_peer_find_by_id_valid - check if peer exists for given id
  200. * @soc: core DP soc context
  201. * @peer_id: peer id from peer object can be retrieved
  202. *
  203. * Return: true if peer exists of false otherwise
  204. */
  205. bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
  206. /**
  207. * dp_peer_get_ref() - Returns peer object given the peer id
  208. *
  209. * @soc: core DP soc context
  210. * @peer: DP peer
  211. * @mod_id: id of module requesting the reference
  212. *
  213. * Return: QDF_STATUS_SUCCESS if reference held successfully
  214. * else QDF_STATUS_E_INVAL
  215. */
  216. static inline
  217. QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
  218. struct dp_peer *peer,
  219. enum dp_mod_id mod_id)
  220. {
  221. if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
  222. return QDF_STATUS_E_INVAL;
  223. if (mod_id > DP_MOD_ID_RX)
  224. qdf_atomic_inc(&peer->mod_refs[mod_id]);
  225. return QDF_STATUS_SUCCESS;
  226. }
  227. /**
  228. * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
  229. *
  230. * @soc: core DP soc context
  231. * @peer_id: peer id from peer object can be retrieved
  232. * @mod_id: module id
  233. *
  234. * Return: struct dp_peer*: Pointer to DP peer object
  235. */
  236. static inline struct dp_peer *
  237. __dp_peer_get_ref_by_id(struct dp_soc *soc,
  238. uint16_t peer_id,
  239. enum dp_mod_id mod_id)
  240. {
  241. struct dp_peer *peer;
  242. qdf_spin_lock_bh(&soc->peer_map_lock);
  243. peer = (peer_id >= soc->max_peer_id) ? NULL :
  244. soc->peer_id_to_obj_map[peer_id];
  245. if (!peer ||
  246. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  247. qdf_spin_unlock_bh(&soc->peer_map_lock);
  248. return NULL;
  249. }
  250. qdf_spin_unlock_bh(&soc->peer_map_lock);
  251. return peer;
  252. }
  253. /**
  254. * dp_peer_get_ref_by_id() - Returns peer object given the peer id
  255. * if peer state is active
  256. *
  257. * @soc: core DP soc context
  258. * @peer_id: peer id from peer object can be retrieved
  259. * @mod_id: ID of module requesting reference
  260. *
  261. * Return: struct dp_peer*: Pointer to DP peer object
  262. */
  263. static inline
  264. struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
  265. uint16_t peer_id,
  266. enum dp_mod_id mod_id)
  267. {
  268. struct dp_peer *peer;
  269. qdf_spin_lock_bh(&soc->peer_map_lock);
  270. peer = (peer_id >= soc->max_peer_id) ? NULL :
  271. soc->peer_id_to_obj_map[peer_id];
  272. if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
  273. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  274. qdf_spin_unlock_bh(&soc->peer_map_lock);
  275. return NULL;
  276. }
  277. qdf_spin_unlock_bh(&soc->peer_map_lock);
  278. return peer;
  279. }
  280. /**
  281. * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
  282. *
  283. * @soc: core DP soc context
  284. * @peer_id: peer id from peer object can be retrieved
  285. * @handle: reference handle
  286. * @mod_id: ID of module requesting reference
  287. *
  288. * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
  289. */
  290. static inline struct dp_txrx_peer *
  291. dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
  292. uint16_t peer_id,
  293. dp_txrx_ref_handle *handle,
  294. enum dp_mod_id mod_id)
  295. {
  296. struct dp_peer *peer;
  297. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  298. if (!peer)
  299. return NULL;
  300. if (!peer->txrx_peer) {
  301. dp_peer_unref_delete(peer, mod_id);
  302. return NULL;
  303. }
  304. *handle = (dp_txrx_ref_handle)peer;
  305. return peer->txrx_peer;
  306. }
  307. #ifdef PEER_CACHE_RX_PKTS
  308. /**
  309. * dp_rx_flush_rx_cached() - flush cached rx frames
  310. * @peer: peer
  311. * @drop: set flag to drop frames
  312. *
  313. * Return: None
  314. */
  315. void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
  316. #else
  317. static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
  318. {
  319. }
  320. #endif
  321. static inline void
  322. dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
  323. {
  324. qdf_spin_lock_bh(&peer->peer_info_lock);
  325. peer->state = OL_TXRX_PEER_STATE_DISC;
  326. qdf_spin_unlock_bh(&peer->peer_info_lock);
  327. dp_rx_flush_rx_cached(peer, true);
  328. }
  329. /**
  330. * dp_vdev_iterate_peer() - API to iterate through vdev peer list
  331. *
  332. * @vdev: DP vdev context
  333. * @func: function to be called for each peer
  334. * @arg: argument need to be passed to func
  335. * @mod_id: module_id
  336. *
  337. * Return: void
  338. */
  339. static inline void
  340. dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
  341. enum dp_mod_id mod_id)
  342. {
  343. struct dp_peer *peer;
  344. struct dp_peer *tmp_peer;
  345. struct dp_soc *soc = NULL;
  346. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  347. return;
  348. soc = vdev->pdev->soc;
  349. qdf_spin_lock_bh(&vdev->peer_list_lock);
  350. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  351. peer_list_elem,
  352. tmp_peer) {
  353. if (dp_peer_get_ref(soc, peer, mod_id) ==
  354. QDF_STATUS_SUCCESS) {
  355. (*func)(soc, peer, arg);
  356. dp_peer_unref_delete(peer, mod_id);
  357. }
  358. }
  359. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  360. }
  361. /**
  362. * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
  363. *
  364. * @pdev: DP pdev context
  365. * @func: function to be called for each peer
  366. * @arg: argument need to be passed to func
  367. * @mod_id: module_id
  368. *
  369. * Return: void
  370. */
  371. static inline void
  372. dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
  373. enum dp_mod_id mod_id)
  374. {
  375. struct dp_vdev *vdev;
  376. if (!pdev)
  377. return;
  378. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  379. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
  380. dp_vdev_iterate_peer(vdev, func, arg, mod_id);
  381. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  382. }
  383. /**
  384. * dp_soc_iterate_peer() - API to iterate through all peers of soc
  385. *
  386. * @soc: DP soc context
  387. * @func: function to be called for each peer
  388. * @arg: argument need to be passed to func
  389. * @mod_id: module_id
  390. *
  391. * Return: void
  392. */
  393. static inline void
  394. dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
  395. enum dp_mod_id mod_id)
  396. {
  397. struct dp_pdev *pdev;
  398. int i;
  399. if (!soc)
  400. return;
  401. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  402. pdev = soc->pdev_list[i];
  403. dp_pdev_iterate_peer(pdev, func, arg, mod_id);
  404. }
  405. }
  406. /**
  407. * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
  408. *
  409. * This API will cache the peers in local allocated memory and calls
  410. * iterate function outside the lock.
  411. *
  412. * As this API is allocating new memory it is suggested to use this
  413. * only when lock cannot be held
  414. *
  415. * @vdev: DP vdev context
  416. * @func: function to be called for each peer
  417. * @arg: argument need to be passed to func
  418. * @mod_id: module_id
  419. *
  420. * Return: void
  421. */
  422. static inline void
  423. dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
  424. dp_peer_iter_func *func,
  425. void *arg,
  426. enum dp_mod_id mod_id)
  427. {
  428. struct dp_peer *peer;
  429. struct dp_peer *tmp_peer;
  430. struct dp_soc *soc = NULL;
  431. struct dp_peer **peer_array = NULL;
  432. int i = 0;
  433. uint32_t num_peers = 0;
  434. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  435. return;
  436. num_peers = vdev->num_peers;
  437. soc = vdev->pdev->soc;
  438. peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
  439. if (!peer_array)
  440. return;
  441. qdf_spin_lock_bh(&vdev->peer_list_lock);
  442. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  443. peer_list_elem,
  444. tmp_peer) {
  445. if (i >= num_peers)
  446. break;
  447. if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
  448. peer_array[i] = peer;
  449. i = (i + 1);
  450. }
  451. }
  452. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  453. for (i = 0; i < num_peers; i++) {
  454. peer = peer_array[i];
  455. if (!peer)
  456. continue;
  457. (*func)(soc, peer, arg);
  458. dp_peer_unref_delete(peer, mod_id);
  459. }
  460. qdf_mem_free(peer_array);
  461. }
  462. /**
  463. * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
  464. *
  465. * This API will cache the peers in local allocated memory and calls
  466. * iterate function outside the lock.
  467. *
  468. * As this API is allocating new memory it is suggested to use this
  469. * only when lock cannot be held
  470. *
  471. * @pdev: DP pdev context
  472. * @func: function to be called for each peer
  473. * @arg: argument need to be passed to func
  474. * @mod_id: module_id
  475. *
  476. * Return: void
  477. */
  478. static inline void
  479. dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
  480. dp_peer_iter_func *func,
  481. void *arg,
  482. enum dp_mod_id mod_id)
  483. {
  484. struct dp_peer *peer;
  485. struct dp_peer *tmp_peer;
  486. struct dp_soc *soc = NULL;
  487. struct dp_vdev *vdev = NULL;
  488. struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
  489. int i = 0;
  490. int j = 0;
  491. uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
  492. if (!pdev || !pdev->soc)
  493. return;
  494. soc = pdev->soc;
  495. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  496. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  497. num_peers[i] = vdev->num_peers;
  498. peer_array[i] = qdf_mem_malloc(num_peers[i] *
  499. sizeof(struct dp_peer *));
  500. if (!peer_array[i])
  501. break;
  502. qdf_spin_lock_bh(&vdev->peer_list_lock);
  503. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  504. peer_list_elem,
  505. tmp_peer) {
  506. if (j >= num_peers[i])
  507. break;
  508. if (dp_peer_get_ref(soc, peer, mod_id) ==
  509. QDF_STATUS_SUCCESS) {
  510. peer_array[i][j] = peer;
  511. j = (j + 1);
  512. }
  513. }
  514. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  515. i = (i + 1);
  516. }
  517. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  518. for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
  519. if (!peer_array[i])
  520. break;
  521. for (j = 0; j < num_peers[i]; j++) {
  522. peer = peer_array[i][j];
  523. if (!peer)
  524. continue;
  525. (*func)(soc, peer, arg);
  526. dp_peer_unref_delete(peer, mod_id);
  527. }
  528. qdf_mem_free(peer_array[i]);
  529. }
  530. }
  531. /**
  532. * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
  533. *
  534. * This API will cache the peers in local allocated memory and calls
  535. * iterate function outside the lock.
  536. *
  537. * As this API is allocating new memory it is suggested to use this
  538. * only when lock cannot be held
  539. *
  540. * @soc: DP soc context
  541. * @func: function to be called for each peer
  542. * @arg: argument need to be passed to func
  543. * @mod_id: module_id
  544. *
  545. * Return: void
  546. */
  547. static inline void
  548. dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
  549. dp_peer_iter_func *func,
  550. void *arg,
  551. enum dp_mod_id mod_id)
  552. {
  553. struct dp_pdev *pdev;
  554. int i;
  555. if (!soc)
  556. return;
  557. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  558. pdev = soc->pdev_list[i];
  559. dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
  560. }
  561. }
  562. #ifdef DP_PEER_STATE_DEBUG
  563. #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
  564. do { \
  565. if (!(_condition)) { \
  566. dp_alert("Invalid state shift from %u to %u peer " \
  567. QDF_MAC_ADDR_FMT, \
  568. (_peer)->peer_state, (_new_state), \
  569. QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
  570. QDF_ASSERT(0); \
  571. } \
  572. } while (0)
  573. #else
  574. #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
  575. do { \
  576. if (!(_condition)) { \
  577. dp_alert("Invalid state shift from %u to %u peer " \
  578. QDF_MAC_ADDR_FMT, \
  579. (_peer)->peer_state, (_new_state), \
  580. QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
  581. } \
  582. } while (0)
  583. #endif
  584. /**
  585. * dp_peer_state_cmp() - compare dp peer state
  586. *
  587. * @peer: DP peer
  588. * @state: state
  589. *
  590. * Return: true if state matches with peer state
  591. * false if it does not match
  592. */
  593. static inline bool
  594. dp_peer_state_cmp(struct dp_peer *peer,
  595. enum dp_peer_state state)
  596. {
  597. bool is_status_equal = false;
  598. qdf_spin_lock_bh(&peer->peer_state_lock);
  599. is_status_equal = (peer->peer_state == state);
  600. qdf_spin_unlock_bh(&peer->peer_state_lock);
  601. return is_status_equal;
  602. }
  603. /**
  604. * dp_print_ast_stats() - Dump AST table contents
  605. * @soc: Datapath soc handle
  606. *
  607. * Return: void
  608. */
  609. void dp_print_ast_stats(struct dp_soc *soc);
  610. /**
  611. * dp_rx_peer_map_handler() - handle peer map event from firmware
  612. * @soc: generic soc handle
  613. * @peer_id: peer_id from firmware
  614. * @hw_peer_id: ast index for this peer
  615. * @vdev_id: vdev ID
  616. * @peer_mac_addr: mac address of the peer
  617. * @ast_hash: ast hash value
  618. * @is_wds: flag to indicate peer map event for WDS ast entry
  619. *
  620. * associate the peer_id that firmware provided with peer entry
  621. * and update the ast table in the host with the hw_peer_id.
  622. *
  623. * Return: QDF_STATUS code
  624. */
  625. QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  626. uint16_t hw_peer_id, uint8_t vdev_id,
  627. uint8_t *peer_mac_addr, uint16_t ast_hash,
  628. uint8_t is_wds);
  629. /**
  630. * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
  631. * @soc: generic soc handle
  632. * @peer_id: peer_id from firmware
  633. * @vdev_id: vdev ID
  634. * @peer_mac_addr: mac address of the peer or wds entry
  635. * @is_wds: flag to indicate peer map event for WDS ast entry
  636. * @free_wds_count: number of wds entries freed by FW with peer delete
  637. *
  638. * Return: none
  639. */
  640. void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
  641. uint8_t vdev_id, uint8_t *peer_mac_addr,
  642. uint8_t is_wds, uint32_t free_wds_count);
  643. #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
  644. /**
  645. * dp_rx_peer_ext_evt() - handle peer extended event from firmware
  646. * @soc: DP soc handle
  647. * @info: extended evt info
  648. *
  649. *
  650. * Return: QDF_STATUS
  651. */
  652. QDF_STATUS
  653. dp_rx_peer_ext_evt(struct dp_soc *soc, struct dp_peer_ext_evt_info *info);
  654. #endif
  655. #ifdef DP_RX_UDP_OVER_PEER_ROAM
  656. /**
  657. * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
  658. * @soc: dp soc pointer
  659. * @vdev_id: vdev id
  660. * @peer_mac_addr: mac address of the peer
  661. *
  662. * This function resets the roamed peer auth status and mac address
  663. * after peer map indication of same peer is received from firmware.
  664. *
  665. * Return: None
  666. */
  667. void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
  668. uint8_t *peer_mac_addr);
  669. #else
  670. static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
  671. uint8_t *peer_mac_addr)
  672. {
  673. }
  674. #endif
  675. #ifdef WLAN_FEATURE_11BE_MLO
  676. /**
  677. * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
  678. * @soc: generic soc handle
  679. * @peer_id: ML peer_id from firmware
  680. * @peer_mac_addr: mac address of the peer
  681. * @mlo_flow_info: MLO AST flow info
  682. * @mlo_link_info: MLO link info
  683. *
  684. * associate the ML peer_id that firmware provided with peer entry
  685. * and update the ast table in the host with the hw_peer_id.
  686. *
  687. * Return: QDF_STATUS code
  688. */
  689. QDF_STATUS
  690. dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  691. uint8_t *peer_mac_addr,
  692. struct dp_mlo_flow_override_info *mlo_flow_info,
  693. struct dp_mlo_link_info *mlo_link_info);
  694. /**
  695. * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
  696. * @soc: generic soc handle
  697. * @peer_id: peer_id from firmware
  698. *
  699. * Return: none
  700. */
  701. void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
  702. #endif
  703. void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
  704. enum cdp_sec_type sec_type, int is_unicast,
  705. u_int32_t *michael_key, u_int32_t *rx_pn);
  706. uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
  707. uint16_t peer_id, uint8_t *peer_mac);
  708. /**
  709. * dp_peer_add_ast() - Allocate and add AST entry into peer list
  710. * @soc: SoC handle
  711. * @peer: peer to which ast node belongs
  712. * @mac_addr: MAC address of ast node
  713. * @type: AST entry type
  714. * @flags: AST configuration flags
  715. *
  716. * This API is used by WDS source port learning function to
  717. * add a new AST entry into peer AST list
  718. *
  719. * Return: QDF_STATUS code
  720. */
  721. QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
  722. uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
  723. uint32_t flags);
  724. /**
  725. * dp_peer_del_ast() - Delete and free AST entry
  726. * @soc: SoC handle
  727. * @ast_entry: AST entry of the node
  728. *
  729. * This function removes the AST entry from peer and soc tables
  730. * It assumes caller has taken the ast lock to protect the access to these
  731. * tables
  732. *
  733. * Return: None
  734. */
  735. void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
  736. void dp_peer_ast_unmap_handler(struct dp_soc *soc,
  737. struct dp_ast_entry *ast_entry);
  738. /**
  739. * dp_peer_update_ast() - Delete and free AST entry
  740. * @soc: SoC handle
  741. * @peer: peer to which ast node belongs
  742. * @ast_entry: AST entry of the node
  743. * @flags: wds or hmwds
  744. *
  745. * This function update the AST entry to the roamed peer and soc tables
  746. * It assumes caller has taken the ast lock to protect the access to these
  747. * tables
  748. *
  749. * Return: 0 if ast entry is updated successfully
  750. * -1 failure
  751. */
  752. int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
  753. struct dp_ast_entry *ast_entry, uint32_t flags);
  754. /**
  755. * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
  756. * @soc: SoC handle
  757. * @ast_mac_addr: Mac address
  758. * @pdev_id: pdev Id
  759. *
  760. * It assumes caller has taken the ast lock to protect the access to
  761. * AST hash table
  762. *
  763. * Return: AST entry
  764. */
  765. struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
  766. uint8_t *ast_mac_addr,
  767. uint8_t pdev_id);
  768. /**
  769. * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address
  770. * @soc: SoC handle
  771. * @ast_mac_addr: Mac address
  772. * @vdev_id: vdev Id
  773. *
  774. * It assumes caller has taken the ast lock to protect the access to
  775. * AST hash table
  776. *
  777. * Return: AST entry
  778. */
  779. struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
  780. uint8_t *ast_mac_addr,
  781. uint8_t vdev_id);
  782. /**
  783. * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
  784. * @soc: SoC handle
  785. * @ast_mac_addr: Mac address
  786. *
  787. * It assumes caller has taken the ast lock to protect the access to
  788. * AST hash table
  789. *
  790. * Return: AST entry
  791. */
  792. struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
  793. uint8_t *ast_mac_addr);
  794. /**
  795. * dp_peer_ast_hash_find_soc_by_type() - Find AST entry by MAC address
  796. * and AST type
  797. * @soc: SoC handle
  798. * @ast_mac_addr: Mac address
  799. * @type: AST entry type
  800. *
  801. * It assumes caller has taken the ast lock to protect the access to
  802. * AST hash table
  803. *
  804. * Return: AST entry
  805. */
  806. struct dp_ast_entry *dp_peer_ast_hash_find_soc_by_type(
  807. struct dp_soc *soc,
  808. uint8_t *ast_mac_addr,
  809. enum cdp_txrx_ast_entry_type type);
  810. /**
  811. * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
  812. * @soc: SoC handle
  813. * @ast_entry: AST entry of the node
  814. *
  815. * This function gets the pdev_id from the ast entry.
  816. *
  817. * Return: (uint8_t) pdev_id
  818. */
  819. uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
  820. struct dp_ast_entry *ast_entry);
  821. /**
  822. * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
  823. * @soc: SoC handle
  824. * @ast_entry: AST entry of the node
  825. *
  826. * This function gets the next hop from the ast entry.
  827. *
  828. * Return: (uint8_t) next_hop
  829. */
  830. uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
  831. struct dp_ast_entry *ast_entry);
  832. /**
  833. * dp_peer_ast_set_type() - set type from the ast entry
  834. * @soc: SoC handle
  835. * @ast_entry: AST entry of the node
  836. * @type: AST entry type
  837. *
  838. * This function sets the type in the ast entry.
  839. *
  840. * Return:
  841. */
  842. void dp_peer_ast_set_type(struct dp_soc *soc,
  843. struct dp_ast_entry *ast_entry,
  844. enum cdp_txrx_ast_entry_type type);
  845. void dp_peer_ast_send_wds_del(struct dp_soc *soc,
  846. struct dp_ast_entry *ast_entry,
  847. struct dp_peer *peer);
  848. #ifdef WLAN_FEATURE_MULTI_AST_DEL
  849. void dp_peer_ast_send_multi_wds_del(
  850. struct dp_soc *soc, uint8_t vdev_id,
  851. struct peer_del_multi_wds_entries *wds_list);
  852. #endif
  853. void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  854. struct cdp_soc *dp_soc,
  855. void *cookie,
  856. enum cdp_ast_free_status status);
  857. /**
  858. * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
  859. * @soc: SoC handle
  860. * @ase: Address search entry
  861. *
  862. * This function removes the AST entry from soc AST hash table
  863. * It assumes caller has taken the ast lock to protect the access to this table
  864. *
  865. * Return: None
  866. */
  867. void dp_peer_ast_hash_remove(struct dp_soc *soc,
  868. struct dp_ast_entry *ase);
  869. /**
  870. * dp_peer_free_ast_entry() - Free up the ast entry memory
  871. * @soc: SoC handle
  872. * @ast_entry: Address search entry
  873. *
  874. * This API is used to free up the memory associated with
  875. * AST entry.
  876. *
  877. * Return: None
  878. */
  879. void dp_peer_free_ast_entry(struct dp_soc *soc,
  880. struct dp_ast_entry *ast_entry);
  881. /**
  882. * dp_peer_unlink_ast_entry() - Free up the ast entry memory
  883. * @soc: SoC handle
  884. * @ast_entry: Address search entry
  885. * @peer: peer
  886. *
  887. * This API is used to remove/unlink AST entry from the peer list
  888. * and hash list.
  889. *
  890. * Return: None
  891. */
  892. void dp_peer_unlink_ast_entry(struct dp_soc *soc,
  893. struct dp_ast_entry *ast_entry,
  894. struct dp_peer *peer);
  895. /**
  896. * dp_peer_mec_detach_entry() - Detach the MEC entry
  897. * @soc: SoC handle
  898. * @mecentry: MEC entry of the node
  899. * @ptr: pointer to free list
  900. *
  901. * The MEC entry is detached from MEC table and added to free_list
  902. * to free the object outside lock
  903. *
  904. * Return: None
  905. */
  906. void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
  907. void *ptr);
  908. /**
  909. * dp_peer_mec_free_list() - free the MEC entry from free_list
  910. * @soc: SoC handle
  911. * @ptr: pointer to free list
  912. *
  913. * Return: None
  914. */
  915. void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
  916. /**
  917. * dp_peer_mec_add_entry()
  918. * @soc: SoC handle
  919. * @vdev: vdev to which mec node belongs
  920. * @mac_addr: MAC address of mec node
  921. *
  922. * This function allocates and adds MEC entry to MEC table.
  923. * It assumes caller has taken the mec lock to protect the access to these
  924. * tables
  925. *
  926. * Return: QDF_STATUS
  927. */
  928. QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
  929. struct dp_vdev *vdev,
  930. uint8_t *mac_addr);
  931. /**
  932. * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by PDEV Id
  933. * within pdev
  934. * @soc: SoC handle
  935. * @pdev_id: pdev Id
  936. * @mec_mac_addr: MAC address of mec node
  937. *
  938. * It assumes caller has taken the mec_lock to protect the access to
  939. * MEC hash table
  940. *
  941. * Return: MEC entry
  942. */
  943. struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
  944. uint8_t pdev_id,
  945. uint8_t *mec_mac_addr);
  946. #define DP_AST_ASSERT(_condition) \
  947. do { \
  948. if (!(_condition)) { \
  949. dp_print_ast_stats(soc);\
  950. QDF_BUG(_condition); \
  951. } \
  952. } while (0)
  953. /**
  954. * dp_peer_update_inactive_time() - Update inactive time for peer
  955. * @pdev: pdev object
  956. * @tag_type: htt_tlv_tag type
  957. * @tag_buf: buf message
  958. */
  959. void
  960. dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
  961. uint32_t *tag_buf);
  962. #ifndef QCA_MULTIPASS_SUPPORT
  963. static inline
  964. /**
  965. * dp_peer_set_vlan_id() - set vlan_id for this peer
  966. * @cdp_soc: soc handle
  967. * @vdev_id: id of vdev object
  968. * @peer_mac: mac address
  969. * @vlan_id: vlan id for peer
  970. *
  971. * Return: void
  972. */
  973. void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
  974. uint8_t vdev_id, uint8_t *peer_mac,
  975. uint16_t vlan_id)
  976. {
  977. }
  978. /**
  979. * dp_set_vlan_groupkey() - set vlan map for vdev
  980. * @soc_hdl: pointer to soc
  981. * @vdev_id: id of vdev handle
  982. * @vlan_id: vlan_id
  983. * @group_key: group key for vlan
  984. *
  985. * Return: set success/failure
  986. */
  987. static inline
  988. QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  989. uint16_t vlan_id, uint16_t group_key)
  990. {
  991. return QDF_STATUS_SUCCESS;
  992. }
  993. /**
  994. * dp_peer_multipass_list_init() - initialize multipass peer list
  995. * @vdev: pointer to vdev
  996. *
  997. * Return: void
  998. */
  999. static inline
  1000. void dp_peer_multipass_list_init(struct dp_vdev *vdev)
  1001. {
  1002. }
  1003. /**
  1004. * dp_peer_multipass_list_remove() - remove peer from special peer list
  1005. * @peer: peer handle
  1006. *
  1007. * Return: void
  1008. */
  1009. static inline
  1010. void dp_peer_multipass_list_remove(struct dp_peer *peer)
  1011. {
  1012. }
  1013. #else
  1014. void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
  1015. uint8_t vdev_id, uint8_t *peer_mac,
  1016. uint16_t vlan_id);
  1017. QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
  1018. uint16_t vlan_id, uint16_t group_key);
  1019. void dp_peer_multipass_list_init(struct dp_vdev *vdev);
  1020. void dp_peer_multipass_list_remove(struct dp_peer *peer);
  1021. #endif
  1022. #ifndef QCA_PEER_MULTIQ_SUPPORT
  1023. /**
  1024. * dp_peer_reset_flowq_map() - reset peer flowq map table
  1025. * @peer: dp peer handle
  1026. *
  1027. * Return: none
  1028. */
  1029. static inline
  1030. void dp_peer_reset_flowq_map(struct dp_peer *peer)
  1031. {
  1032. }
  1033. /**
  1034. * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
  1035. * @soc_hdl: generic soc handle
  1036. * @is_wds: flag to indicate if peer is wds
  1037. * @peer_id: peer_id from htt peer map message
  1038. * @peer_mac_addr: mac address of the peer
  1039. * @ast_info: ast flow override information from peer map
  1040. *
  1041. * Return: none
  1042. */
  1043. static inline
  1044. void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
  1045. bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
  1046. struct dp_ast_flow_override_info *ast_info)
  1047. {
  1048. }
  1049. #else
  1050. void dp_peer_reset_flowq_map(struct dp_peer *peer);
  1051. void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
  1052. bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
  1053. struct dp_ast_flow_override_info *ast_info);
  1054. #endif
  1055. #ifdef QCA_PEER_EXT_STATS
  1056. /**
  1057. * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay stats content
  1058. * @soc: DP SoC context
  1059. * @txrx_peer: DP txrx peer context
  1060. *
  1061. * Allocate the peer delay stats context
  1062. *
  1063. * Return: QDF_STATUS_SUCCESS if allocation is
  1064. * successful
  1065. */
  1066. QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
  1067. struct dp_txrx_peer *txrx_peer);
  1068. /**
  1069. * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context
  1070. * @soc: DP SoC context
  1071. * @txrx_peer: txrx DP peer context
  1072. *
  1073. * Free the peer delay stats context
  1074. *
  1075. * Return: Void
  1076. */
  1077. void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
  1078. struct dp_txrx_peer *txrx_peer);
  1079. /**
  1080. * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer
  1081. * @txrx_peer: dp_txrx_peer handle
  1082. *
  1083. * Return: void
  1084. */
  1085. void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
  1086. #else
  1087. static inline
  1088. QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
  1089. struct dp_txrx_peer *txrx_peer)
  1090. {
  1091. return QDF_STATUS_SUCCESS;
  1092. }
  1093. static inline
  1094. void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
  1095. struct dp_txrx_peer *txrx_peer)
  1096. {
  1097. }
  1098. static inline
  1099. void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
  1100. {
  1101. }
  1102. #endif
  1103. #ifdef WLAN_PEER_JITTER
  1104. /**
  1105. * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer
  1106. * @pdev: Datapath pdev handle
  1107. * @txrx_peer: dp_txrx_peer handle
  1108. *
  1109. * Return: QDF_STATUS
  1110. */
  1111. QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
  1112. struct dp_txrx_peer *txrx_peer);
  1113. /**
  1114. * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context
  1115. * @pdev: Datapath pdev handle
  1116. * @txrx_peer: dp_txrx_peer handle
  1117. *
  1118. * Return: void
  1119. */
  1120. void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
  1121. struct dp_txrx_peer *txrx_peer);
  1122. /**
  1123. * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer
  1124. * @txrx_peer: dp_txrx_peer handle
  1125. *
  1126. * Return: void
  1127. */
  1128. void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
  1129. #else
  1130. static inline
  1131. QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
  1132. struct dp_txrx_peer *txrx_peer)
  1133. {
  1134. return QDF_STATUS_SUCCESS;
  1135. }
  1136. static inline
  1137. void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
  1138. struct dp_txrx_peer *txrx_peer)
  1139. {
  1140. }
  1141. static inline
  1142. void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
  1143. {
  1144. }
  1145. #endif
  1146. #ifndef CONFIG_SAWF_DEF_QUEUES
  1147. static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
  1148. struct dp_peer *peer)
  1149. {
  1150. return QDF_STATUS_SUCCESS;
  1151. }
  1152. static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
  1153. struct dp_peer *peer)
  1154. {
  1155. return QDF_STATUS_SUCCESS;
  1156. }
  1157. #endif
  1158. #ifndef CONFIG_SAWF
  1159. static inline
  1160. QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
  1161. struct dp_txrx_peer *txrx_peer)
  1162. {
  1163. return QDF_STATUS_SUCCESS;
  1164. }
  1165. static inline
  1166. QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
  1167. struct dp_txrx_peer *txrx_peer)
  1168. {
  1169. return QDF_STATUS_SUCCESS;
  1170. }
  1171. #endif
  1172. /**
  1173. * dp_vdev_bss_peer_ref_n_get: Get bss peer of a vdev
  1174. * @soc: DP soc
  1175. * @vdev: vdev
  1176. * @mod_id: id of module requesting reference
  1177. *
  1178. * Return: VDEV BSS peer
  1179. */
  1180. struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
  1181. struct dp_vdev *vdev,
  1182. enum dp_mod_id mod_id);
  1183. /**
  1184. * dp_sta_vdev_self_peer_ref_n_get: Get self peer of sta vdev
  1185. * @soc: DP soc
  1186. * @vdev: vdev
  1187. * @mod_id: id of module requesting reference
  1188. *
  1189. * Return: VDEV self peer
  1190. */
  1191. struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
  1192. struct dp_vdev *vdev,
  1193. enum dp_mod_id mod_id);
  1194. void dp_peer_ast_table_detach(struct dp_soc *soc);
  1195. /**
  1196. * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
  1197. * @soc: soc handle
  1198. *
  1199. * Return: none
  1200. */
  1201. void dp_peer_find_map_detach(struct dp_soc *soc);
  1202. void dp_soc_wds_detach(struct dp_soc *soc);
  1203. QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
  1204. /**
  1205. * dp_find_peer_by_macaddr() - Finding the peer from mac address provided.
  1206. * @soc: soc handle
  1207. * @mac_addr: MAC address to be used to find peer
  1208. * @vdev_id: VDEV id
  1209. * @mod_id: MODULE ID
  1210. *
  1211. * Return: struct dp_peer
  1212. */
  1213. struct dp_peer *dp_find_peer_by_macaddr(struct dp_soc *soc, uint8_t *mac_addr,
  1214. uint8_t vdev_id, enum dp_mod_id mod_id);
  1215. /**
  1216. * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
  1217. * @soc: SoC handle
  1218. *
  1219. * Return: QDF_STATUS
  1220. */
  1221. QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
  1222. /**
  1223. * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
  1224. * @soc: SoC handle
  1225. *
  1226. * Return: QDF_STATUS
  1227. */
  1228. QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
  1229. /**
  1230. * dp_del_wds_entry_wrapper() - delete a WDS AST entry
  1231. * @soc: DP soc structure pointer
  1232. * @vdev_id: vdev_id
  1233. * @wds_macaddr: MAC address of ast node
  1234. * @type: type from enum cdp_txrx_ast_entry_type
  1235. * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
  1236. *
  1237. * This API is used to delete an AST entry from fw
  1238. *
  1239. * Return: None
  1240. */
  1241. void dp_del_wds_entry_wrapper(struct dp_soc *soc, uint8_t vdev_id,
  1242. uint8_t *wds_macaddr, uint8_t type,
  1243. uint8_t delete_in_fw);
  1244. void dp_soc_wds_attach(struct dp_soc *soc);
  1245. /**
  1246. * dp_peer_mec_hash_detach() - Free MEC Hash table
  1247. * @soc: SoC handle
  1248. *
  1249. * Return: None
  1250. */
  1251. void dp_peer_mec_hash_detach(struct dp_soc *soc);
  1252. /**
  1253. * dp_peer_ast_hash_detach() - Free AST Hash table
  1254. * @soc: SoC handle
  1255. *
  1256. * Return: None
  1257. */
  1258. void dp_peer_ast_hash_detach(struct dp_soc *soc);
  1259. #ifdef FEATURE_AST
  1260. /**
  1261. * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
  1262. * @soc: datapath soc handle
  1263. * @peer: datapath peer handle
  1264. *
  1265. * Delete the AST entries belonging to a peer
  1266. */
  1267. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  1268. struct dp_peer *peer)
  1269. {
  1270. struct dp_ast_entry *ast_entry, *temp_ast_entry;
  1271. dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
  1272. /*
  1273. * Delete peer self ast entry. This is done to handle scenarios
  1274. * where peer is freed before peer map is received(for ex in case
  1275. * of auth disallow due to ACL) in such cases self ast is not added
  1276. * to peer->ast_list.
  1277. */
  1278. if (peer->self_ast_entry) {
  1279. dp_peer_del_ast(soc, peer->self_ast_entry);
  1280. peer->self_ast_entry = NULL;
  1281. }
  1282. DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
  1283. dp_peer_del_ast(soc, ast_entry);
  1284. }
  1285. /**
  1286. * dp_print_peer_ast_entries() - Dump AST entries of peer
  1287. * @soc: Datapath soc handle
  1288. * @peer: Datapath peer
  1289. * @arg: argument to iterate function
  1290. *
  1291. * Return: void
  1292. */
  1293. void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
  1294. void *arg);
  1295. #else
  1296. static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
  1297. struct dp_peer *peer, void *arg)
  1298. {
  1299. }
  1300. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  1301. struct dp_peer *peer)
  1302. {
  1303. }
  1304. #endif
  1305. #ifdef FEATURE_MEC
  1306. /**
  1307. * dp_peer_mec_spinlock_create() - Create the MEC spinlock
  1308. * @soc: SoC handle
  1309. *
  1310. * Return: none
  1311. */
  1312. void dp_peer_mec_spinlock_create(struct dp_soc *soc);
  1313. /**
  1314. * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
  1315. * @soc: SoC handle
  1316. *
  1317. * Return: none
  1318. */
  1319. void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);
  1320. /**
  1321. * dp_peer_mec_flush_entries() - Delete all mec entries in table
  1322. * @soc: Datapath SOC
  1323. *
  1324. * Return: None
  1325. */
  1326. void dp_peer_mec_flush_entries(struct dp_soc *soc);
  1327. #else
  1328. static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
  1329. {
  1330. }
  1331. static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
  1332. {
  1333. }
  1334. static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
  1335. {
  1336. }
  1337. #endif
  1338. static inline int dp_peer_find_mac_addr_cmp(
  1339. union dp_align_mac_addr *mac_addr1,
  1340. union dp_align_mac_addr *mac_addr2)
  1341. {
  1342. /*
  1343. * Intentionally use & rather than &&.
  1344. * because the operands are binary rather than generic boolean,
  1345. * the functionality is equivalent.
  1346. * Using && has the advantage of short-circuited evaluation,
  1347. * but using & has the advantage of no conditional branching,
  1348. * which is a more significant benefit.
  1349. */
  1350. return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
  1351. & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
  1352. }
  1353. /**
  1354. * dp_peer_delete() - delete DP peer
  1355. *
  1356. * @soc: Datatpath soc
  1357. * @peer: Datapath peer
  1358. * @arg: argument to iter function
  1359. *
  1360. * Return: void
  1361. */
  1362. void dp_peer_delete(struct dp_soc *soc,
  1363. struct dp_peer *peer,
  1364. void *arg);
  1365. /**
  1366. * dp_mlo_peer_delete() - delete MLO DP peer
  1367. *
  1368. * @soc: Datapath soc
  1369. * @peer: Datapath peer
  1370. * @arg: argument to iter function
  1371. *
  1372. * Return: void
  1373. */
  1374. void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);

#ifdef WLAN_FEATURE_11BE_MLO
/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)

/* set peer type */
#define DP_PEER_SET_TYPE(_peer, _type_val) \
	((_peer)->peer_type = (_type_val))

/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))

/* is MLO connection link peer */
#define IS_MLO_DP_LINK_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)

/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_PEER(_peer) \
	((_peer)->peer_type == CDP_MLD_PEER_TYPE)

/* Get MLD peer from link peer */
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
	((link_peer)->mld_peer)

#ifdef WLAN_MLO_MULTI_CHIP
static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
{
	if (soc->arch_ops.mlo_get_chip_id)
		return soc->arch_ops.mlo_get_chip_id(soc);

	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	if (soc->arch_ops.mlo_link_peer_find_hash_find_by_chip_id)
		return soc->arch_ops.mlo_link_peer_find_hash_find_by_chip_id
				(soc, peer_mac_addr,
				 mac_addr_is_aligned,
				 vdev_id, chip_id,
				 mod_id);

	return NULL;
}
#else
static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
#endif

/**
 * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
 *				  matching mac_address
 * @soc: soc handle
 * @peer_mac_addr: mld peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * Return: peer on success
 *	   NULL on failure
 */
static inline
struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
					   uint8_t *peer_mac_addr,
					   int mac_addr_is_aligned,
					   uint8_t vdev_id,
					   enum dp_mod_id mod_id)
{
	if (soc->arch_ops.mlo_peer_find_hash_find)
		return soc->arch_ops.mlo_peer_find_hash_find(soc,
							     peer_mac_addr,
							     mac_addr_is_aligned,
							     mod_id, vdev_id);
	return NULL;
}

/**
 * dp_peer_hash_find_wrapper() - find link peer or mld peer according to
 *				 peer_type
 * @soc: DP SOC handle
 * @peer_info: peer information for hash find
 * @mod_id: ID of module requesting reference
 *
 * Return: peer handle
 */
static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
					   struct cdp_peer_info *peer_info,
					   enum dp_mod_id mod_id)
{
	struct dp_peer *peer = NULL;

	if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
	    peer_info->peer_type == CDP_WILD_PEER_TYPE) {
		peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
					      peer_info->mac_addr_is_aligned,
					      peer_info->vdev_id,
					      mod_id);
		if (peer)
			return peer;
	}

	if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
	    peer_info->peer_type == CDP_WILD_PEER_TYPE)
		peer = dp_mld_peer_find_hash_find(
					soc, peer_info->mac_addr,
					peer_info->mac_addr_is_aligned,
					peer_info->vdev_id,
					mod_id);
	return peer;
}
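
/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * a caller that only knows the MAC address and vdev_id can fill a
 * cdp_peer_info descriptor and let the wrapper above search the link and MLD
 * hash tables. The reference taken here must later be dropped with
 * dp_peer_unref_delete() using the same mod_id.
 */
static inline struct dp_peer *
dp_peer_hash_find_any_sketch(struct dp_soc *soc, uint8_t *mac_addr,
			     uint8_t vdev_id)
{
	struct cdp_peer_info peer_info;

	qdf_mem_zero(&peer_info, sizeof(peer_info));
	peer_info.mac_addr = mac_addr;
	peer_info.mac_addr_is_aligned = false;
	peer_info.vdev_id = vdev_id;
	peer_info.peer_type = CDP_WILD_PEER_TYPE;

	return dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
}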

/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
 *				 increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
			       struct dp_peer *mld_peer)
{
	/* increase mld_peer ref_cnt */
	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = mld_peer;
}

/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
 *				 decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = NULL;
}

/**
 * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
	int i;

	qdf_spinlock_create(&mld_peer->link_peers_info_lock);
	mld_peer->num_links = 0;
	for (i = 0; i < DP_MAX_MLO_LINKS; i++)
		mld_peer->link_peers[i].is_valid = false;
}

/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}

/**
 * dp_mld_peer_add_link_peer() - add link peer info to mld peer
 * @mld_peer: mld dp peer pointer
 * @link_peer: link dp peer pointer
 * @is_bridge_peer: flag to indicate if peer is bridge peer
 *
 * Return: None
 */
static inline
void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
			       struct dp_peer *link_peer,
			       uint8_t is_bridge_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	struct dp_soc *soc = mld_peer->vdev->pdev->soc;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (!link_peer_info->is_valid) {
			qdf_mem_copy(link_peer_info->mac_addr.raw,
				     link_peer->mac_addr.raw,
				     QDF_MAC_ADDR_SIZE);
			link_peer_info->is_valid = true;
			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
			link_peer_info->chip_id =
				dp_get_chip_id(link_peer->vdev->pdev->soc);
			link_peer_info->is_bridge_peer = is_bridge_peer;
			mld_peer->num_links++;
			break;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	dp_peer_info("%s addition of link peer %pK (" QDF_MAC_ADDR_FMT ") "
		     "to MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
		     "idx %u num_links %u",
		     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
		     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
		     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
		     i, mld_peer->num_links);

	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_ADD_LINK,
						mld_peer, link_peer, i,
						(i != DP_MAX_MLO_LINKS) ? 1 : 0);
}

/**
 * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
 * @mld_peer: MLD dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: number of links left after deletion
 */
static inline
uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
				  struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	uint8_t num_links;
	struct dp_soc *soc = mld_peer->vdev->pdev->soc;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid &&
		    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
					       &link_peer_info->mac_addr)) {
			link_peer_info->is_valid = false;
			mld_peer->num_links--;
			break;
		}
	}
	num_links = mld_peer->num_links;
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	dp_peer_info("%s deletion of link peer %pK (" QDF_MAC_ADDR_FMT ") "
		     "from MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
		     "idx %u num_links %u",
		     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
		     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
		     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
		     i, mld_peer->num_links);

	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_DEL_LINK,
						mld_peer, link_peer, i,
						(i != DP_MAX_MLO_LINKS) ? 1 : 0);

	return num_links;
}

/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
 *					   increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that holds the link peers pointer array and count
 * @mod_id: id of module requesting reference
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
				struct dp_soc *soc,
				struct dp_peer *mld_peer,
				struct dp_mld_link_peers *mld_link_peers,
				enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i = 0, j = 0;
	struct dp_peer_link_info *link_peer_info;

	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid) {
			peer = dp_link_peer_hash_find_by_chip_id(
						soc,
						link_peer_info->mac_addr.raw,
						true,
						link_peer_info->vdev_id,
						link_peer_info->chip_id,
						mod_id);
			if (peer)
				mld_link_peers->link_peers[j++] = peer;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	mld_link_peers->num_links = j;
}

/**
 * dp_release_link_peers_ref() - release all link peers' references
 * @mld_link_peers: structure that holds the link peers pointer array and count
 * @mod_id: id of module requesting reference
 *
 * Return: None
 */
static inline
void dp_release_link_peers_ref(
			struct dp_mld_link_peers *mld_link_peers,
			enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i;

	for (i = 0; i < mld_link_peers->num_links; i++) {
		peer = mld_link_peers->link_peers[i];
		if (peer)
			dp_peer_unref_delete(peer, mod_id);
		mld_link_peers->link_peers[i] = NULL;
	}

	mld_link_peers->num_links = 0;
}
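
/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * the usual get/iterate/release pattern for the two helpers above. Every
 * reference taken by dp_get_link_peers_ref_from_mld_peer() must be dropped
 * through dp_release_link_peers_ref() with the same mod_id. The caller is
 * assumed to already hold a reference on @mld_peer.
 */
static inline void
dp_mld_peer_walk_links_sketch(struct dp_soc *soc, struct dp_peer *mld_peer)
{
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *link_peer;
	uint8_t i;

	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, &link_peers_info,
					    DP_MOD_ID_CDP);
	for (i = 0; i < link_peers_info.num_links; i++) {
		link_peer = link_peers_info.link_peers[i];
		/* per-link work would go here */
		dp_peer_info("link[%u]: peer %pK", i, link_peer);
	}
	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
}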

/**
 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
 * @soc: Datapath soc handle
 * @peer_id: peer id
 * @lmac_id: lmac id to find the link peer on given lmac
 *
 * Return: peer_id of link peer if found
 *	   else return HTT_INVALID_PEER
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	uint8_t i;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_soc *link_peer_soc;
	struct dp_mld_link_peers link_peers_info;
	uint16_t link_peer_id = HTT_INVALID_PEER;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);

	if (!peer)
		return HTT_INVALID_PEER;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    DP_MOD_ID_CDP);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			if ((link_peer_soc == soc) &&
			    (link_peer->vdev->pdev->lmac_id == lmac_id)) {
				link_peer_id = link_peer->peer_id;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		link_peer_id = peer_id;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return link_peer_id;
}

/**
 * dp_peer_get_tgt_peer_hash_find() - get dp_peer handle
 * @soc: soc handle
 * @peer_mac: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * For an MLO connection, get the corresponding MLD peer;
 * otherwise get the link peer for the non-MLO case.
 *
 * Return: peer on success
 *	   NULL on failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find(soc,
						      peer_mac, 0, vdev_id,
						      mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;

			/* release peer reference that was added by hash find */
			dp_peer_unref_delete(peer, mod_id);
		} else {
			/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	} else {
		dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT " vdev_id: %u",
			    QDF_MAC_ADDR_REF(peer_mac), vdev_id);
	}

	return ta_peer;
}
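
/*
 * Illustrative sketch (hypothetical caller, not part of the original header):
 * resolve the target peer (MLD peer for an MLO link peer, the peer itself
 * otherwise) for a MAC address, use it, and drop the reference returned by
 * the helper above.
 */
static inline void dp_tgt_peer_usage_sketch(struct dp_soc *soc,
					    uint8_t *peer_mac,
					    uint8_t vdev_id)
{
	struct dp_peer *ta_peer;

	ta_peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0, vdev_id,
						 DP_MOD_ID_CDP);
	if (!ta_peer)
		return;

	/* ... operate on ta_peer here ... */

	dp_peer_unref_delete(ta_peer, DP_MOD_ID_CDP);
}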

/**
 * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @mod_id: ID of module requesting reference
 *
 * For an MLO connection, get the corresponding MLD peer;
 * otherwise get the link peer for the non-MLO case.
 *
 * Return: peer on success
 *	   NULL on failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;

			/* release peer reference that was added by hash find */
			dp_peer_unref_delete(peer, mod_id);
		} else {
			/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	}

	return ta_peer;
}

/**
 * dp_peer_mlo_delete() - peer MLO related delete operation
 * @peer: DP peer handle
 *
 * Return: None
 */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
	struct dp_peer *ml_peer;
	struct dp_soc *soc;

	dp_info("peer " QDF_MAC_ADDR_FMT " type %d",
		QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->peer_type);

	/* MLO connection link peer */
	if (IS_MLO_DP_LINK_PEER(peer)) {
		ml_peer = peer->mld_peer;
		soc = ml_peer->vdev->pdev->soc;

		/* if last link peer deletion, delete MLD peer */
		if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
			dp_peer_delete(soc, peer->mld_peer, NULL);
	}
}

/**
 * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
 * @soc: Soc handle
 * @peer: DP peer handle
 * @vdev_id: Vdev ID
 * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info);

/**
 * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
 * @peer: datapath peer
 *
 * Return: MLD peer in case of MLO link peer
 *	   peer itself in other cases
 */
static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
}

/**
 * dp_get_primary_link_peer_by_id() - Get primary link peer from the given
 *				      peer id
 * @soc: core DP soc context
 * @peer_id: peer id
 * @mod_id: ID of module requesting reference
 *
 * Return: primary link peer for the MLO peer
 *	   legacy peer itself in case of legacy peer
 */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	uint8_t i;
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_peer *primary_peer = NULL;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

	if (!peer)
		return NULL;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    mod_id);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			if (link_peer->primary_link) {
				/*
				 * Take additional reference over
				 * primary link peer.
				 */
				if (QDF_STATUS_SUCCESS ==
				    dp_peer_get_ref(NULL, link_peer, mod_id))
					primary_peer = link_peer;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, mod_id);
		dp_peer_unref_delete(peer, mod_id);
	} else {
		primary_peer = peer;
	}

	return primary_peer;
}

/**
 * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
 * @peer: Datapath peer
 *
 * Return: dp_txrx_peer from MLD peer if peer type is link peer
 *	   dp_txrx_peer from peer itself for other cases
 */
static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return IS_MLO_DP_LINK_PEER(peer) ?
				peer->mld_peer->txrx_peer : peer->txrx_peer;
}

/**
 * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
 * @peer: Datapath peer
 *
 * Return: true if peer is primary link peer or legacy peer
 *	   false otherwise
 */
static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
		return true;
	else if (IS_DP_LEGACY_PEER(peer))
		return true;
	else
		return false;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Get target txrx peer for the given peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_txrx_peer *txrx_peer;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	if (!peer)
		return NULL;

	txrx_peer = dp_get_txrx_peer(peer);
	if (txrx_peer) {
		*handle = (dp_txrx_ref_handle)peer;
		return txrx_peer;
	}

	dp_peer_unref_delete(peer, mod_id);
	return NULL;
}
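
/*
 * Illustrative sketch (hypothetical caller, not part of the original header):
 * fetch the target txrx peer by id, use it, then release the underlying
 * dp_peer reference through the returned handle. The release helper named
 * here, dp_txrx_peer_unref_delete(), is assumed to be the usual counterpart
 * declared elsewhere in the datapath headers.
 */
static inline void dp_tgt_txrx_peer_usage_sketch(struct dp_soc *soc,
						 uint16_t peer_id)
{
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
						   &txrx_ref_handle,
						   DP_MOD_ID_CDP);
	if (!txrx_peer)
		return;

	/* ... update stats or per-peer txrx state here ... */

	dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_CDP);
}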

/**
 * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
 * @soc: core DP soc context
 *
 * Return: void
 */
void dp_print_mlo_ast_stats_be(struct dp_soc *soc);

/**
 * dp_get_peer_link_id() - Get link peer Link ID
 * @peer: Datapath peer
 *
 * Return: Link peer Link ID
 */
uint8_t dp_get_peer_link_id(struct dp_peer *peer);
#else
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false

#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */

/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) true

#define IS_MLO_DP_LINK_PEER(_peer) false

#define IS_MLO_DP_MLD_PEER(_peer) false

#define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL

static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
					   struct cdp_peer_info *peer_info,
					   enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_info->mac_addr,
				      peer_info->mac_addr_is_aligned,
				      peer_info->vdev_id,
				      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
}

static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}

static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	return peer;
}

static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return peer->txrx_peer;
}

static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	return true;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Get target txrx peer for the given peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)
{
	return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}

static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	return peer_id;
}

static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
{
}

static inline uint8_t dp_get_peer_link_id(struct dp_peer *peer)
{
	return 0;
}
#endif /* WLAN_FEATURE_11BE_MLO */

static inline
void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
{
	uint8_t i;

	qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
		     sizeof(struct dp_rx_tid_defrag));

	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
}

static inline
void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
{
	uint8_t i;

	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
}

#ifdef PEER_CACHE_RX_PKTS
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
	qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
	txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
			DP_RX_CACHED_BUFQ_THRESH);
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
	qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
#else
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
#endif

/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc: core DP soc context
 * @peer: DP peer
 * @state: new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
		     struct dp_peer *peer,
		     enum dp_peer_state state)
{
	uint8_t peer_state;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	peer_state = peer->peer_state;

	switch (state) {
	case DP_PEER_STATE_INIT:
		DP_PEER_STATE_ASSERT
			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_ACTIVE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_LOGICAL_DELETE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_ACTIVE) ||
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_INACTIVE:
		if (IS_MLO_DP_MLD_PEER(peer))
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_ACTIVE));
		else
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_FREED:
		if (peer->sta_self_peer)
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INIT));
		else
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INACTIVE) ||
				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	default:
		qdf_spin_unlock_bh(&peer->peer_state_lock);
		dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return;
	}

	peer->peer_state = state;
	qdf_spin_unlock_bh(&peer->peer_state_lock);
	dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
		peer_state, state,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}
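
/*
 * Summary of the legal transitions enforced by the asserts above (added for
 * readability; not part of the original header):
 *
 *	INIT            <- any state except ACTIVE and LOGICAL_DELETE
 *	ACTIVE          <- INIT
 *	LOGICAL_DELETE  <- ACTIVE or INIT
 *	INACTIVE        <- ACTIVE (MLD peer) / LOGICAL_DELETE (other peers)
 *	FREED           <- INIT (sta self peer) / INACTIVE or LOGICAL_DELETE
 *
 * A typical teardown sequence is therefore, for example:
 *
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
 */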

/**
 * dp_vdev_iterate_specific_peer_type() - API to iterate through vdev peer
 * list based on type of peer (Legacy or MLD peer)
 *
 * @vdev: DP vdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 * @peer_type: type of peer - MLO Link Peer or Legacy Peer
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_specific_peer_type(struct dp_vdev *vdev,
				   dp_peer_iter_func *func,
				   void *arg, enum dp_mod_id mod_id,
				   enum dp_peer_type peer_type)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
			if ((peer_type == DP_PEER_TYPE_LEGACY &&
			     (IS_DP_LEGACY_PEER(peer))) ||
			    (peer_type == DP_PEER_TYPE_MLO_LINK &&
			     (IS_MLO_DP_LINK_PEER(peer)))) {
				(*func)(soc, peer, arg);
			}
			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
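
/*
 * Illustrative sketch (hypothetical callback, not part of the original
 * header): a dp_peer_iter_func that could be handed to
 * dp_vdev_iterate_specific_peer_type() to visit only legacy peers on a vdev.
 */
static inline void dp_peer_log_iter_sketch(struct dp_soc *soc,
					   struct dp_peer *peer, void *arg)
{
	dp_peer_info("visited peer " QDF_MAC_ADDR_FMT,
		     QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}

/*
 * Example invocation:
 *	dp_vdev_iterate_specific_peer_type(vdev, dp_peer_log_iter_sketch,
 *					   NULL, DP_MOD_ID_CDP,
 *					   DP_PEER_TYPE_LEGACY);
 */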

#ifdef REO_SHARED_QREF_TABLE_EN
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
					struct dp_peer *peer);
#else
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
						      struct dp_peer *peer) {}
#endif

/**
 * dp_peer_check_wds_ext_peer() - Check WDS ext peer
 *
 * @peer: DP peer
 *
 * Return: True for WDS ext peer, false otherwise
 */
bool dp_peer_check_wds_ext_peer(struct dp_peer *peer);

/**
 * dp_gen_ml_peer_id() - Generate MLD peer id for DP
 *
 * @soc: DP soc context
 * @peer_id: mld peer id
 *
 * Return: DP MLD peer id
 */
uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id);

#ifdef FEATURE_AST
/**
 * dp_peer_host_add_map_ast() - Add ast entry with HW AST Index
 * @soc: SoC handle
 * @peer_id: peer id from firmware
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id of the VAP to which the peer belongs
 * @ast_hash: ast hash value in HW
 * @is_wds: flag to indicate peer map event for WDS ast entry
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
				    uint8_t *mac_addr, uint16_t hw_peer_id,
				    uint8_t vdev_id, uint16_t ast_hash,
				    uint8_t is_wds);
#endif

#if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
/**
 * dp_map_link_id_band() - Set link id to band mapping in txrx_peer
 * @peer: dp peer pointer
 *
 * Return: None
 */
void dp_map_link_id_band(struct dp_peer *peer);
#else
static inline
void dp_map_link_id_band(struct dp_peer *peer)
{
}
#endif
#endif /* _DP_PEER_H_ */