dp_peer.h 64 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541
  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #ifndef _DP_PEER_H_
  20. #define _DP_PEER_H_
  21. #include <qdf_types.h>
  22. #include <qdf_lock.h>
  23. #include "dp_types.h"
  24. #include "dp_internal.h"
  25. #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
  26. #include "hal_reo.h"
  27. #endif
  28. #define DP_INVALID_PEER_ID 0xffff
  29. #define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
  30. #define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */
  31. #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
  32. #define DP_PEER_HASH_LOAD_MULT 2
  33. #define DP_PEER_HASH_LOAD_SHIFT 0
  34. /* Threshold for peer's cached buf queue beyond which frames are dropped */
  35. #define DP_RX_CACHED_BUFQ_THRESH 64
  36. #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
  37. #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
  38. #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
  39. #define dp_peer_info(params...) \
  40. __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
  41. #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
  42. void check_free_list_for_invalid_flush(struct dp_soc *soc);
  43. static inline
  44. void add_entry_alloc_list(struct dp_soc *soc, struct dp_rx_tid *rx_tid,
  45. struct dp_peer *peer, void *hw_qdesc_vaddr)
  46. {
  47. uint32_t max_list_size;
  48. unsigned long curr_ts = qdf_get_system_timestamp();
  49. uint32_t qref_index = soc->free_addr_list_idx;
  50. max_list_size = soc->wlan_cfg_ctx->qref_control_size;
  51. if (max_list_size == 0)
  52. return;
  53. soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_paddr =
  54. rx_tid->hw_qdesc_paddr;
  55. soc->list_qdesc_addr_alloc[qref_index].ts_qdesc_mem_hdl = curr_ts;
  56. soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_vaddr_align =
  57. hw_qdesc_vaddr;
  58. soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_vaddr_unalign =
  59. rx_tid->hw_qdesc_vaddr_unaligned;
  60. soc->list_qdesc_addr_alloc[qref_index].peer_id = peer->peer_id;
  61. soc->list_qdesc_addr_alloc[qref_index].tid = rx_tid->tid;
  62. soc->alloc_addr_list_idx++;
  63. if (soc->alloc_addr_list_idx == max_list_size)
  64. soc->alloc_addr_list_idx = 0;
  65. }
  66. static inline
  67. void add_entry_free_list(struct dp_soc *soc, struct dp_rx_tid *rx_tid)
  68. {
  69. uint32_t max_list_size;
  70. unsigned long curr_ts = qdf_get_system_timestamp();
  71. uint32_t qref_index = soc->free_addr_list_idx;
  72. max_list_size = soc->wlan_cfg_ctx->qref_control_size;
  73. if (max_list_size == 0)
  74. return;
  75. soc->list_qdesc_addr_free[qref_index].ts_qdesc_mem_hdl = curr_ts;
  76. soc->list_qdesc_addr_free[qref_index].hw_qdesc_paddr =
  77. rx_tid->hw_qdesc_paddr;
  78. soc->list_qdesc_addr_free[qref_index].hw_qdesc_vaddr_align =
  79. rx_tid->hw_qdesc_vaddr_aligned;
  80. soc->list_qdesc_addr_free[qref_index].hw_qdesc_vaddr_unalign =
  81. rx_tid->hw_qdesc_vaddr_unaligned;
  82. soc->free_addr_list_idx++;
  83. if (soc->free_addr_list_idx == max_list_size)
  84. soc->free_addr_list_idx = 0;
  85. }
  86. static inline
  87. void add_entry_write_list(struct dp_soc *soc, struct dp_peer *peer,
  88. uint32_t tid)
  89. {
  90. uint32_t max_list_size;
  91. unsigned long curr_ts = qdf_get_system_timestamp();
  92. max_list_size = soc->wlan_cfg_ctx->qref_control_size;
  93. if (max_list_size == 0)
  94. return;
  95. soc->reo_write_list[soc->write_paddr_list_idx].ts_qaddr_del = curr_ts;
  96. soc->reo_write_list[soc->write_paddr_list_idx].peer_id = peer->peer_id;
  97. soc->reo_write_list[soc->write_paddr_list_idx].paddr =
  98. peer->rx_tid[tid].hw_qdesc_paddr;
  99. soc->reo_write_list[soc->write_paddr_list_idx].tid = tid;
  100. soc->write_paddr_list_idx++;
  101. if (soc->write_paddr_list_idx == max_list_size)
  102. soc->write_paddr_list_idx = 0;
  103. }
  104. #ifdef REO_QDESC_HISTORY
  105. enum reo_qdesc_event_type {
  106. REO_QDESC_UPDATE_CB = 0,
  107. REO_QDESC_FREE,
  108. };
  109. struct reo_qdesc_event {
  110. qdf_dma_addr_t qdesc_addr;
  111. uint64_t ts;
  112. enum reo_qdesc_event_type type;
  113. uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
  114. };
  115. #endif
  116. struct ast_del_ctxt {
  117. bool age;
  118. int del_count;
  119. };
  120. #ifdef QCA_SUPPORT_WDS_EXTENDED
  121. /**
  122. * dp_peer_is_wds_ext_peer() - peer is WDS_EXT peer
  123. *
  124. * @peer: DP peer context
  125. *
  126. * This API checks whether the peer is WDS_EXT peer or not
  127. *
  128. * Return: true in the wds_ext peer else flase
  129. */
  130. static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
  131. {
  132. return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
  133. }
  134. #else
  135. static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
  136. {
  137. return false;
  138. }
  139. #endif
  140. typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
  141. void *arg);
  142. /**
  143. * dp_peer_unref_delete() - unref and delete peer
  144. * @peer: Datapath peer handle
  145. * @id: ID of module releasing reference
  146. *
  147. */
  148. void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
  149. /**
  150. * dp_txrx_peer_unref_delete() - unref and delete peer
  151. * @handle: Datapath txrx ref handle
  152. * @id: Module ID of the caller
  153. *
  154. */
  155. void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
  156. /**
  157. * dp_peer_find_hash_find() - returns legacy or mlo link peer from
  158. * peer_hash_table matching vdev_id and mac_address
  159. * @soc: soc handle
  160. * @peer_mac_addr: peer mac address
  161. * @mac_addr_is_aligned: is mac addr aligned
  162. * @vdev_id: vdev_id
  163. * @mod_id: id of module requesting reference
  164. *
  165. * return: peer in success
  166. * NULL in failure
  167. */
  168. struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
  169. uint8_t *peer_mac_addr,
  170. int mac_addr_is_aligned,
  171. uint8_t vdev_id,
  172. enum dp_mod_id mod_id);
  173. /**
  174. * dp_peer_find_by_id_valid - check if peer exists for given id
  175. * @soc: core DP soc context
  176. * @peer_id: peer id from peer object can be retrieved
  177. *
  178. * Return: true if peer exists of false otherwise
  179. */
  180. bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
  181. /**
  182. * dp_peer_get_ref() - Returns peer object given the peer id
  183. *
  184. * @soc: core DP soc context
  185. * @peer: DP peer
  186. * @mod_id: id of module requesting the reference
  187. *
  188. * Return: QDF_STATUS_SUCCESS if reference held successfully
  189. * else QDF_STATUS_E_INVAL
  190. */
  191. static inline
  192. QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
  193. struct dp_peer *peer,
  194. enum dp_mod_id mod_id)
  195. {
  196. if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
  197. return QDF_STATUS_E_INVAL;
  198. if (mod_id > DP_MOD_ID_RX)
  199. qdf_atomic_inc(&peer->mod_refs[mod_id]);
  200. return QDF_STATUS_SUCCESS;
  201. }
  202. /**
  203. * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
  204. *
  205. * @soc: core DP soc context
  206. * @peer_id: peer id from peer object can be retrieved
  207. * @mod_id: module id
  208. *
  209. * Return: struct dp_peer*: Pointer to DP peer object
  210. */
  211. static inline struct dp_peer *
  212. __dp_peer_get_ref_by_id(struct dp_soc *soc,
  213. uint16_t peer_id,
  214. enum dp_mod_id mod_id)
  215. {
  216. struct dp_peer *peer;
  217. qdf_spin_lock_bh(&soc->peer_map_lock);
  218. peer = (peer_id >= soc->max_peer_id) ? NULL :
  219. soc->peer_id_to_obj_map[peer_id];
  220. if (!peer ||
  221. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  222. qdf_spin_unlock_bh(&soc->peer_map_lock);
  223. return NULL;
  224. }
  225. qdf_spin_unlock_bh(&soc->peer_map_lock);
  226. return peer;
  227. }
  228. /**
  229. * dp_peer_get_ref_by_id() - Returns peer object given the peer id
  230. * if peer state is active
  231. *
  232. * @soc: core DP soc context
  233. * @peer_id: peer id from peer object can be retrieved
  234. * @mod_id: ID of module requesting reference
  235. *
  236. * Return: struct dp_peer*: Pointer to DP peer object
  237. */
  238. static inline
  239. struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
  240. uint16_t peer_id,
  241. enum dp_mod_id mod_id)
  242. {
  243. struct dp_peer *peer;
  244. qdf_spin_lock_bh(&soc->peer_map_lock);
  245. peer = (peer_id >= soc->max_peer_id) ? NULL :
  246. soc->peer_id_to_obj_map[peer_id];
  247. if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
  248. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  249. qdf_spin_unlock_bh(&soc->peer_map_lock);
  250. return NULL;
  251. }
  252. qdf_spin_unlock_bh(&soc->peer_map_lock);
  253. return peer;
  254. }
  255. /**
  256. * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
  257. *
  258. * @soc: core DP soc context
  259. * @peer_id: peer id from peer object can be retrieved
  260. * @handle: reference handle
  261. * @mod_id: ID of module requesting reference
  262. *
  263. * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
  264. */
  265. static inline struct dp_txrx_peer *
  266. dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
  267. uint16_t peer_id,
  268. dp_txrx_ref_handle *handle,
  269. enum dp_mod_id mod_id)
  270. {
  271. struct dp_peer *peer;
  272. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  273. if (!peer)
  274. return NULL;
  275. if (!peer->txrx_peer) {
  276. dp_peer_unref_delete(peer, mod_id);
  277. return NULL;
  278. }
  279. *handle = (dp_txrx_ref_handle)peer;
  280. return peer->txrx_peer;
  281. }
  282. #ifdef PEER_CACHE_RX_PKTS
  283. /**
  284. * dp_rx_flush_rx_cached() - flush cached rx frames
  285. * @peer: peer
  286. * @drop: set flag to drop frames
  287. *
  288. * Return: None
  289. */
  290. void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
  291. #else
  292. static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
  293. {
  294. }
  295. #endif
  296. static inline void
  297. dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
  298. {
  299. qdf_spin_lock_bh(&peer->peer_info_lock);
  300. peer->state = OL_TXRX_PEER_STATE_DISC;
  301. qdf_spin_unlock_bh(&peer->peer_info_lock);
  302. dp_rx_flush_rx_cached(peer, true);
  303. }
  304. /**
  305. * dp_vdev_iterate_peer() - API to iterate through vdev peer list
  306. *
  307. * @vdev: DP vdev context
  308. * @func: function to be called for each peer
  309. * @arg: argument need to be passed to func
  310. * @mod_id: module_id
  311. *
  312. * Return: void
  313. */
  314. static inline void
  315. dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
  316. enum dp_mod_id mod_id)
  317. {
  318. struct dp_peer *peer;
  319. struct dp_peer *tmp_peer;
  320. struct dp_soc *soc = NULL;
  321. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  322. return;
  323. soc = vdev->pdev->soc;
  324. qdf_spin_lock_bh(&vdev->peer_list_lock);
  325. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  326. peer_list_elem,
  327. tmp_peer) {
  328. if (dp_peer_get_ref(soc, peer, mod_id) ==
  329. QDF_STATUS_SUCCESS) {
  330. (*func)(soc, peer, arg);
  331. dp_peer_unref_delete(peer, mod_id);
  332. }
  333. }
  334. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  335. }
  336. /**
  337. * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
  338. *
  339. * @pdev: DP pdev context
  340. * @func: function to be called for each peer
  341. * @arg: argument need to be passed to func
  342. * @mod_id: module_id
  343. *
  344. * Return: void
  345. */
  346. static inline void
  347. dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
  348. enum dp_mod_id mod_id)
  349. {
  350. struct dp_vdev *vdev;
  351. if (!pdev)
  352. return;
  353. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  354. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
  355. dp_vdev_iterate_peer(vdev, func, arg, mod_id);
  356. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  357. }
  358. /**
  359. * dp_soc_iterate_peer() - API to iterate through all peers of soc
  360. *
  361. * @soc: DP soc context
  362. * @func: function to be called for each peer
  363. * @arg: argument need to be passed to func
  364. * @mod_id: module_id
  365. *
  366. * Return: void
  367. */
  368. static inline void
  369. dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
  370. enum dp_mod_id mod_id)
  371. {
  372. struct dp_pdev *pdev;
  373. int i;
  374. if (!soc)
  375. return;
  376. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  377. pdev = soc->pdev_list[i];
  378. dp_pdev_iterate_peer(pdev, func, arg, mod_id);
  379. }
  380. }
  381. /**
  382. * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
  383. *
  384. * This API will cache the peers in local allocated memory and calls
  385. * iterate function outside the lock.
  386. *
  387. * As this API is allocating new memory it is suggested to use this
  388. * only when lock cannot be held
  389. *
  390. * @vdev: DP vdev context
  391. * @func: function to be called for each peer
  392. * @arg: argument need to be passed to func
  393. * @mod_id: module_id
  394. *
  395. * Return: void
  396. */
  397. static inline void
  398. dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
  399. dp_peer_iter_func *func,
  400. void *arg,
  401. enum dp_mod_id mod_id)
  402. {
  403. struct dp_peer *peer;
  404. struct dp_peer *tmp_peer;
  405. struct dp_soc *soc = NULL;
  406. struct dp_peer **peer_array = NULL;
  407. int i = 0;
  408. uint32_t num_peers = 0;
  409. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  410. return;
  411. num_peers = vdev->num_peers;
  412. soc = vdev->pdev->soc;
  413. peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
  414. if (!peer_array)
  415. return;
  416. qdf_spin_lock_bh(&vdev->peer_list_lock);
  417. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  418. peer_list_elem,
  419. tmp_peer) {
  420. if (i >= num_peers)
  421. break;
  422. if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
  423. peer_array[i] = peer;
  424. i = (i + 1);
  425. }
  426. }
  427. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  428. for (i = 0; i < num_peers; i++) {
  429. peer = peer_array[i];
  430. if (!peer)
  431. continue;
  432. (*func)(soc, peer, arg);
  433. dp_peer_unref_delete(peer, mod_id);
  434. }
  435. qdf_mem_free(peer_array);
  436. }
  437. /**
  438. * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
  439. *
  440. * This API will cache the peers in local allocated memory and calls
  441. * iterate function outside the lock.
  442. *
  443. * As this API is allocating new memory it is suggested to use this
  444. * only when lock cannot be held
  445. *
  446. * @pdev: DP pdev context
  447. * @func: function to be called for each peer
  448. * @arg: argument need to be passed to func
  449. * @mod_id: module_id
  450. *
  451. * Return: void
  452. */
  453. static inline void
  454. dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
  455. dp_peer_iter_func *func,
  456. void *arg,
  457. enum dp_mod_id mod_id)
  458. {
  459. struct dp_peer *peer;
  460. struct dp_peer *tmp_peer;
  461. struct dp_soc *soc = NULL;
  462. struct dp_vdev *vdev = NULL;
  463. struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
  464. int i = 0;
  465. int j = 0;
  466. uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
  467. if (!pdev || !pdev->soc)
  468. return;
  469. soc = pdev->soc;
  470. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  471. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  472. num_peers[i] = vdev->num_peers;
  473. peer_array[i] = qdf_mem_malloc(num_peers[i] *
  474. sizeof(struct dp_peer *));
  475. if (!peer_array[i])
  476. break;
  477. qdf_spin_lock_bh(&vdev->peer_list_lock);
  478. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  479. peer_list_elem,
  480. tmp_peer) {
  481. if (j >= num_peers[i])
  482. break;
  483. if (dp_peer_get_ref(soc, peer, mod_id) ==
  484. QDF_STATUS_SUCCESS) {
  485. peer_array[i][j] = peer;
  486. j = (j + 1);
  487. }
  488. }
  489. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  490. i = (i + 1);
  491. }
  492. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  493. for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
  494. if (!peer_array[i])
  495. break;
  496. for (j = 0; j < num_peers[i]; j++) {
  497. peer = peer_array[i][j];
  498. if (!peer)
  499. continue;
  500. (*func)(soc, peer, arg);
  501. dp_peer_unref_delete(peer, mod_id);
  502. }
  503. qdf_mem_free(peer_array[i]);
  504. }
  505. }
  506. /**
  507. * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
  508. *
  509. * This API will cache the peers in local allocated memory and calls
  510. * iterate function outside the lock.
  511. *
  512. * As this API is allocating new memory it is suggested to use this
  513. * only when lock cannot be held
  514. *
  515. * @soc: DP soc context
  516. * @func: function to be called for each peer
  517. * @arg: argument need to be passed to func
  518. * @mod_id: module_id
  519. *
  520. * Return: void
  521. */
  522. static inline void
  523. dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
  524. dp_peer_iter_func *func,
  525. void *arg,
  526. enum dp_mod_id mod_id)
  527. {
  528. struct dp_pdev *pdev;
  529. int i;
  530. if (!soc)
  531. return;
  532. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  533. pdev = soc->pdev_list[i];
  534. dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
  535. }
  536. }
  537. #ifdef DP_PEER_STATE_DEBUG
  538. #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
  539. do { \
  540. if (!(_condition)) { \
  541. dp_alert("Invalid state shift from %u to %u peer " \
  542. QDF_MAC_ADDR_FMT, \
  543. (_peer)->peer_state, (_new_state), \
  544. QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
  545. QDF_ASSERT(0); \
  546. } \
  547. } while (0)
  548. #else
  549. #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
  550. do { \
  551. if (!(_condition)) { \
  552. dp_alert("Invalid state shift from %u to %u peer " \
  553. QDF_MAC_ADDR_FMT, \
  554. (_peer)->peer_state, (_new_state), \
  555. QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
  556. } \
  557. } while (0)
  558. #endif
  559. /**
  560. * dp_peer_state_cmp() - compare dp peer state
  561. *
  562. * @peer: DP peer
  563. * @state: state
  564. *
  565. * Return: true if state matches with peer state
  566. * false if it does not match
  567. */
  568. static inline bool
  569. dp_peer_state_cmp(struct dp_peer *peer,
  570. enum dp_peer_state state)
  571. {
  572. bool is_status_equal = false;
  573. qdf_spin_lock_bh(&peer->peer_state_lock);
  574. is_status_equal = (peer->peer_state == state);
  575. qdf_spin_unlock_bh(&peer->peer_state_lock);
  576. return is_status_equal;
  577. }
  578. /**
  579. * dp_print_ast_stats() - Dump AST table contents
  580. * @soc: Datapath soc handle
  581. *
  582. * Return: void
  583. */
  584. void dp_print_ast_stats(struct dp_soc *soc);
  585. /**
  586. * dp_rx_peer_map_handler() - handle peer map event from firmware
  587. * @soc: generic soc handle
  588. * @peer_id: peer_id from firmware
  589. * @hw_peer_id: ast index for this peer
  590. * @vdev_id: vdev ID
  591. * @peer_mac_addr: mac address of the peer
  592. * @ast_hash: ast hash value
  593. * @is_wds: flag to indicate peer map event for WDS ast entry
  594. *
  595. * associate the peer_id that firmware provided with peer entry
  596. * and update the ast table in the host with the hw_peer_id.
  597. *
  598. * Return: QDF_STATUS code
  599. */
  600. QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  601. uint16_t hw_peer_id, uint8_t vdev_id,
  602. uint8_t *peer_mac_addr, uint16_t ast_hash,
  603. uint8_t is_wds);
  604. /**
  605. * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
  606. * @soc: generic soc handle
  607. * @peer_id: peer_id from firmware
  608. * @vdev_id: vdev ID
  609. * @peer_mac_addr: mac address of the peer or wds entry
  610. * @is_wds: flag to indicate peer map event for WDS ast entry
  611. * @free_wds_count: number of wds entries freed by FW with peer delete
  612. *
  613. * Return: none
  614. */
  615. void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
  616. uint8_t vdev_id, uint8_t *peer_mac_addr,
  617. uint8_t is_wds, uint32_t free_wds_count);
  618. #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
  619. /**
  620. * dp_rx_peer_ext_evt() - handle peer extended event from firmware
  621. * @soc: DP soc handle
  622. * @info: extended evt info
  623. *
  624. *
  625. * Return: QDF_STATUS
  626. */
  627. QDF_STATUS
  628. dp_rx_peer_ext_evt(struct dp_soc *soc, struct dp_peer_ext_evt_info *info);
  629. #endif
  630. #ifdef DP_RX_UDP_OVER_PEER_ROAM
  631. /**
  632. * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
  633. * @soc: dp soc pointer
  634. * @vdev_id: vdev id
  635. * @peer_mac_addr: mac address of the peer
  636. *
  637. * This function resets the roamed peer auth status and mac address
  638. * after peer map indication of same peer is received from firmware.
  639. *
  640. * Return: None
  641. */
  642. void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
  643. uint8_t *peer_mac_addr);
  644. #else
  645. static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
  646. uint8_t *peer_mac_addr)
  647. {
  648. }
  649. #endif
  650. #ifdef WLAN_FEATURE_11BE_MLO
  651. /**
  652. * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
  653. * @soc: generic soc handle
  654. * @peer_id: ML peer_id from firmware
  655. * @peer_mac_addr: mac address of the peer
  656. * @mlo_flow_info: MLO AST flow info
  657. * @mlo_link_info: MLO link info
  658. *
  659. * associate the ML peer_id that firmware provided with peer entry
  660. * and update the ast table in the host with the hw_peer_id.
  661. *
  662. * Return: QDF_STATUS code
  663. */
  664. QDF_STATUS
  665. dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  666. uint8_t *peer_mac_addr,
  667. struct dp_mlo_flow_override_info *mlo_flow_info,
  668. struct dp_mlo_link_info *mlo_link_info);
  669. /**
  670. * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
  671. * @soc: generic soc handle
  672. * @peer_id: peer_id from firmware
  673. *
  674. * Return: none
  675. */
  676. void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
  677. #endif
  678. void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
  679. enum cdp_sec_type sec_type, int is_unicast,
  680. u_int32_t *michael_key, u_int32_t *rx_pn);
  681. uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
  682. uint16_t peer_id, uint8_t *peer_mac);
  683. /**
  684. * dp_peer_add_ast() - Allocate and add AST entry into peer list
  685. * @soc: SoC handle
  686. * @peer: peer to which ast node belongs
  687. * @mac_addr: MAC address of ast node
  688. * @type: AST entry type
  689. * @flags: AST configuration flags
  690. *
  691. * This API is used by WDS source port learning function to
  692. * add a new AST entry into peer AST list
  693. *
  694. * Return: QDF_STATUS code
  695. */
  696. QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
  697. uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
  698. uint32_t flags);
  699. /**
  700. * dp_peer_del_ast() - Delete and free AST entry
  701. * @soc: SoC handle
  702. * @ast_entry: AST entry of the node
  703. *
  704. * This function removes the AST entry from peer and soc tables
  705. * It assumes caller has taken the ast lock to protect the access to these
  706. * tables
  707. *
  708. * Return: None
  709. */
  710. void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
  711. void dp_peer_ast_unmap_handler(struct dp_soc *soc,
  712. struct dp_ast_entry *ast_entry);
  713. /**
  714. * dp_peer_update_ast() - Delete and free AST entry
  715. * @soc: SoC handle
  716. * @peer: peer to which ast node belongs
  717. * @ast_entry: AST entry of the node
  718. * @flags: wds or hmwds
  719. *
  720. * This function update the AST entry to the roamed peer and soc tables
  721. * It assumes caller has taken the ast lock to protect the access to these
  722. * tables
  723. *
  724. * Return: 0 if ast entry is updated successfully
  725. * -1 failure
  726. */
  727. int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
  728. struct dp_ast_entry *ast_entry, uint32_t flags);
  729. /**
  730. * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
  731. * @soc: SoC handle
  732. * @ast_mac_addr: Mac address
  733. * @pdev_id: pdev Id
  734. *
  735. * It assumes caller has taken the ast lock to protect the access to
  736. * AST hash table
  737. *
  738. * Return: AST entry
  739. */
  740. struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
  741. uint8_t *ast_mac_addr,
  742. uint8_t pdev_id);
  743. /**
  744. * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address
  745. * @soc: SoC handle
  746. * @ast_mac_addr: Mac address
  747. * @vdev_id: vdev Id
  748. *
  749. * It assumes caller has taken the ast lock to protect the access to
  750. * AST hash table
  751. *
  752. * Return: AST entry
  753. */
  754. struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
  755. uint8_t *ast_mac_addr,
  756. uint8_t vdev_id);
  757. /**
  758. * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
  759. * @soc: SoC handle
  760. * @ast_mac_addr: Mac address
  761. *
  762. * It assumes caller has taken the ast lock to protect the access to
  763. * AST hash table
  764. *
  765. * Return: AST entry
  766. */
  767. struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
  768. uint8_t *ast_mac_addr);
  769. /**
  770. * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
  771. * @soc: SoC handle
  772. * @ast_entry: AST entry of the node
  773. *
  774. * This function gets the pdev_id from the ast entry.
  775. *
  776. * Return: (uint8_t) pdev_id
  777. */
  778. uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
  779. struct dp_ast_entry *ast_entry);
  780. /**
  781. * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
  782. * @soc: SoC handle
  783. * @ast_entry: AST entry of the node
  784. *
  785. * This function gets the next hop from the ast entry.
  786. *
  787. * Return: (uint8_t) next_hop
  788. */
  789. uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
  790. struct dp_ast_entry *ast_entry);
  791. /**
  792. * dp_peer_ast_set_type() - set type from the ast entry
  793. * @soc: SoC handle
  794. * @ast_entry: AST entry of the node
  795. * @type: AST entry type
  796. *
  797. * This function sets the type in the ast entry.
  798. *
  799. * Return:
  800. */
  801. void dp_peer_ast_set_type(struct dp_soc *soc,
  802. struct dp_ast_entry *ast_entry,
  803. enum cdp_txrx_ast_entry_type type);
  804. void dp_peer_ast_send_wds_del(struct dp_soc *soc,
  805. struct dp_ast_entry *ast_entry,
  806. struct dp_peer *peer);
  807. #ifdef WLAN_FEATURE_MULTI_AST_DEL
  808. void dp_peer_ast_send_multi_wds_del(
  809. struct dp_soc *soc, uint8_t vdev_id,
  810. struct peer_del_multi_wds_entries *wds_list);
  811. #endif
  812. void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  813. struct cdp_soc *dp_soc,
  814. void *cookie,
  815. enum cdp_ast_free_status status);
  816. /**
  817. * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
  818. * @soc: SoC handle
  819. * @ase: Address search entry
  820. *
  821. * This function removes the AST entry from soc AST hash table
  822. * It assumes caller has taken the ast lock to protect the access to this table
  823. *
  824. * Return: None
  825. */
  826. void dp_peer_ast_hash_remove(struct dp_soc *soc,
  827. struct dp_ast_entry *ase);
  828. /**
  829. * dp_peer_free_ast_entry() - Free up the ast entry memory
  830. * @soc: SoC handle
  831. * @ast_entry: Address search entry
  832. *
  833. * This API is used to free up the memory associated with
  834. * AST entry.
  835. *
  836. * Return: None
  837. */
  838. void dp_peer_free_ast_entry(struct dp_soc *soc,
  839. struct dp_ast_entry *ast_entry);
  840. /**
  841. * dp_peer_unlink_ast_entry() - Free up the ast entry memory
  842. * @soc: SoC handle
  843. * @ast_entry: Address search entry
  844. * @peer: peer
  845. *
  846. * This API is used to remove/unlink AST entry from the peer list
  847. * and hash list.
  848. *
  849. * Return: None
  850. */
  851. void dp_peer_unlink_ast_entry(struct dp_soc *soc,
  852. struct dp_ast_entry *ast_entry,
  853. struct dp_peer *peer);
  854. /**
  855. * dp_peer_mec_detach_entry() - Detach the MEC entry
  856. * @soc: SoC handle
  857. * @mecentry: MEC entry of the node
  858. * @ptr: pointer to free list
  859. *
  860. * The MEC entry is detached from MEC table and added to free_list
  861. * to free the object outside lock
  862. *
  863. * Return: None
  864. */
  865. void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
  866. void *ptr);
  867. /**
  868. * dp_peer_mec_free_list() - free the MEC entry from free_list
  869. * @soc: SoC handle
  870. * @ptr: pointer to free list
  871. *
  872. * Return: None
  873. */
  874. void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
  875. /**
  876. * dp_peer_mec_add_entry()
  877. * @soc: SoC handle
  878. * @vdev: vdev to which mec node belongs
  879. * @mac_addr: MAC address of mec node
  880. *
  881. * This function allocates and adds MEC entry to MEC table.
  882. * It assumes caller has taken the mec lock to protect the access to these
  883. * tables
  884. *
  885. * Return: QDF_STATUS
  886. */
  887. QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
  888. struct dp_vdev *vdev,
  889. uint8_t *mac_addr);
  890. /**
  891. * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by PDEV Id
  892. * within pdev
  893. * @soc: SoC handle
  894. * @pdev_id: pdev Id
  895. * @mec_mac_addr: MAC address of mec node
  896. *
  897. * It assumes caller has taken the mec_lock to protect the access to
  898. * MEC hash table
  899. *
  900. * Return: MEC entry
  901. */
  902. struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
  903. uint8_t pdev_id,
  904. uint8_t *mec_mac_addr);
  905. #define DP_AST_ASSERT(_condition) \
  906. do { \
  907. if (!(_condition)) { \
  908. dp_print_ast_stats(soc);\
  909. QDF_BUG(_condition); \
  910. } \
  911. } while (0)
  912. /**
  913. * dp_peer_update_inactive_time() - Update inactive time for peer
  914. * @pdev: pdev object
  915. * @tag_type: htt_tlv_tag type
  916. * @tag_buf: buf message
  917. */
  918. void
  919. dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
  920. uint32_t *tag_buf);
  921. #ifndef QCA_MULTIPASS_SUPPORT
  922. static inline
  923. /**
  924. * dp_peer_set_vlan_id() - set vlan_id for this peer
  925. * @cdp_soc: soc handle
  926. * @vdev_id: id of vdev object
  927. * @peer_mac: mac address
  928. * @vlan_id: vlan id for peer
  929. *
  930. * Return: void
  931. */
  932. void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
  933. uint8_t vdev_id, uint8_t *peer_mac,
  934. uint16_t vlan_id)
  935. {
  936. }
  937. /**
  938. * dp_set_vlan_groupkey() - set vlan map for vdev
  939. * @soc_hdl: pointer to soc
  940. * @vdev_id: id of vdev handle
  941. * @vlan_id: vlan_id
  942. * @group_key: group key for vlan
  943. *
  944. * Return: set success/failure
  945. */
  946. static inline
  947. QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  948. uint16_t vlan_id, uint16_t group_key)
  949. {
  950. return QDF_STATUS_SUCCESS;
  951. }
  952. /**
  953. * dp_peer_multipass_list_init() - initialize multipass peer list
  954. * @vdev: pointer to vdev
  955. *
  956. * Return: void
  957. */
  958. static inline
  959. void dp_peer_multipass_list_init(struct dp_vdev *vdev)
  960. {
  961. }
  962. /**
  963. * dp_peer_multipass_list_remove() - remove peer from special peer list
  964. * @peer: peer handle
  965. *
  966. * Return: void
  967. */
  968. static inline
  969. void dp_peer_multipass_list_remove(struct dp_peer *peer)
  970. {
  971. }
  972. #else
  973. void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
  974. uint8_t vdev_id, uint8_t *peer_mac,
  975. uint16_t vlan_id);
  976. QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
  977. uint16_t vlan_id, uint16_t group_key);
  978. void dp_peer_multipass_list_init(struct dp_vdev *vdev);
  979. void dp_peer_multipass_list_remove(struct dp_peer *peer);
  980. #endif
  981. #ifndef QCA_PEER_MULTIQ_SUPPORT
  982. /**
  983. * dp_peer_reset_flowq_map() - reset peer flowq map table
  984. * @peer: dp peer handle
  985. *
  986. * Return: none
  987. */
  988. static inline
  989. void dp_peer_reset_flowq_map(struct dp_peer *peer)
  990. {
  991. }
  992. /**
  993. * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
  994. * @soc_hdl: generic soc handle
  995. * @is_wds: flag to indicate if peer is wds
  996. * @peer_id: peer_id from htt peer map message
  997. * @peer_mac_addr: mac address of the peer
  998. * @ast_info: ast flow override information from peer map
  999. *
  1000. * Return: none
  1001. */
  1002. static inline
  1003. void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
  1004. bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
  1005. struct dp_ast_flow_override_info *ast_info)
  1006. {
  1007. }
  1008. #else
  1009. void dp_peer_reset_flowq_map(struct dp_peer *peer);
  1010. void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
  1011. bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
  1012. struct dp_ast_flow_override_info *ast_info);
  1013. #endif
  1014. #ifdef QCA_PEER_EXT_STATS
  1015. /**
  1016. * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay stats content
  1017. * @soc: DP SoC context
  1018. * @txrx_peer: DP txrx peer context
  1019. *
  1020. * Allocate the peer delay stats context
  1021. *
  1022. * Return: QDF_STATUS_SUCCESS if allocation is
  1023. * successful
  1024. */
  1025. QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
  1026. struct dp_txrx_peer *txrx_peer);
  1027. /**
  1028. * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context
  1029. * @soc: DP SoC context
  1030. * @txrx_peer: txrx DP peer context
  1031. *
  1032. * Free the peer delay stats context
  1033. *
  1034. * Return: Void
  1035. */
  1036. void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
  1037. struct dp_txrx_peer *txrx_peer);
  1038. /**
  1039. * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer
  1040. * @txrx_peer: dp_txrx_peer handle
  1041. *
  1042. * Return: void
  1043. */
  1044. void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
  1045. #else
  1046. static inline
  1047. QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
  1048. struct dp_txrx_peer *txrx_peer)
  1049. {
  1050. return QDF_STATUS_SUCCESS;
  1051. }
  1052. static inline
  1053. void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
  1054. struct dp_txrx_peer *txrx_peer)
  1055. {
  1056. }
  1057. static inline
  1058. void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
  1059. {
  1060. }
  1061. #endif
  1062. #ifdef WLAN_PEER_JITTER
  1063. /**
  1064. * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer
  1065. * @pdev: Datapath pdev handle
  1066. * @txrx_peer: dp_txrx_peer handle
  1067. *
  1068. * Return: QDF_STATUS
  1069. */
  1070. QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
  1071. struct dp_txrx_peer *txrx_peer);
  1072. /**
  1073. * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context
  1074. * @pdev: Datapath pdev handle
  1075. * @txrx_peer: dp_txrx_peer handle
  1076. *
  1077. * Return: void
  1078. */
  1079. void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
  1080. struct dp_txrx_peer *txrx_peer);
  1081. /**
  1082. * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer
  1083. * @txrx_peer: dp_txrx_peer handle
  1084. *
  1085. * Return: void
  1086. */
  1087. void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
  1088. #else
  1089. static inline
  1090. QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
  1091. struct dp_txrx_peer *txrx_peer)
  1092. {
  1093. return QDF_STATUS_SUCCESS;
  1094. }
  1095. static inline
  1096. void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
  1097. struct dp_txrx_peer *txrx_peer)
  1098. {
  1099. }
  1100. static inline
  1101. void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
  1102. {
  1103. }
  1104. #endif
  1105. #ifndef CONFIG_SAWF_DEF_QUEUES
  1106. static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
  1107. struct dp_peer *peer)
  1108. {
  1109. return QDF_STATUS_SUCCESS;
  1110. }
  1111. static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
  1112. struct dp_peer *peer)
  1113. {
  1114. return QDF_STATUS_SUCCESS;
  1115. }
  1116. #endif
  1117. #ifndef CONFIG_SAWF
  1118. static inline
  1119. QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
  1120. struct dp_txrx_peer *txrx_peer)
  1121. {
  1122. return QDF_STATUS_SUCCESS;
  1123. }
  1124. static inline
  1125. QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
  1126. struct dp_txrx_peer *txrx_peer)
  1127. {
  1128. return QDF_STATUS_SUCCESS;
  1129. }
  1130. #endif
  1131. /**
  1132. * dp_vdev_bss_peer_ref_n_get: Get bss peer of a vdev
  1133. * @soc: DP soc
  1134. * @vdev: vdev
  1135. * @mod_id: id of module requesting reference
  1136. *
  1137. * Return: VDEV BSS peer
  1138. */
  1139. struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
  1140. struct dp_vdev *vdev,
  1141. enum dp_mod_id mod_id);
  1142. /**
  1143. * dp_sta_vdev_self_peer_ref_n_get: Get self peer of sta vdev
  1144. * @soc: DP soc
  1145. * @vdev: vdev
  1146. * @mod_id: id of module requesting reference
  1147. *
  1148. * Return: VDEV self peer
  1149. */
  1150. struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
  1151. struct dp_vdev *vdev,
  1152. enum dp_mod_id mod_id);
  1153. void dp_peer_ast_table_detach(struct dp_soc *soc);
  1154. /**
  1155. * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
  1156. * @soc: soc handle
  1157. *
  1158. * Return: none
  1159. */
  1160. void dp_peer_find_map_detach(struct dp_soc *soc);
  1161. void dp_soc_wds_detach(struct dp_soc *soc);
  1162. QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
  1163. /**
  1164. * dp_find_peer_by_macaddr() - Finding the peer from mac address provided.
  1165. * @soc: soc handle
  1166. * @mac_addr: MAC address to be used to find peer
  1167. * @vdev_id: VDEV id
  1168. * @mod_id: MODULE ID
  1169. *
  1170. * Return: struct dp_peer
  1171. */
  1172. struct dp_peer *dp_find_peer_by_macaddr(struct dp_soc *soc, uint8_t *mac_addr,
  1173. uint8_t vdev_id, enum dp_mod_id mod_id);
  1174. /**
  1175. * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
  1176. * @soc: SoC handle
  1177. *
  1178. * Return: QDF_STATUS
  1179. */
  1180. QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
  1181. /**
  1182. * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
  1183. * @soc: SoC handle
  1184. *
  1185. * Return: QDF_STATUS
  1186. */
  1187. QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
  1188. /**
  1189. * dp_del_wds_entry_wrapper() - delete a WDS AST entry
  1190. * @soc: DP soc structure pointer
  1191. * @vdev_id: vdev_id
  1192. * @wds_macaddr: MAC address of ast node
  1193. * @type: type from enum cdp_txrx_ast_entry_type
  1194. * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
  1195. *
  1196. * This API is used to delete an AST entry from fw
  1197. *
  1198. * Return: None
  1199. */
  1200. void dp_del_wds_entry_wrapper(struct dp_soc *soc, uint8_t vdev_id,
  1201. uint8_t *wds_macaddr, uint8_t type,
  1202. uint8_t delete_in_fw);
  1203. void dp_soc_wds_attach(struct dp_soc *soc);
  1204. /**
  1205. * dp_peer_mec_hash_detach() - Free MEC Hash table
  1206. * @soc: SoC handle
  1207. *
  1208. * Return: None
  1209. */
  1210. void dp_peer_mec_hash_detach(struct dp_soc *soc);
  1211. /**
  1212. * dp_peer_ast_hash_detach() - Free AST Hash table
  1213. * @soc: SoC handle
  1214. *
  1215. * Return: None
  1216. */
  1217. void dp_peer_ast_hash_detach(struct dp_soc *soc);
  1218. #ifdef FEATURE_AST
  1219. /**
  1220. * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
  1221. * @soc: datapath soc handle
  1222. * @peer: datapath peer handle
  1223. *
  1224. * Delete the AST entries belonging to a peer
  1225. */
  1226. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  1227. struct dp_peer *peer)
  1228. {
  1229. struct dp_ast_entry *ast_entry, *temp_ast_entry;
  1230. dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
  1231. /*
  1232. * Delete peer self ast entry. This is done to handle scenarios
  1233. * where peer is freed before peer map is received(for ex in case
  1234. * of auth disallow due to ACL) in such cases self ast is not added
  1235. * to peer->ast_list.
  1236. */
  1237. if (peer->self_ast_entry) {
  1238. dp_peer_del_ast(soc, peer->self_ast_entry);
  1239. peer->self_ast_entry = NULL;
  1240. }
  1241. DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
  1242. dp_peer_del_ast(soc, ast_entry);
  1243. }
  1244. /**
  1245. * dp_print_peer_ast_entries() - Dump AST entries of peer
  1246. * @soc: Datapath soc handle
  1247. * @peer: Datapath peer
  1248. * @arg: argument to iterate function
  1249. *
  1250. * Return: void
  1251. */
  1252. void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
  1253. void *arg);
  1254. #else
  1255. static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
  1256. struct dp_peer *peer, void *arg)
  1257. {
  1258. }
  1259. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  1260. struct dp_peer *peer)
  1261. {
  1262. }
  1263. #endif
  1264. #ifdef FEATURE_MEC
  1265. /**
  1266. * dp_peer_mec_spinlock_create() - Create the MEC spinlock
  1267. * @soc: SoC handle
  1268. *
  1269. * Return: none
  1270. */
  1271. void dp_peer_mec_spinlock_create(struct dp_soc *soc);
  1272. /**
  1273. * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
  1274. * @soc: SoC handle
  1275. *
  1276. * Return: none
  1277. */
  1278. void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);
  1279. /**
  1280. * dp_peer_mec_flush_entries() - Delete all mec entries in table
  1281. * @soc: Datapath SOC
  1282. *
  1283. * Return: None
  1284. */
  1285. void dp_peer_mec_flush_entries(struct dp_soc *soc);
  1286. #else
  1287. static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
  1288. {
  1289. }
  1290. static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
  1291. {
  1292. }
  1293. static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
  1294. {
  1295. }
  1296. #endif
  1297. static inline int dp_peer_find_mac_addr_cmp(
  1298. union dp_align_mac_addr *mac_addr1,
  1299. union dp_align_mac_addr *mac_addr2)
  1300. {
  1301. /*
  1302. * Intentionally use & rather than &&.
  1303. * because the operands are binary rather than generic boolean,
  1304. * the functionality is equivalent.
  1305. * Using && has the advantage of short-circuited evaluation,
  1306. * but using & has the advantage of no conditional branching,
  1307. * which is a more significant benefit.
  1308. */
  1309. return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
  1310. & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
  1311. }
  1312. /**
  1313. * dp_peer_delete() - delete DP peer
  1314. *
  1315. * @soc: Datatpath soc
  1316. * @peer: Datapath peer
  1317. * @arg: argument to iter function
  1318. *
  1319. * Return: void
  1320. */
  1321. void dp_peer_delete(struct dp_soc *soc,
  1322. struct dp_peer *peer,
  1323. void *arg);
  1324. /**
  1325. * dp_mlo_peer_delete() - delete MLO DP peer
  1326. *
  1327. * @soc: Datapath soc
  1328. * @peer: Datapath peer
  1329. * @arg: argument to iter function
  1330. *
  1331. * Return: void
  1332. */
  1333. void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);
  1334. #ifdef WLAN_FEATURE_11BE_MLO
  1335. /* is MLO connection mld peer */
  1336. #define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)
  1337. /* set peer type */
  1338. #define DP_PEER_SET_TYPE(_peer, _type_val) \
  1339. ((_peer)->peer_type = (_type_val))
  1340. /* is legacy peer */
  1341. #define IS_DP_LEGACY_PEER(_peer) \
  1342. ((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))
  1343. /* is MLO connection link peer */
  1344. #define IS_MLO_DP_LINK_PEER(_peer) \
  1345. ((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
  1346. /* is MLO connection mld peer */
  1347. #define IS_MLO_DP_MLD_PEER(_peer) \
  1348. ((_peer)->peer_type == CDP_MLD_PEER_TYPE)
  1349. /* Get Mld peer from link peer */
  1350. #define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
  1351. ((link_peer)->mld_peer)

#ifdef WLAN_MLO_MULTI_CHIP
static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
{
        if (soc->arch_ops.mlo_get_chip_id)
                return soc->arch_ops.mlo_get_chip_id(soc);

        return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
                                  uint8_t *peer_mac_addr,
                                  int mac_addr_is_aligned,
                                  uint8_t vdev_id,
                                  uint8_t chip_id,
                                  enum dp_mod_id mod_id)
{
        if (soc->arch_ops.mlo_link_peer_find_hash_find_by_chip_id)
                return soc->arch_ops.mlo_link_peer_find_hash_find_by_chip_id
                                (soc, peer_mac_addr,
                                 mac_addr_is_aligned,
                                 vdev_id, chip_id,
                                 mod_id);

        return NULL;
}
#else
static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
{
        return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
                                  uint8_t *peer_mac_addr,
                                  int mac_addr_is_aligned,
                                  uint8_t vdev_id,
                                  uint8_t chip_id,
                                  enum dp_mod_id mod_id)
{
        return dp_peer_find_hash_find(soc, peer_mac_addr,
                                      mac_addr_is_aligned,
                                      vdev_id, mod_id);
}
#endif

/**
 * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
 *                                matching mac_address
 * @soc: soc handle
 * @peer_mac_addr: mld peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * Return: peer on success
 *         NULL on failure
 */
static inline
struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
                                           uint8_t *peer_mac_addr,
                                           int mac_addr_is_aligned,
                                           uint8_t vdev_id,
                                           enum dp_mod_id mod_id)
{
        if (soc->arch_ops.mlo_peer_find_hash_find)
                return soc->arch_ops.mlo_peer_find_hash_find(soc,
                                                             peer_mac_addr,
                                                             mac_addr_is_aligned,
                                                             mod_id, vdev_id);
        return NULL;
}

/**
 * dp_peer_hash_find_wrapper() - find link peer or mld peer according to
 *                               peer_type
 * @soc: DP SOC handle
 * @peer_info: peer information for hash find
 * @mod_id: ID of module requesting reference
 *
 * Return: peer handle
 */
static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
                                          struct cdp_peer_info *peer_info,
                                          enum dp_mod_id mod_id)
{
        struct dp_peer *peer = NULL;

        if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
            peer_info->peer_type == CDP_WILD_PEER_TYPE) {
                peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
                                              peer_info->mac_addr_is_aligned,
                                              peer_info->vdev_id,
                                              mod_id);
                if (peer)
                        return peer;
        }

        if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
            peer_info->peer_type == CDP_WILD_PEER_TYPE)
                peer = dp_mld_peer_find_hash_find(
                                soc, peer_info->mac_addr,
                                peer_info->mac_addr_is_aligned,
                                peer_info->vdev_id,
                                mod_id);
        return peer;
}
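
/*
 * Usage sketch (illustrative only, not part of this header): callers fill
 * a cdp_peer_info descriptor, look the peer up, and must drop the
 * reference taken by the hash find on success. The peer_mac and vdev_id
 * values below are hypothetical.
 *
 *      struct cdp_peer_info peer_info = {0};
 *      struct dp_peer *peer;
 *
 *      peer_info.vdev_id = vdev_id;
 *      peer_info.mac_addr = peer_mac;
 *      peer_info.mac_addr_is_aligned = false;
 *      peer_info.peer_type = CDP_WILD_PEER_TYPE;
 *
 *      peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
 *      if (peer) {
 *              // ... use peer ...
 *              dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *      }
 */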

/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
 *                               increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
                               struct dp_peer *mld_peer)
{
        /* increase mld_peer ref_cnt */
        dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
        link_peer->mld_peer = mld_peer;
}

/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
 *                               decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
        dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
        link_peer->mld_peer = NULL;
}

/**
 * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
        int i;

        qdf_spinlock_create(&mld_peer->link_peers_info_lock);
        mld_peer->num_links = 0;
        for (i = 0; i < DP_MAX_MLO_LINKS; i++)
                mld_peer->link_peers[i].is_valid = false;
}

/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
        qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}

/**
 * dp_mld_peer_add_link_peer() - add link peer info to mld peer
 * @mld_peer: mld dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
                               struct dp_peer *link_peer)
{
        int i;
        struct dp_peer_link_info *link_peer_info;
        struct dp_soc *soc = mld_peer->vdev->pdev->soc;

        qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
        for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
                link_peer_info = &mld_peer->link_peers[i];
                if (!link_peer_info->is_valid) {
                        qdf_mem_copy(link_peer_info->mac_addr.raw,
                                     link_peer->mac_addr.raw,
                                     QDF_MAC_ADDR_SIZE);
                        link_peer_info->is_valid = true;
                        link_peer_info->vdev_id = link_peer->vdev->vdev_id;
                        link_peer_info->chip_id =
                                dp_get_chip_id(link_peer->vdev->pdev->soc);
                        mld_peer->num_links++;
                        break;
                }
        }
        qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

        dp_peer_info("%s addition of link peer %pK (" QDF_MAC_ADDR_FMT ") "
                     "to MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
                     "idx %u num_links %u",
                     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
                     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
                     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
                     i, mld_peer->num_links);

        dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_ADD_LINK,
                                                mld_peer, link_peer, i,
                                                (i != DP_MAX_MLO_LINKS) ? 1 : 0);
}

/**
 * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
 * @mld_peer: MLD dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: number of links left after deletion
 */
static inline
uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
                                  struct dp_peer *link_peer)
{
        int i;
        struct dp_peer_link_info *link_peer_info;
        uint8_t num_links;
        struct dp_soc *soc = mld_peer->vdev->pdev->soc;

        qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
        for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
                link_peer_info = &mld_peer->link_peers[i];
                if (link_peer_info->is_valid &&
                    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
                                               &link_peer_info->mac_addr)) {
                        link_peer_info->is_valid = false;
                        mld_peer->num_links--;
                        break;
                }
        }
        num_links = mld_peer->num_links;
        qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

        dp_peer_info("%s deletion of link peer %pK (" QDF_MAC_ADDR_FMT ") "
                     "from MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
                     "idx %u num_links %u",
                     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
                     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
                     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
                     i, mld_peer->num_links);

        dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_DEL_LINK,
                                                mld_peer, link_peer, i,
                                                (i != DP_MAX_MLO_LINKS) ? 1 : 0);

        return num_links;
}

/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
 *                                         increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that holds the link peers pointer array and count
 * @mod_id: id of module requesting reference
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
                struct dp_soc *soc,
                struct dp_peer *mld_peer,
                struct dp_mld_link_peers *mld_link_peers,
                enum dp_mod_id mod_id)
{
        struct dp_peer *peer;
        uint8_t i = 0, j = 0;
        struct dp_peer_link_info *link_peer_info;

        qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
        qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
        for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
                link_peer_info = &mld_peer->link_peers[i];
                if (link_peer_info->is_valid) {
                        peer = dp_link_peer_hash_find_by_chip_id(
                                        soc,
                                        link_peer_info->mac_addr.raw,
                                        true,
                                        link_peer_info->vdev_id,
                                        link_peer_info->chip_id,
                                        mod_id);
                        if (peer)
                                mld_link_peers->link_peers[j++] = peer;
                }
        }
        qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

        mld_link_peers->num_links = j;
}

/**
 * dp_release_link_peers_ref() - release all link peers' references
 * @mld_link_peers: structure that holds the link peers pointer array and count
 * @mod_id: id of module that took the reference
 *
 * Return: None.
 */
static inline
void dp_release_link_peers_ref(
                struct dp_mld_link_peers *mld_link_peers,
                enum dp_mod_id mod_id)
{
        struct dp_peer *peer;
        uint8_t i;

        for (i = 0; i < mld_link_peers->num_links; i++) {
                peer = mld_link_peers->link_peers[i];
                if (peer)
                        dp_peer_unref_delete(peer, mod_id);
                mld_link_peers->link_peers[i] = NULL;
        }

        mld_link_peers->num_links = 0;
}
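
/*
 * Usage sketch (illustrative only, not part of this header): the two
 * helpers above are meant to be used as a get/iterate/release pair so
 * that every link peer reference taken under @mod_id is dropped again.
 * The mld_peer pointer and the DP_MOD_ID_CDP choice below are
 * hypothetical.
 *
 *      struct dp_mld_link_peers link_peers_info;
 *      struct dp_peer *link_peer;
 *      uint8_t i;
 *
 *      dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, &link_peers_info,
 *                                          DP_MOD_ID_CDP);
 *      for (i = 0; i < link_peers_info.num_links; i++) {
 *              link_peer = link_peers_info.link_peers[i];
 *              // ... per-link handling ...
 *      }
 *      dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
 */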

/**
 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
 * @soc: Datapath soc handle
 * @peer_id: peer id
 * @lmac_id: lmac id to find the link peer on given lmac
 *
 * Return: peer_id of link peer if found
 *         else return HTT_INVALID_PEER
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
                                        uint8_t lmac_id)
{
        uint8_t i;
        struct dp_peer *peer;
        struct dp_peer *link_peer;
        struct dp_soc *link_peer_soc;
        struct dp_mld_link_peers link_peers_info;
        uint16_t link_peer_id = HTT_INVALID_PEER;

        peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);

        if (!peer)
                return HTT_INVALID_PEER;

        if (IS_MLO_DP_MLD_PEER(peer)) {
                /* get link peers with reference */
                dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
                                                    DP_MOD_ID_CDP);

                for (i = 0; i < link_peers_info.num_links; i++) {
                        link_peer = link_peers_info.link_peers[i];
                        link_peer_soc = link_peer->vdev->pdev->soc;
                        if ((link_peer_soc == soc) &&
                            (link_peer->vdev->pdev->lmac_id == lmac_id)) {
                                link_peer_id = link_peer->peer_id;
                                break;
                        }
                }
                /* release link peers reference */
                dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
        } else {
                link_peer_id = peer_id;
        }

        dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

        return link_peer_id;
}

/**
 * dp_peer_get_tgt_peer_hash_find() - get target dp_peer handle
 * @soc: soc handle
 * @peer_mac: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * For an MLO connection, get the corresponding MLD peer;
 * otherwise get the link peer for the non-MLO case.
 *
 * Return: peer on success
 *         NULL on failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
                                               uint8_t *peer_mac,
                                               int mac_addr_is_aligned,
                                               uint8_t vdev_id,
                                               enum dp_mod_id mod_id)
{
        struct dp_peer *ta_peer = NULL;
        struct dp_peer *peer = dp_peer_find_hash_find(soc,
                                                      peer_mac, 0, vdev_id,
                                                      mod_id);

        if (peer) {
                /* mlo connection link peer, get mld peer with reference */
                if (IS_MLO_DP_LINK_PEER(peer)) {
                        /* increase mld peer ref_cnt */
                        if (QDF_STATUS_SUCCESS ==
                            dp_peer_get_ref(soc, peer->mld_peer, mod_id))
                                ta_peer = peer->mld_peer;
                        else
                                ta_peer = NULL;

                        /* release peer reference that was added by hash find */
                        dp_peer_unref_delete(peer, mod_id);
                } else {
                        /* mlo MLD peer or non-mlo link peer */
                        ta_peer = peer;
                }
        } else {
                dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT,
                            QDF_MAC_ADDR_REF(peer_mac));
        }

        return ta_peer;
}
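
/*
 * Reference-handling sketch (illustrative only, not part of this header):
 * whichever peer is returned (the MLD peer for an MLO link, or the peer
 * itself otherwise) carries one reference for @mod_id and must be
 * released by the caller. peer_mac and vdev_id below are hypothetical.
 *
 *      struct dp_peer *ta_peer;
 *
 *      ta_peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0, vdev_id,
 *                                               DP_MOD_ID_CDP);
 *      if (ta_peer) {
 *              // ... operate on the target peer ...
 *              dp_peer_unref_delete(ta_peer, DP_MOD_ID_CDP);
 *      }
 */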

/**
 * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @mod_id: ID of module requesting reference
 *
 * For an MLO connection, get the corresponding MLD peer;
 * otherwise get the link peer for the non-MLO case.
 *
 * Return: peer on success
 *         NULL on failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
                                           uint16_t peer_id,
                                           enum dp_mod_id mod_id)
{
        struct dp_peer *ta_peer = NULL;
        struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

        if (peer) {
                /* mlo connection link peer, get mld peer with reference */
                if (IS_MLO_DP_LINK_PEER(peer)) {
                        /* increase mld peer ref_cnt */
                        if (QDF_STATUS_SUCCESS ==
                            dp_peer_get_ref(soc, peer->mld_peer, mod_id))
                                ta_peer = peer->mld_peer;
                        else
                                ta_peer = NULL;

                        /* release the reference taken by dp_peer_get_ref_by_id */
                        dp_peer_unref_delete(peer, mod_id);
                } else {
                        /* mlo MLD peer or non-mlo link peer */
                        ta_peer = peer;
                }
        }

        return ta_peer;
}

/**
 * dp_peer_mlo_delete() - peer MLO related delete operation
 * @peer: DP peer handle
 *
 * Return: None
 */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
        struct dp_peer *ml_peer;
        struct dp_soc *soc;

        dp_info("peer " QDF_MAC_ADDR_FMT " type %d",
                QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->peer_type);

        /* MLO connection link peer */
        if (IS_MLO_DP_LINK_PEER(peer)) {
                ml_peer = peer->mld_peer;
                soc = ml_peer->vdev->pdev->soc;

                /* if this was the last link peer, delete the MLD peer too */
                if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
                        dp_peer_delete(soc, peer->mld_peer, NULL);
        }
}

/**
 * dp_peer_mlo_setup() - create MLD peer and do MLO related initialization
 * @soc: Soc handle
 * @peer: DP peer handle
 * @vdev_id: Vdev ID
 * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mlo_setup(
                struct dp_soc *soc,
                struct dp_peer *peer,
                uint8_t vdev_id,
                struct cdp_peer_setup_info *setup_info);

/**
 * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
 * @peer: datapath peer
 *
 * Return: MLD peer in case of MLO Link peer
 *         Peer itself in other cases
 */
static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
        return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
}

/**
 * dp_get_primary_link_peer_by_id(): Get primary link peer from the given
 *                                   peer id
 * @soc: core DP soc context
 * @peer_id: peer id
 * @mod_id: ID of module requesting reference
 *
 * Return: primary link peer for the MLO peer
 *         legacy peer itself in case of legacy peer
 */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
                                               uint16_t peer_id,
                                               enum dp_mod_id mod_id)
{
        uint8_t i;
        struct dp_mld_link_peers link_peers_info;
        struct dp_peer *peer;
        struct dp_peer *link_peer;
        struct dp_peer *primary_peer = NULL;

        peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
        if (!peer)
                return NULL;

        if (IS_MLO_DP_MLD_PEER(peer)) {
                /* get link peers with reference */
                dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
                                                    mod_id);

                for (i = 0; i < link_peers_info.num_links; i++) {
                        link_peer = link_peers_info.link_peers[i];
                        if (link_peer->primary_link) {
                                primary_peer = link_peer;
                                /*
                                 * Take additional reference over
                                 * primary link peer.
                                 */
                                dp_peer_get_ref(NULL, primary_peer, mod_id);
                                break;
                        }
                }
                /* release link peers reference */
                dp_release_link_peers_ref(&link_peers_info, mod_id);
                dp_peer_unref_delete(peer, mod_id);
        } else {
                primary_peer = peer;
        }

        return primary_peer;
}

/**
 * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
 * @peer: Datapath peer
 *
 * Return: dp_txrx_peer from MLD peer if peer type is link peer
 *         dp_txrx_peer from peer itself for other cases
 */
static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
        return IS_MLO_DP_LINK_PEER(peer) ?
                        peer->mld_peer->txrx_peer : peer->txrx_peer;
}

/**
 * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
 * @peer: Datapath peer
 *
 * Return: true if peer is primary link peer or legacy peer
 *         false otherwise
 */
static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
        if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
                return true;
        else if (IS_DP_LEGACY_PEER(peer))
                return true;
        else
                return false;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Get the target txrx peer for the given
 *                                    peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
                               uint16_t peer_id,
                               dp_txrx_ref_handle *handle,
                               enum dp_mod_id mod_id)
{
        struct dp_peer *peer;
        struct dp_txrx_peer *txrx_peer;

        peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
        if (!peer)
                return NULL;

        txrx_peer = dp_get_txrx_peer(peer);
        if (txrx_peer) {
                *handle = (dp_txrx_ref_handle)peer;
                return txrx_peer;
        }

        dp_peer_unref_delete(peer, mod_id);
        return NULL;
}
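
/*
 * Usage sketch (illustrative only, not part of this header): on success the
 * underlying dp_peer reference is handed back through @handle and must be
 * released once the txrx peer is no longer needed; dp_txrx_peer_unref_delete()
 * is assumed here as the matching release helper.
 *
 *      dp_txrx_ref_handle txrx_ref_handle = NULL;
 *      struct dp_txrx_peer *txrx_peer;
 *
 *      txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
 *                                                 &txrx_ref_handle,
 *                                                 DP_MOD_ID_CDP);
 *      if (txrx_peer) {
 *              // ... update per-peer txrx state ...
 *              dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_CDP);
 *      }
 */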

/**
 * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
 *
 * @soc: core DP soc context
 *
 * Return: void
 */
void dp_print_mlo_ast_stats_be(struct dp_soc *soc);
#else
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false
#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) true
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL

static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
                                          struct cdp_peer_info *peer_info,
                                          enum dp_mod_id mod_id)
{
        return dp_peer_find_hash_find(soc, peer_info->mac_addr,
                                      peer_info->mac_addr_is_aligned,
                                      peer_info->vdev_id,
                                      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
                                               uint8_t *peer_mac,
                                               int mac_addr_is_aligned,
                                               uint8_t vdev_id,
                                               enum dp_mod_id mod_id)
{
        return dp_peer_find_hash_find(soc, peer_mac,
                                      mac_addr_is_aligned, vdev_id,
                                      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
                                           uint16_t peer_id,
                                           enum dp_mod_id mod_id)
{
        return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
QDF_STATUS dp_peer_mlo_setup(
                struct dp_soc *soc,
                struct dp_peer *peer,
                uint8_t vdev_id,
                struct cdp_peer_setup_info *setup_info)
{
        return QDF_STATUS_SUCCESS;
}

static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
                           struct dp_peer *link_peer)
{
}

static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
{
        return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
                                  uint8_t *peer_mac_addr,
                                  int mac_addr_is_aligned,
                                  uint8_t vdev_id,
                                  uint8_t chip_id,
                                  enum dp_mod_id mod_id)
{
        return dp_peer_find_hash_find(soc, peer_mac_addr,
                                      mac_addr_is_aligned,
                                      vdev_id, mod_id);
}

static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
        return peer;
}

static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
                                               uint16_t peer_id,
                                               enum dp_mod_id mod_id)
{
        return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
        return peer->txrx_peer;
}

static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
        return true;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Get the target txrx peer for the given
 *                                    peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
                               uint16_t peer_id,
                               dp_txrx_ref_handle *handle,
                               enum dp_mod_id mod_id)
{
        return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}

static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
                                        uint8_t lmac_id)
{
        return peer_id;
}

static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
{
}
#endif /* WLAN_FEATURE_11BE_MLO */

static inline
void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
{
        uint8_t i;

        qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
                     sizeof(struct dp_rx_tid_defrag));

        for (i = 0; i < DP_MAX_TIDS; i++)
                qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
}

static inline
void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
{
        uint8_t i;

        for (i = 0; i < DP_MAX_TIDS; i++)
                qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
}

#ifdef PEER_CACHE_RX_PKTS
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
        qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
        txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
        qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
                        DP_RX_CACHED_BUFQ_THRESH);
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
        qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
        qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
#else
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
#endif

/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc: core DP soc context
 * @peer: DP peer
 * @state: new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
                     struct dp_peer *peer,
                     enum dp_peer_state state)
{
        uint8_t peer_state;

        qdf_spin_lock_bh(&peer->peer_state_lock);
        peer_state = peer->peer_state;

        switch (state) {
        case DP_PEER_STATE_INIT:
                /* a peer moving to INIT must not already be ACTIVE or
                 * LOGICAL_DELETE
                 */
                DP_PEER_STATE_ASSERT
                        (peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
                         (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
                break;

        case DP_PEER_STATE_ACTIVE:
                DP_PEER_STATE_ASSERT(peer, state,
                                     (peer_state == DP_PEER_STATE_INIT));
                break;

        case DP_PEER_STATE_LOGICAL_DELETE:
                DP_PEER_STATE_ASSERT(peer, state,
                                     (peer_state == DP_PEER_STATE_ACTIVE) ||
                                     (peer_state == DP_PEER_STATE_INIT));
                break;

        case DP_PEER_STATE_INACTIVE:
                if (IS_MLO_DP_MLD_PEER(peer))
                        DP_PEER_STATE_ASSERT
                                (peer, state,
                                 (peer_state == DP_PEER_STATE_ACTIVE));
                else
                        DP_PEER_STATE_ASSERT
                                (peer, state,
                                 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
                break;

        case DP_PEER_STATE_FREED:
                if (peer->sta_self_peer)
                        DP_PEER_STATE_ASSERT
                                (peer, state,
                                 (peer_state == DP_PEER_STATE_INIT));
                else
                        DP_PEER_STATE_ASSERT
                                (peer, state,
                                 (peer_state == DP_PEER_STATE_INACTIVE) ||
                                 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
                break;

        default:
                qdf_spin_unlock_bh(&peer->peer_state_lock);
                dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
                         state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
                return;
        }

        peer->peer_state = state;
        qdf_spin_unlock_bh(&peer->peer_state_lock);
        dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
                peer_state, state,
                QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}
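
/*
 * Lifecycle sketch (derived from the assertions above, illustrative only):
 * a regular peer normally walks INIT -> ACTIVE -> LOGICAL_DELETE ->
 * INACTIVE -> FREED, an MLD peer goes ACTIVE -> INACTIVE, and a STA
 * self-peer may be freed straight from INIT.
 *
 *      dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
 *      dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
 *      // ... peer in use ...
 *      dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
 *      dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
 *      dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
 */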

/**
 * dp_vdev_iterate_specific_peer_type() - API to iterate through vdev peer
 *                                        list based on the type of peer
 *                                        (legacy or MLO link peer)
 *
 * @vdev: DP vdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 * @peer_type: type of peer - MLO Link Peer or Legacy Peer
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_specific_peer_type(struct dp_vdev *vdev,
                                   dp_peer_iter_func *func,
                                   void *arg, enum dp_mod_id mod_id,
                                   enum dp_peer_type peer_type)
{
        struct dp_peer *peer;
        struct dp_peer *tmp_peer;
        struct dp_soc *soc = NULL;

        if (!vdev || !vdev->pdev || !vdev->pdev->soc)
                return;

        soc = vdev->pdev->soc;

        qdf_spin_lock_bh(&vdev->peer_list_lock);
        TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
                           peer_list_elem,
                           tmp_peer) {
                if (dp_peer_get_ref(soc, peer, mod_id) ==
                    QDF_STATUS_SUCCESS) {
                        if ((peer_type == DP_PEER_TYPE_LEGACY &&
                             (IS_DP_LEGACY_PEER(peer))) ||
                            (peer_type == DP_PEER_TYPE_MLO_LINK &&
                             (IS_MLO_DP_LINK_PEER(peer)))) {
                                (*func)(soc, peer, arg);
                        }

                        dp_peer_unref_delete(peer, mod_id);
                }
        }
        qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
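
/*
 * Iterator sketch (illustrative only, not part of this header): the
 * callback receives each matching peer while a reference is held for
 * @mod_id and the vdev peer list lock is taken, so it must not sleep.
 * dp_count_peer_cb and the counter argument below are hypothetical.
 *
 *      static void dp_count_peer_cb(struct dp_soc *soc, struct dp_peer *peer,
 *                                   void *arg)
 *      {
 *              uint32_t *count = (uint32_t *)arg;
 *
 *              (*count)++;
 *      }
 *
 *      uint32_t num_legacy_peers = 0;
 *
 *      dp_vdev_iterate_specific_peer_type(vdev, dp_count_peer_cb,
 *                                         &num_legacy_peers, DP_MOD_ID_CDP,
 *                                         DP_PEER_TYPE_LEGACY);
 */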

#ifdef REO_SHARED_QREF_TABLE_EN
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
                                        struct dp_peer *peer);
#else
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
                                                      struct dp_peer *peer) {}
#endif

/**
 * dp_peer_check_wds_ext_peer() - Check WDS ext peer
 *
 * @peer: DP peer
 *
 * Return: True for WDS ext peer, false otherwise
 */
bool dp_peer_check_wds_ext_peer(struct dp_peer *peer);

/**
 * dp_gen_ml_peer_id() - Generate MLD peer id for DP
 *
 * @soc: DP soc context
 * @peer_id: mld peer id
 *
 * Return: DP MLD peer id
 */
uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id);

#ifdef FEATURE_AST
/**
 * dp_peer_host_add_map_ast() - Add ast entry with HW AST Index
 * @soc: SoC handle
 * @peer_id: peer id from firmware
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id of the VAP to which the peer belongs
 * @ast_hash: ast hash value in HW
 * @is_wds: flag to indicate peer map event for WDS ast entry
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
                                    uint8_t *mac_addr, uint16_t hw_peer_id,
                                    uint8_t vdev_id, uint16_t ast_hash,
                                    uint8_t is_wds);
#endif

#endif /* _DP_PEER_H_ */