dp_peer.h 61 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475
  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #ifndef _DP_PEER_H_
  20. #define _DP_PEER_H_
  21. #include <qdf_types.h>
  22. #include <qdf_lock.h>
  23. #include "dp_types.h"
  24. #include "dp_internal.h"
  25. #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
  26. #include "hal_reo.h"
  27. #endif
  28. #define DP_INVALID_PEER_ID 0xffff
  29. #define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
  30. #define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */
  31. #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
  32. #define DP_PEER_HASH_LOAD_MULT 2
  33. #define DP_PEER_HASH_LOAD_SHIFT 0
  34. /* Threshold for peer's cached buf queue beyond which frames are dropped */
  35. #define DP_RX_CACHED_BUFQ_THRESH 64
  36. #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
  37. #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
  38. #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
  39. #define dp_peer_info(params...) \
  40. __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
  41. #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
#ifdef REO_QDESC_HISTORY
/* Kind of REO queue-descriptor event being recorded in the history */
enum reo_qdesc_event_type {
	REO_QDESC_UPDATE_CB = 0,
	REO_QDESC_FREE,
};

/**
 * struct reo_qdesc_event - one REO queue-descriptor history record
 * @qdesc_addr: DMA address of the REO queue descriptor
 * @ts: event timestamp (units set by the recorder — confirm against users)
 * @type: event type (update callback or free)
 * @peer_mac: MAC address of the peer owning the descriptor
 */
struct reo_qdesc_event {
	qdf_dma_addr_t qdesc_addr;
	uint64_t ts;
	enum reo_qdesc_event_type type;
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
};
#endif
/**
 * struct ast_del_ctxt - context carried through an AST-entry deletion walk
 * @age: presumably selects age-based deletion — TODO confirm against callers
 * @del_count: running count of AST entries deleted during the walk
 */
struct ast_del_ctxt {
	bool age;
	int del_count;
};
#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_peer_is_wds_ext_peer() - peer is WDS_EXT peer
 *
 * @peer: DP peer context
 *
 * This API checks whether the peer is WDS_EXT peer or not
 *
 * Return: true in the wds_ext peer else false
 */
static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
{
	return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
}
#else
/* WDS extended mode compiled out: no peer can ever be a WDS_EXT peer */
static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
{
	return false;
}
#endif
  78. typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
  79. void *arg);
  80. /**
  81. * dp_peer_unref_delete() - unref and delete peer
  82. * @peer: Datapath peer handle
  83. * @id: ID of module releasing reference
  84. *
  85. */
  86. void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
  87. /**
  88. * dp_txrx_peer_unref_delete() - unref and delete peer
  89. * @handle: Datapath txrx ref handle
  90. * @id: Module ID of the caller
  91. *
  92. */
  93. void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
  94. /**
  95. * dp_peer_find_hash_find() - returns legacy or mlo link peer from
  96. * peer_hash_table matching vdev_id and mac_address
  97. * @soc: soc handle
  98. * @peer_mac_addr: peer mac address
  99. * @mac_addr_is_aligned: is mac addr aligned
  100. * @vdev_id: vdev_id
  101. * @mod_id: id of module requesting reference
  102. *
  103. * return: peer in success
  104. * NULL in failure
  105. */
  106. struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
  107. uint8_t *peer_mac_addr,
  108. int mac_addr_is_aligned,
  109. uint8_t vdev_id,
  110. enum dp_mod_id mod_id);
  111. /**
  112. * dp_peer_find_by_id_valid - check if peer exists for given id
  113. * @soc: core DP soc context
  114. * @peer_id: peer id from peer object can be retrieved
  115. *
 * Return: true if peer exists or false otherwise
  117. */
  118. bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
  119. #ifdef DP_UMAC_HW_RESET_SUPPORT
  120. void dp_reset_tid_q_setup(struct dp_soc *soc);
  121. #endif
  122. /**
  123. * dp_peer_get_ref() - Returns peer object given the peer id
  124. *
  125. * @soc: core DP soc context
  126. * @peer: DP peer
  127. * @mod_id: id of module requesting the reference
  128. *
  129. * Return: QDF_STATUS_SUCCESS if reference held successfully
  130. * else QDF_STATUS_E_INVAL
  131. */
  132. static inline
  133. QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
  134. struct dp_peer *peer,
  135. enum dp_mod_id mod_id)
  136. {
  137. if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
  138. return QDF_STATUS_E_INVAL;
  139. if (mod_id > DP_MOD_ID_RX)
  140. qdf_atomic_inc(&peer->mod_refs[mod_id]);
  141. return QDF_STATUS_SUCCESS;
  142. }
  143. /**
  144. * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
  145. *
  146. * @soc: core DP soc context
  147. * @peer_id: peer id from peer object can be retrieved
  148. * @mod_id: module id
  149. *
  150. * Return: struct dp_peer*: Pointer to DP peer object
  151. */
  152. static inline struct dp_peer *
  153. __dp_peer_get_ref_by_id(struct dp_soc *soc,
  154. uint16_t peer_id,
  155. enum dp_mod_id mod_id)
  156. {
  157. struct dp_peer *peer;
  158. qdf_spin_lock_bh(&soc->peer_map_lock);
  159. peer = (peer_id >= soc->max_peer_id) ? NULL :
  160. soc->peer_id_to_obj_map[peer_id];
  161. if (!peer ||
  162. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  163. qdf_spin_unlock_bh(&soc->peer_map_lock);
  164. return NULL;
  165. }
  166. qdf_spin_unlock_bh(&soc->peer_map_lock);
  167. return peer;
  168. }
  169. /**
  170. * dp_peer_get_ref_by_id() - Returns peer object given the peer id
  171. * if peer state is active
  172. *
  173. * @soc: core DP soc context
  174. * @peer_id: peer id from peer object can be retrieved
  175. * @mod_id: ID of module requesting reference
  176. *
  177. * Return: struct dp_peer*: Pointer to DP peer object
  178. */
  179. static inline
  180. struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
  181. uint16_t peer_id,
  182. enum dp_mod_id mod_id)
  183. {
  184. struct dp_peer *peer;
  185. qdf_spin_lock_bh(&soc->peer_map_lock);
  186. peer = (peer_id >= soc->max_peer_id) ? NULL :
  187. soc->peer_id_to_obj_map[peer_id];
  188. if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
  189. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  190. qdf_spin_unlock_bh(&soc->peer_map_lock);
  191. return NULL;
  192. }
  193. qdf_spin_unlock_bh(&soc->peer_map_lock);
  194. return peer;
  195. }
  196. /**
  197. * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
  198. *
  199. * @soc: core DP soc context
  200. * @peer_id: peer id from peer object can be retrieved
  201. * @handle: reference handle
  202. * @mod_id: ID of module requesting reference
  203. *
  204. * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
  205. */
  206. static inline struct dp_txrx_peer *
  207. dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
  208. uint16_t peer_id,
  209. dp_txrx_ref_handle *handle,
  210. enum dp_mod_id mod_id)
  211. {
  212. struct dp_peer *peer;
  213. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  214. if (!peer)
  215. return NULL;
  216. if (!peer->txrx_peer) {
  217. dp_peer_unref_delete(peer, mod_id);
  218. return NULL;
  219. }
  220. *handle = (dp_txrx_ref_handle)peer;
  221. return peer->txrx_peer;
  222. }
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
/* Rx-packet caching compiled out: flushing is a no-op */
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif
/**
 * dp_clear_peer_internal() - move a peer to disconnected state and flush
 *			      its cached rx frames
 * @soc: DP soc context (not used by this helper)
 * @peer: DP peer to clear
 *
 * Sets peer->state to OL_TXRX_PEER_STATE_DISC under peer_info_lock, then
 * drops any rx frames cached for the peer.
 */
static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);
}
  245. /**
  246. * dp_vdev_iterate_peer() - API to iterate through vdev peer list
  247. *
  248. * @vdev: DP vdev context
  249. * @func: function to be called for each peer
  250. * @arg: argument need to be passed to func
  251. * @mod_id: module_id
  252. *
  253. * Return: void
  254. */
  255. static inline void
  256. dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
  257. enum dp_mod_id mod_id)
  258. {
  259. struct dp_peer *peer;
  260. struct dp_peer *tmp_peer;
  261. struct dp_soc *soc = NULL;
  262. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  263. return;
  264. soc = vdev->pdev->soc;
  265. qdf_spin_lock_bh(&vdev->peer_list_lock);
  266. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  267. peer_list_elem,
  268. tmp_peer) {
  269. if (dp_peer_get_ref(soc, peer, mod_id) ==
  270. QDF_STATUS_SUCCESS) {
  271. (*func)(soc, peer, arg);
  272. dp_peer_unref_delete(peer, mod_id);
  273. }
  274. }
  275. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  276. }
  277. /**
  278. * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
  279. *
  280. * @pdev: DP pdev context
  281. * @func: function to be called for each peer
  282. * @arg: argument need to be passed to func
  283. * @mod_id: module_id
  284. *
  285. * Return: void
  286. */
  287. static inline void
  288. dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
  289. enum dp_mod_id mod_id)
  290. {
  291. struct dp_vdev *vdev;
  292. if (!pdev)
  293. return;
  294. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  295. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
  296. dp_vdev_iterate_peer(vdev, func, arg, mod_id);
  297. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  298. }
  299. /**
  300. * dp_soc_iterate_peer() - API to iterate through all peers of soc
  301. *
  302. * @soc: DP soc context
  303. * @func: function to be called for each peer
  304. * @arg: argument need to be passed to func
  305. * @mod_id: module_id
  306. *
  307. * Return: void
  308. */
  309. static inline void
  310. dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
  311. enum dp_mod_id mod_id)
  312. {
  313. struct dp_pdev *pdev;
  314. int i;
  315. if (!soc)
  316. return;
  317. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  318. pdev = soc->pdev_list[i];
  319. dp_pdev_iterate_peer(pdev, func, arg, mod_id);
  320. }
  321. }
  322. /**
  323. * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
  324. *
  325. * This API will cache the peers in local allocated memory and calls
  326. * iterate function outside the lock.
  327. *
  328. * As this API is allocating new memory it is suggested to use this
  329. * only when lock cannot be held
  330. *
  331. * @vdev: DP vdev context
  332. * @func: function to be called for each peer
  333. * @arg: argument need to be passed to func
  334. * @mod_id: module_id
  335. *
  336. * Return: void
  337. */
  338. static inline void
  339. dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
  340. dp_peer_iter_func *func,
  341. void *arg,
  342. enum dp_mod_id mod_id)
  343. {
  344. struct dp_peer *peer;
  345. struct dp_peer *tmp_peer;
  346. struct dp_soc *soc = NULL;
  347. struct dp_peer **peer_array = NULL;
  348. int i = 0;
  349. uint32_t num_peers = 0;
  350. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  351. return;
  352. num_peers = vdev->num_peers;
  353. soc = vdev->pdev->soc;
  354. peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
  355. if (!peer_array)
  356. return;
  357. qdf_spin_lock_bh(&vdev->peer_list_lock);
  358. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  359. peer_list_elem,
  360. tmp_peer) {
  361. if (i >= num_peers)
  362. break;
  363. if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
  364. peer_array[i] = peer;
  365. i = (i + 1);
  366. }
  367. }
  368. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  369. for (i = 0; i < num_peers; i++) {
  370. peer = peer_array[i];
  371. if (!peer)
  372. continue;
  373. (*func)(soc, peer, arg);
  374. dp_peer_unref_delete(peer, mod_id);
  375. }
  376. qdf_mem_free(peer_array);
  377. }
  378. /**
  379. * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
  380. *
  381. * This API will cache the peers in local allocated memory and calls
  382. * iterate function outside the lock.
  383. *
  384. * As this API is allocating new memory it is suggested to use this
  385. * only when lock cannot be held
  386. *
  387. * @pdev: DP pdev context
  388. * @func: function to be called for each peer
  389. * @arg: argument need to be passed to func
  390. * @mod_id: module_id
  391. *
  392. * Return: void
  393. */
  394. static inline void
  395. dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
  396. dp_peer_iter_func *func,
  397. void *arg,
  398. enum dp_mod_id mod_id)
  399. {
  400. struct dp_peer *peer;
  401. struct dp_peer *tmp_peer;
  402. struct dp_soc *soc = NULL;
  403. struct dp_vdev *vdev = NULL;
  404. struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
  405. int i = 0;
  406. int j = 0;
  407. uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
  408. if (!pdev || !pdev->soc)
  409. return;
  410. soc = pdev->soc;
  411. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  412. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  413. num_peers[i] = vdev->num_peers;
  414. peer_array[i] = qdf_mem_malloc(num_peers[i] *
  415. sizeof(struct dp_peer *));
  416. if (!peer_array[i])
  417. break;
  418. qdf_spin_lock_bh(&vdev->peer_list_lock);
  419. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  420. peer_list_elem,
  421. tmp_peer) {
  422. if (j >= num_peers[i])
  423. break;
  424. if (dp_peer_get_ref(soc, peer, mod_id) ==
  425. QDF_STATUS_SUCCESS) {
  426. peer_array[i][j] = peer;
  427. j = (j + 1);
  428. }
  429. }
  430. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  431. i = (i + 1);
  432. }
  433. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  434. for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
  435. if (!peer_array[i])
  436. break;
  437. for (j = 0; j < num_peers[i]; j++) {
  438. peer = peer_array[i][j];
  439. if (!peer)
  440. continue;
  441. (*func)(soc, peer, arg);
  442. dp_peer_unref_delete(peer, mod_id);
  443. }
  444. qdf_mem_free(peer_array[i]);
  445. }
  446. }
  447. /**
  448. * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
  449. *
  450. * This API will cache the peers in local allocated memory and calls
  451. * iterate function outside the lock.
  452. *
  453. * As this API is allocating new memory it is suggested to use this
  454. * only when lock cannot be held
  455. *
  456. * @soc: DP soc context
  457. * @func: function to be called for each peer
  458. * @arg: argument need to be passed to func
  459. * @mod_id: module_id
  460. *
  461. * Return: void
  462. */
  463. static inline void
  464. dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
  465. dp_peer_iter_func *func,
  466. void *arg,
  467. enum dp_mod_id mod_id)
  468. {
  469. struct dp_pdev *pdev;
  470. int i;
  471. if (!soc)
  472. return;
  473. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  474. pdev = soc->pdev_list[i];
  475. dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
  476. }
  477. }
#ifdef DP_PEER_STATE_DEBUG
/*
 * DP_PEER_STATE_ASSERT() - validate a peer state transition.
 * If @_condition is false, logs the invalid shift from the peer's current
 * peer_state to @_new_state and asserts (debug builds only).
 */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)
#else
/* Non-debug variant: log the invalid transition but do not assert */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif
  500. /**
  501. * dp_peer_state_cmp() - compare dp peer state
  502. *
  503. * @peer: DP peer
  504. * @state: state
  505. *
  506. * Return: true if state matches with peer state
  507. * false if it does not match
  508. */
  509. static inline bool
  510. dp_peer_state_cmp(struct dp_peer *peer,
  511. enum dp_peer_state state)
  512. {
  513. bool is_status_equal = false;
  514. qdf_spin_lock_bh(&peer->peer_state_lock);
  515. is_status_equal = (peer->peer_state == state);
  516. qdf_spin_unlock_bh(&peer->peer_state_lock);
  517. return is_status_equal;
  518. }
  519. /**
  520. * dp_print_ast_stats() - Dump AST table contents
  521. * @soc: Datapath soc handle
  522. *
  523. * Return: void
  524. */
  525. void dp_print_ast_stats(struct dp_soc *soc);
  526. /**
  527. * dp_rx_peer_map_handler() - handle peer map event from firmware
  528. * @soc: generic soc handle
  529. * @peer_id: peer_id from firmware
  530. * @hw_peer_id: ast index for this peer
  531. * @vdev_id: vdev ID
  532. * @peer_mac_addr: mac address of the peer
  533. * @ast_hash: ast hash value
  534. * @is_wds: flag to indicate peer map event for WDS ast entry
  535. *
  536. * associate the peer_id that firmware provided with peer entry
  537. * and update the ast table in the host with the hw_peer_id.
  538. *
  539. * Return: QDF_STATUS code
  540. */
  541. QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  542. uint16_t hw_peer_id, uint8_t vdev_id,
  543. uint8_t *peer_mac_addr, uint16_t ast_hash,
  544. uint8_t is_wds);
  545. /**
  546. * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
  547. * @soc: generic soc handle
  548. * @peer_id: peer_id from firmware
  549. * @vdev_id: vdev ID
  550. * @peer_mac_addr: mac address of the peer or wds entry
  551. * @is_wds: flag to indicate peer map event for WDS ast entry
  552. * @free_wds_count: number of wds entries freed by FW with peer delete
  553. *
  554. * Return: none
  555. */
  556. void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
  557. uint8_t vdev_id, uint8_t *peer_mac_addr,
  558. uint8_t is_wds, uint32_t free_wds_count);
#ifdef DP_RX_UDP_OVER_PEER_ROAM
/**
 * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
 * @soc: dp soc pointer
 * @vdev_id: vdev id
 * @peer_mac_addr: mac address of the peer
 *
 * This function resets the roamed peer auth status and mac address
 * after peer map indication of same peer is received from firmware.
 *
 * Return: None
 */
void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
			      uint8_t *peer_mac_addr);
#else
/* Roam handling compiled out: nothing to reset */
static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
					    uint8_t *peer_mac_addr)
{
}
#endif
  579. #ifdef WLAN_FEATURE_11BE_MLO
  580. /**
  581. * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
  582. * @soc: generic soc handle
  583. * @peer_id: ML peer_id from firmware
  584. * @peer_mac_addr: mac address of the peer
  585. * @mlo_flow_info: MLO AST flow info
  586. * @mlo_link_info: MLO link info
  587. *
  588. * associate the ML peer_id that firmware provided with peer entry
  589. * and update the ast table in the host with the hw_peer_id.
  590. *
  591. * Return: QDF_STATUS code
  592. */
  593. QDF_STATUS
  594. dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  595. uint8_t *peer_mac_addr,
  596. struct dp_mlo_flow_override_info *mlo_flow_info,
  597. struct dp_mlo_link_info *mlo_link_info);
  598. /**
  599. * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
  600. * @soc: generic soc handle
  601. * @peer_id: peer_id from firmware
  602. *
  603. * Return: none
  604. */
  605. void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
  606. #endif
  607. void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
  608. enum cdp_sec_type sec_type, int is_unicast,
  609. u_int32_t *michael_key, u_int32_t *rx_pn);
  610. QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
  611. uint8_t tid, uint16_t win_sz);
  612. uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
  613. uint16_t peer_id, uint8_t *peer_mac);
  614. /**
  615. * dp_peer_add_ast() - Allocate and add AST entry into peer list
  616. * @soc: SoC handle
  617. * @peer: peer to which ast node belongs
  618. * @mac_addr: MAC address of ast node
  619. * @type: AST entry type
  620. * @flags: AST configuration flags
  621. *
  622. * This API is used by WDS source port learning function to
  623. * add a new AST entry into peer AST list
  624. *
  625. * Return: QDF_STATUS code
  626. */
  627. QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
  628. uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
  629. uint32_t flags);
  630. /**
  631. * dp_peer_del_ast() - Delete and free AST entry
  632. * @soc: SoC handle
  633. * @ast_entry: AST entry of the node
  634. *
  635. * This function removes the AST entry from peer and soc tables
  636. * It assumes caller has taken the ast lock to protect the access to these
  637. * tables
  638. *
  639. * Return: None
  640. */
  641. void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
  642. void dp_peer_ast_unmap_handler(struct dp_soc *soc,
  643. struct dp_ast_entry *ast_entry);
  644. /**
  645. * dp_peer_update_ast() - Delete and free AST entry
  646. * @soc: SoC handle
  647. * @peer: peer to which ast node belongs
  648. * @ast_entry: AST entry of the node
  649. * @flags: wds or hmwds
  650. *
  651. * This function update the AST entry to the roamed peer and soc tables
  652. * It assumes caller has taken the ast lock to protect the access to these
  653. * tables
  654. *
  655. * Return: 0 if ast entry is updated successfully
  656. * -1 failure
  657. */
  658. int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
  659. struct dp_ast_entry *ast_entry, uint32_t flags);
/**
 * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address within pdev
 * @soc: SoC handle
 * @ast_mac_addr: Mac address
 * @pdev_id: pdev Id
 *
 * It assumes caller has taken the ast lock to protect the access to
 * AST hash table
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id);

/**
 * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address within vdev
 * @soc: SoC handle
 * @ast_mac_addr: Mac address
 * @vdev_id: vdev Id
 *
 * It assumes caller has taken the ast lock to protect the access to
 * AST hash table
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id);

/**
 * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address at soc level
 * @soc: SoC handle
 * @ast_mac_addr: Mac address
 *
 * It assumes caller has taken the ast lock to protect the access to
 * AST hash table
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr);

/**
 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the pdev_id from the ast entry.
 *
 * Return: (uint8_t) pdev_id
 */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry);

/**
 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the next hop from the ast entry.
 *
 * Return: (uint8_t) next_hop
 */
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry);

/**
 * dp_peer_ast_set_type() - set type from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 * @type: AST entry type
 *
 * This function sets the type in the ast entry.
 *
 * Return: None
 */
void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type);

/**
 * dp_peer_ast_send_wds_del() - Send WDS AST entry delete for a peer
 * @soc: SoC handle
 * @ast_entry: AST entry to be deleted
 * @peer: peer to which the ast entry belongs
 *
 * NOTE(review): prototype only; presumed to issue the WDS AST delete
 * command for @ast_entry to the target — confirm against dp_peer.c.
 *
 * Return: None
 */
void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

#ifdef WLAN_FEATURE_MULTI_AST_DEL
/**
 * dp_peer_ast_send_multi_wds_del() - Send delete for a list of WDS AST entries
 * @soc: SoC handle
 * @vdev_id: vdev id
 * @wds_list: list of WDS entries to be deleted
 *
 * Return: None
 */
void dp_peer_ast_send_multi_wds_del(
		struct dp_soc *soc, uint8_t vdev_id,
		struct peer_del_multi_wds_entries *wds_list);
#endif

/**
 * dp_peer_free_hmwds_cb() - Callback invoked on HMWDS AST entry free
 * @ctrl_psoc: control path psoc handle
 * @dp_soc: CDP SoC handle
 * @cookie: opaque cookie registered with the free request
 * @status: AST free status
 *
 * Return: None
 */
void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status);
/**
 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
 * @soc: SoC handle
 * @ase: Address search entry
 *
 * This function removes the AST entry from soc AST hash table
 * It assumes caller has taken the ast lock to protect the access to this table
 *
 * Return: None
 */
void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase);

/**
 * dp_peer_free_ast_entry() - Free up the ast entry memory
 * @soc: SoC handle
 * @ast_entry: Address search entry
 *
 * This API is used to free up the memory associated with
 * AST entry.
 *
 * Return: None
 */
void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry);

/**
 * dp_peer_unlink_ast_entry() - Remove AST entry from peer and hash lists
 * @soc: SoC handle
 * @ast_entry: Address search entry
 * @peer: peer
 *
 * This API is used to remove/unlink AST entry from the peer list
 * and hash list.
 *
 * Return: None
 */
void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

/**
 * dp_peer_mec_detach_entry() - Detach the MEC entry
 * @soc: SoC handle
 * @mecentry: MEC entry of the node
 * @ptr: pointer to free list
 *
 * The MEC entry is detached from MEC table and added to free_list
 * to free the object outside lock
 *
 * Return: None
 */
void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
			      void *ptr);

/**
 * dp_peer_mec_free_list() - free the MEC entry from free_list
 * @soc: SoC handle
 * @ptr: pointer to free list
 *
 * Return: None
 */
void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);

/**
 * dp_peer_mec_add_entry() - Allocate and add MEC entry into MEC table
 * @soc: SoC handle
 * @vdev: vdev to which mec node belongs
 * @mac_addr: MAC address of mec node
 *
 * This function allocates and adds MEC entry to MEC table.
 * It assumes caller has taken the mec lock to protect the access to these
 * tables
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 uint8_t *mac_addr);

/**
 * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
 *				       within pdev
 * @soc: SoC handle
 * @pdev_id: pdev Id
 * @mec_mac_addr: MAC address of mec node
 *
 * It assumes caller has taken the mec_lock to protect the access to
 * MEC hash table
 *
 * Return: MEC entry
 */
struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t pdev_id,
						     uint8_t *mec_mac_addr);

/*
 * DP_AST_ASSERT() - assert @_condition, dumping AST stats first on failure.
 * NOTE: the expansion references a variable named `soc`, which must be in
 * scope at every call site.
 */
#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc);\
			QDF_BUG(_condition); \
		} \
	} while (0)
/**
 * dp_peer_update_inactive_time() - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 *
 * Return: None
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf);

#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id() - set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * No-op stub when QCA_MULTIPASS_SUPPORT is disabled.
 *
 * Return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey() - set vlan map for vdev
 * @soc_hdl: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * Stub: always reports success when QCA_MULTIPASS_SUPPORT is disabled.
 *
 * Return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init() - initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove() - remove peer from special peer list
 * @peer: peer handle
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
/* Real implementations; contracts match the stub documentation above */
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif
#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * No-op stub when QCA_PEER_MULTIQ_SUPPORT is disabled.
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl: generic soc handle
 * @is_wds: flag to indicate if peer is wds
 * @peer_id: peer_id from htt peer map message
 * @peer_mac_addr: mac address of the peer
 * @ast_info: ast flow override information from peer map
 *
 * No-op stub when QCA_PEER_MULTIQ_SUPPORT is disabled.
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
}
#else
/* Real implementations; contracts match the stub documentation above */
void dp_peer_reset_flowq_map(struct dp_peer *peer);
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info);
#endif

/**
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 *			   after deleting the entries (ie., setting valid=0)
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 *
 * Return: None
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);
#ifdef QCA_PEER_EXT_STATS
/**
 * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay stats content
 * @soc: DP SoC context
 * @txrx_peer: DP txrx peer context
 *
 * Allocate the peer delay stats context
 *
 * Return: QDF_STATUS_SUCCESS if allocation is
 *	   successful
 */
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer);

/**
 * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context
 * @soc: DP SoC context
 * @txrx_peer: txrx DP peer context
 *
 * Free the peer delay stats context
 *
 * Return: Void
 */
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer);

/**
 * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer
 * @txrx_peer: dp_txrx_peer handle
 *
 * Return: void
 */
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
/* Stubs when peer extended (delay) stats are compiled out */
static inline
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif

#ifdef WLAN_PEER_JITTER
/**
 * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer
 * @pdev: Datapath pdev handle
 * @txrx_peer: dp_txrx_peer handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer);

/**
 * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context
 * @pdev: Datapath pdev handle
 * @txrx_peer: dp_txrx_peer handle
 *
 * Return: void
 */
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer);

/**
 * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer
 * @txrx_peer: dp_txrx_peer handle
 *
 * Return: void
 */
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
/* Stubs when peer jitter stats are compiled out */
static inline
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif

#ifndef CONFIG_SAWF_DEF_QUEUES
/**
 * dp_peer_sawf_ctx_alloc() - Allocate peer SAWF context (stub)
 * @soc: SoC handle
 * @peer: DP peer handle
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_sawf_ctx_free() - Free peer SAWF context (stub)
 * @soc: SoC handle
 * @peer: DP peer handle
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
					       struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifndef CONFIG_SAWF
/**
 * dp_peer_sawf_stats_ctx_alloc() - Allocate peer SAWF stats context (stub)
 * @soc: SoC handle
 * @txrx_peer: dp_txrx_peer handle
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline
QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
					struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_sawf_stats_ctx_free() - Free peer SAWF stats context (stub)
 * @soc: SoC handle
 * @txrx_peer: dp_txrx_peer handle
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline
QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
				       struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}
#endif
/**
 * dp_vdev_bss_peer_ref_n_get: Get bss peer of a vdev
 * @soc: DP soc
 * @vdev: vdev
 * @mod_id: id of module requesting reference
 *
 * Return: VDEV BSS peer
 */
struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   enum dp_mod_id mod_id);

/**
 * dp_sta_vdev_self_peer_ref_n_get: Get self peer of sta vdev
 * @soc: DP soc
 * @vdev: vdev
 * @mod_id: id of module requesting reference
 *
 * Return: VDEV self peer
 */
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
						struct dp_vdev *vdev,
						enum dp_mod_id mod_id);

/**
 * dp_peer_ast_table_detach() - Cleanup the soc AST table
 * @soc: soc handle
 *
 * Return: None
 */
void dp_peer_ast_table_detach(struct dp_soc *soc);

/**
 * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
 * @soc: soc handle
 *
 * Return: none
 */
void dp_peer_find_map_detach(struct dp_soc *soc);

/**
 * dp_soc_wds_detach() - Detach WDS functionality from soc
 * @soc: soc handle
 *
 * Return: None
 */
void dp_soc_wds_detach(struct dp_soc *soc);

/**
 * dp_peer_ast_table_attach() - Allocate the soc AST table
 * @soc: soc handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);

/**
 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
 * @soc: SoC handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);

/**
 * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
 * @soc: SoC handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);

/**
 * dp_del_wds_entry_wrapper() - delete a WDS AST entry
 * @soc: DP soc structure pointer
 * @vdev_id: vdev_id
 * @wds_macaddr: MAC address of ast node
 * @type: type from enum cdp_txrx_ast_entry_type
 * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
 *
 * This API is used to delete an AST entry from fw
 *
 * Return: None
 */
void dp_del_wds_entry_wrapper(struct dp_soc *soc, uint8_t vdev_id,
			      uint8_t *wds_macaddr, uint8_t type,
			      uint8_t delete_in_fw);

/**
 * dp_soc_wds_attach() - Attach WDS functionality to soc
 * @soc: soc handle
 *
 * Return: None
 */
void dp_soc_wds_attach(struct dp_soc *soc);

/**
 * dp_peer_mec_hash_detach() - Free MEC Hash table
 * @soc: SoC handle
 *
 * Return: None
 */
void dp_peer_mec_hash_detach(struct dp_soc *soc);

/**
 * dp_peer_ast_hash_detach() - Free AST Hash table
 * @soc: SoC handle
 *
 * Return: None
 */
void dp_peer_ast_hash_detach(struct dp_soc *soc);
#ifdef FEATURE_AST
/**
 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 *
 * NOTE(review): dp_peer_del_ast() assumes the caller holds the ast lock;
 * the same requirement presumably applies here — confirm at call sites.
 *
 * Return: None
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received(for ex in case
	 * of auth disallow due to ACL) in such cases self ast is not added
	 * to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	/*
	 * Safe-iterator form (temp_ast_entry) is required because
	 * dp_peer_del_ast() removes the entry from the peer list.
	 */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}

/**
 * dp_print_peer_ast_entries() - Dump AST entries of peer
 * @soc: Datapath soc handle
 * @peer: Datapath peer
 * @arg: argument to iterate function
 *
 * Return: void
 */
void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);
#else
/* Stubs when FEATURE_AST is disabled */
static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
					     struct dp_peer *peer, void *arg)
{
}

static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif
#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);

/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);

/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
/* Stubs when FEATURE_MEC is disabled */
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif
#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc: dp_soc handle
 * @peer: peer
 *
 * This function is used to send cache flush cmd to reo and
 * to register the callback to handle the dumping of the reo
 * queue stas from DDR
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(
	struct dp_soc *soc, struct dp_peer *peer);

/**
 * dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl: cdp_soc_t handle
 * @vdev_id: vdev id
 *
 * Handler to get rx tid info from DDR after h/w cache is
 * invalidated first using the cache flush cmd.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);

/**
 * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
 * @soc: dp_soc handle
 * @cb_ctxt: callback context
 * @reo_status: REO command status
 *
 * This is the callback function registered after sending the reo cmd
 * to flush the h/w cache and invalidate it. In the callback the reo
 * queue desc info is dumped from DDR.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(
	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
#else /* DUMP_REO_QUEUE_INFO_IN_DDR */
/*
 * Only the external entry point is stubbed here; the other two functions
 * are internal to the feature and have no callers when it is disabled.
 */
static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
  1274. static inline int dp_peer_find_mac_addr_cmp(
  1275. union dp_align_mac_addr *mac_addr1,
  1276. union dp_align_mac_addr *mac_addr2)
  1277. {
  1278. /*
  1279. * Intentionally use & rather than &&.
  1280. * because the operands are binary rather than generic boolean,
  1281. * the functionality is equivalent.
  1282. * Using && has the advantage of short-circuited evaluation,
  1283. * but using & has the advantage of no conditional branching,
  1284. * which is a more significant benefit.
  1285. */
  1286. return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
  1287. & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
  1288. }
/**
 * dp_peer_delete() - delete DP peer
 *
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
void dp_peer_delete(struct dp_soc *soc,
		    struct dp_peer *peer,
		    void *arg);

/**
 * dp_mlo_peer_delete() - delete MLO DP peer
 *
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);
#ifdef WLAN_FEATURE_11BE_MLO
/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)

/* set peer type */
#define DP_PEER_SET_TYPE(_peer, _type_val) \
	((_peer)->peer_type = (_type_val))

/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))

/* is MLO connection link peer */
#define IS_MLO_DP_LINK_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)

/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_PEER(_peer) \
	((_peer)->peer_type == CDP_MLD_PEER_TYPE)

/* Get Mld peer from link peer */
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
	((link_peer)->mld_peer)

#ifdef WLAN_MLO_MULTI_CHIP
/**
 * dp_mlo_get_chip_id() - Get the chip id for this soc
 * @soc: Datapath soc handle
 *
 * Return: chip id
 */
uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);

/**
 * dp_link_peer_hash_find_by_chip_id() - Find link peer by MAC address on the
 *					 soc identified by chip id
 * @soc: Datapath soc handle
 * @peer_mac_addr: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev id
 * @chip_id: chip id of the soc to search
 * @mod_id: id of module requesting reference
 *
 * Return: peer on success, NULL on failure
 */
struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id);
#else
/* Single-chip build: chip id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

/* Single-chip build: @chip_id is ignored; search the local hash table */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
#endif
/**
 * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
 *				  matching mac_address
 * @soc: soc handle
 * @peer_mac_addr: mld peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * Return: peer in success
 *	   NULL in failure
 */
static inline
struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
					   uint8_t *peer_mac_addr,
					   int mac_addr_is_aligned,
					   uint8_t vdev_id,
					   enum dp_mod_id mod_id)
{
	if (soc->arch_ops.mlo_peer_find_hash_find)
		/*
		 * NOTE(review): the callback receives (mod_id, vdev_id) in
		 * that order — the reverse of this wrapper's parameter
		 * order. Verify against the arch_ops callback signature.
		 */
		return soc->arch_ops.mlo_peer_find_hash_find(soc,
							     peer_mac_addr,
							     mac_addr_is_aligned,
							     mod_id, vdev_id);
	return NULL;
}
  1382. /**
  1383. * dp_peer_hash_find_wrapper() - find link peer or mld per according to
  1384. * peer_type
  1385. * @soc: DP SOC handle
  1386. * @peer_info: peer information for hash find
  1387. * @mod_id: ID of module requesting reference
  1388. *
  1389. * Return: peer handle
  1390. */
  1391. static inline
  1392. struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
  1393. struct cdp_peer_info *peer_info,
  1394. enum dp_mod_id mod_id)
  1395. {
  1396. struct dp_peer *peer = NULL;
  1397. if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
  1398. peer_info->peer_type == CDP_WILD_PEER_TYPE) {
  1399. peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
  1400. peer_info->mac_addr_is_aligned,
  1401. peer_info->vdev_id,
  1402. mod_id);
  1403. if (peer)
  1404. return peer;
  1405. }
  1406. if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
  1407. peer_info->peer_type == CDP_WILD_PEER_TYPE)
  1408. peer = dp_mld_peer_find_hash_find(
  1409. soc, peer_info->mac_addr,
  1410. peer_info->mac_addr_is_aligned,
  1411. peer_info->vdev_id,
  1412. mod_id);
  1413. return peer;
  1414. }
/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
 *				 increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * Note: the reference is always taken with DP_MOD_ID_CDP and a NULL soc;
 * dp_link_peer_del_mld_peer() releases with the same module id.
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
			       struct dp_peer *mld_peer)
{
	/* increase mld_peer ref_cnt */
	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = mld_peer;
}

/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
 *				 decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = NULL;
}

/**
 * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Creates the link_peers_info lock and marks all link slots invalid.
 *
 * Return: None
 */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
	int i;

	qdf_spinlock_create(&mld_peer->link_peers_info_lock);
	mld_peer->num_links = 0;
	for (i = 0; i < DP_MAX_MLO_LINKS; i++)
		mld_peer->link_peers[i].is_valid = false;
}

/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Only destroys the lock; link slots are not cleared here.
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}
/**
 * dp_mld_peer_add_link_peer() - add link peer info to mld peer
 * @mld_peer: mld dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Records the link peer's mac address, vdev id and chip id in the first
 * free slot of @mld_peer->link_peers. If no slot is free (i ends up equal
 * to DP_MAX_MLO_LINKS) the addition is logged as "Failed".
 *
 * Return: None
 */
static inline
void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
			       struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	struct dp_soc *soc = mld_peer->vdev->pdev->soc;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (!link_peer_info->is_valid) {
			qdf_mem_copy(link_peer_info->mac_addr.raw,
				     link_peer->mac_addr.raw,
				     QDF_MAC_ADDR_SIZE);
			link_peer_info->is_valid = true;
			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
			link_peer_info->chip_id =
				dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
			mld_peer->num_links++;
			break;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	/* i == DP_MAX_MLO_LINKS here means no free slot was found */
	dp_peer_info("%s addition of link peer %pK (" QDF_MAC_ADDR_FMT ") "
		     "to MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
		     "idx %u num_links %u",
		     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
		     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
		     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
		     i, mld_peer->num_links);

	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_ADD_LINK,
						mld_peer, link_peer, i,
						(i != DP_MAX_MLO_LINKS) ? 1 : 0);
}

/**
 * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
 * @mld_peer: MLD dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Invalidates the slot whose mac address matches @link_peer. If no slot
 * matches (i ends up equal to DP_MAX_MLO_LINKS) the deletion is logged
 * as "Failed" and num_links is unchanged.
 *
 * Return: number of links left after deletion
 */
static inline
uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
				  struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	uint8_t num_links;
	struct dp_soc *soc = mld_peer->vdev->pdev->soc;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid &&
		    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
					       &link_peer_info->mac_addr)) {
			link_peer_info->is_valid = false;
			mld_peer->num_links--;
			break;
		}
	}
	/* snapshot under the lock; logging below runs unlocked */
	num_links = mld_peer->num_links;
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	dp_peer_info("%s deletion of link peer %pK (" QDF_MAC_ADDR_FMT ") "
		     "from MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
		     "idx %u num_links %u",
		     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
		     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
		     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
		     i, mld_peer->num_links);

	dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_DEL_LINK,
						mld_peer, link_peer, i,
						(i != DP_MAX_MLO_LINKS) ? 1 : 0);

	return num_links;
}
/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
 *					   increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that hold links peers pointer array and number
 * @mod_id: id of module requesting reference
 *
 * Caller must release each obtained reference via dp_release_link_peers_ref()
 * with the same @mod_id. Slots whose hash lookup fails are skipped, so
 * num_links reflects only the peers actually referenced.
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
		struct dp_soc *soc,
		struct dp_peer *mld_peer,
		struct dp_mld_link_peers *mld_link_peers,
		enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i = 0, j = 0;
	struct dp_peer_link_info *link_peer_info;

	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid) {
			peer = dp_link_peer_hash_find_by_chip_id(
						soc,
						link_peer_info->mac_addr.raw,
						true,
						link_peer_info->vdev_id,
						link_peer_info->chip_id,
						mod_id);
			if (peer)
				mld_link_peers->link_peers[j++] = peer;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	mld_link_peers->num_links = j;
}
  1590. /**
  1591. * dp_release_link_peers_ref() - release all link peers reference
  1592. * @mld_link_peers: structure that hold links peers pointer array and number
  1593. * @mod_id: id of module requesting reference
  1594. *
  1595. * Return: None.
  1596. */
  1597. static inline
  1598. void dp_release_link_peers_ref(
  1599. struct dp_mld_link_peers *mld_link_peers,
  1600. enum dp_mod_id mod_id)
  1601. {
  1602. struct dp_peer *peer;
  1603. uint8_t i;
  1604. for (i = 0; i < mld_link_peers->num_links; i++) {
  1605. peer = mld_link_peers->link_peers[i];
  1606. if (peer)
  1607. dp_peer_unref_delete(peer, mod_id);
  1608. mld_link_peers->link_peers[i] = NULL;
  1609. }
  1610. mld_link_peers->num_links = 0;
  1611. }
  1612. /**
  1613. * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
  1614. * @soc: Datapath soc handle
  1615. * @peer_id: peer id
  1616. * @lmac_id: lmac id to find the link peer on given lmac
  1617. *
  1618. * Return: peer_id of link peer if found
  1619. * else return HTT_INVALID_PEER
  1620. */
  1621. static inline
  1622. uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
  1623. uint8_t lmac_id)
  1624. {
  1625. uint8_t i;
  1626. struct dp_peer *peer;
  1627. struct dp_peer *link_peer;
  1628. struct dp_soc *link_peer_soc;
  1629. struct dp_mld_link_peers link_peers_info;
  1630. uint16_t link_peer_id = HTT_INVALID_PEER;
  1631. peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
  1632. if (!peer)
  1633. return HTT_INVALID_PEER;
  1634. if (IS_MLO_DP_MLD_PEER(peer)) {
  1635. /* get link peers with reference */
  1636. dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
  1637. DP_MOD_ID_CDP);
  1638. for (i = 0; i < link_peers_info.num_links; i++) {
  1639. link_peer = link_peers_info.link_peers[i];
  1640. link_peer_soc = link_peer->vdev->pdev->soc;
  1641. if ((link_peer_soc == soc) &&
  1642. (link_peer->vdev->pdev->lmac_id == lmac_id)) {
  1643. link_peer_id = link_peer->peer_id;
  1644. break;
  1645. }
  1646. }
  1647. /* release link peers reference */
  1648. dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
  1649. } else {
  1650. link_peer_id = peer_id;
  1651. }
  1652. dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
  1653. return link_peer_id;
  1654. }
  1655. /**
  1656. * dp_peer_get_tgt_peer_hash_find() - get dp_peer handle
  1657. * @soc: soc handle
  1658. * @peer_mac: peer mac address
  1659. * @mac_addr_is_aligned: is mac addr aligned
  1660. * @vdev_id: vdev_id
  1661. * @mod_id: id of module requesting reference
  1662. *
  1663. * for MLO connection, get corresponding MLD peer,
  1664. * otherwise get link peer for non-MLO case.
  1665. *
  1666. * Return: peer in success
  1667. * NULL in failure
  1668. */
  1669. static inline
  1670. struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
  1671. uint8_t *peer_mac,
  1672. int mac_addr_is_aligned,
  1673. uint8_t vdev_id,
  1674. enum dp_mod_id mod_id)
  1675. {
  1676. struct dp_peer *ta_peer = NULL;
  1677. struct dp_peer *peer = dp_peer_find_hash_find(soc,
  1678. peer_mac, 0, vdev_id,
  1679. mod_id);
  1680. if (peer) {
  1681. /* mlo connection link peer, get mld peer with reference */
  1682. if (IS_MLO_DP_LINK_PEER(peer)) {
  1683. /* increase mld peer ref_cnt */
  1684. if (QDF_STATUS_SUCCESS ==
  1685. dp_peer_get_ref(soc, peer->mld_peer, mod_id))
  1686. ta_peer = peer->mld_peer;
  1687. else
  1688. ta_peer = NULL;
  1689. /* release peer reference that added by hash find */
  1690. dp_peer_unref_delete(peer, mod_id);
  1691. } else {
  1692. /* mlo MLD peer or non-mlo link peer */
  1693. ta_peer = peer;
  1694. }
  1695. } else {
  1696. dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT,
  1697. QDF_MAC_ADDR_REF(peer_mac));
  1698. }
  1699. return ta_peer;
  1700. }
  1701. /**
  1702. * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
  1703. * @soc: core DP soc context
  1704. * @peer_id: peer id from peer object can be retrieved
  1705. * @mod_id: ID of module requesting reference
  1706. *
  1707. * for MLO connection, get corresponding MLD peer,
  1708. * otherwise get link peer for non-MLO case.
  1709. *
  1710. * Return: peer in success
  1711. * NULL in failure
  1712. */
  1713. static inline
  1714. struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
  1715. uint16_t peer_id,
  1716. enum dp_mod_id mod_id)
  1717. {
  1718. struct dp_peer *ta_peer = NULL;
  1719. struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1720. if (peer) {
  1721. /* mlo connection link peer, get mld peer with reference */
  1722. if (IS_MLO_DP_LINK_PEER(peer)) {
  1723. /* increase mld peer ref_cnt */
  1724. if (QDF_STATUS_SUCCESS ==
  1725. dp_peer_get_ref(soc, peer->mld_peer, mod_id))
  1726. ta_peer = peer->mld_peer;
  1727. else
  1728. ta_peer = NULL;
  1729. /* release peer reference that added by hash find */
  1730. dp_peer_unref_delete(peer, mod_id);
  1731. } else {
  1732. /* mlo MLD peer or non-mlo link peer */
  1733. ta_peer = peer;
  1734. }
  1735. }
  1736. return ta_peer;
  1737. }
  1738. /**
  1739. * dp_peer_mlo_delete() - peer MLO related delete operation
  1740. * @peer: DP peer handle
  1741. * Return: None
  1742. */
  1743. static inline
  1744. void dp_peer_mlo_delete(struct dp_peer *peer)
  1745. {
  1746. struct dp_peer *ml_peer;
  1747. struct dp_soc *soc;
  1748. dp_info("peer " QDF_MAC_ADDR_FMT " type %d",
  1749. QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->peer_type);
  1750. /* MLO connection link peer */
  1751. if (IS_MLO_DP_LINK_PEER(peer)) {
  1752. ml_peer = peer->mld_peer;
  1753. soc = ml_peer->vdev->pdev->soc;
  1754. /* if last link peer deletion, delete MLD peer */
  1755. if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
  1756. dp_peer_delete(soc, peer->mld_peer, NULL);
  1757. }
  1758. }
/**
 * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
 * @soc: Soc handle
 * @peer: DP peer handle
 * @vdev_id: Vdev ID
 * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
QDF_STATUS dp_peer_mlo_setup(
		struct dp_soc *soc,
		struct dp_peer *peer,
		uint8_t vdev_id,
		struct cdp_peer_setup_info *setup_info);
  1771. /**
  1772. * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
  1773. * @peer: datapath peer
  1774. *
  1775. * Return: MLD peer in case of MLO Link peer
  1776. * Peer itself in other cases
  1777. */
  1778. static inline
  1779. struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
  1780. {
  1781. return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
  1782. }
  1783. /**
  1784. * dp_get_primary_link_peer_by_id(): Get primary link peer from the given
  1785. * peer id
  1786. * @soc: core DP soc context
  1787. * @peer_id: peer id
  1788. * @mod_id: ID of module requesting reference
  1789. *
  1790. * Return: primary link peer for the MLO peer
  1791. * legacy peer itself in case of legacy peer
  1792. */
  1793. static inline
  1794. struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
  1795. uint16_t peer_id,
  1796. enum dp_mod_id mod_id)
  1797. {
  1798. uint8_t i;
  1799. struct dp_mld_link_peers link_peers_info;
  1800. struct dp_peer *peer;
  1801. struct dp_peer *link_peer;
  1802. struct dp_peer *primary_peer = NULL;
  1803. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1804. if (!peer)
  1805. return NULL;
  1806. if (IS_MLO_DP_MLD_PEER(peer)) {
  1807. /* get link peers with reference */
  1808. dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
  1809. mod_id);
  1810. for (i = 0; i < link_peers_info.num_links; i++) {
  1811. link_peer = link_peers_info.link_peers[i];
  1812. if (link_peer->primary_link) {
  1813. primary_peer = link_peer;
  1814. /*
  1815. * Take additional reference over
  1816. * primary link peer.
  1817. */
  1818. dp_peer_get_ref(NULL, primary_peer, mod_id);
  1819. break;
  1820. }
  1821. }
  1822. /* release link peers reference */
  1823. dp_release_link_peers_ref(&link_peers_info, mod_id);
  1824. dp_peer_unref_delete(peer, mod_id);
  1825. } else {
  1826. primary_peer = peer;
  1827. }
  1828. return primary_peer;
  1829. }
  1830. /**
  1831. * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
  1832. * @peer: Datapath peer
  1833. *
  1834. * Return: dp_txrx_peer from MLD peer if peer type is link peer
  1835. * dp_txrx_peer from peer itself for other cases
  1836. */
  1837. static inline
  1838. struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
  1839. {
  1840. return IS_MLO_DP_LINK_PEER(peer) ?
  1841. peer->mld_peer->txrx_peer : peer->txrx_peer;
  1842. }
  1843. /**
  1844. * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
  1845. * @peer: Datapath peer
  1846. *
  1847. * Return: true if peer is primary link peer or legacy peer
  1848. * false otherwise
  1849. */
  1850. static inline
  1851. bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
  1852. {
  1853. if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
  1854. return true;
  1855. else if (IS_DP_LEGACY_PEER(peer))
  1856. return true;
  1857. else
  1858. return false;
  1859. }
  1860. /**
  1861. * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
  1862. *
  1863. * @soc: core DP soc context
  1864. * @peer_id: peer id from peer object can be retrieved
  1865. * @handle: reference handle
  1866. * @mod_id: ID of module requesting reference
  1867. *
  1868. * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
  1869. */
  1870. static inline struct dp_txrx_peer *
  1871. dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
  1872. uint16_t peer_id,
  1873. dp_txrx_ref_handle *handle,
  1874. enum dp_mod_id mod_id)
  1875. {
  1876. struct dp_peer *peer;
  1877. struct dp_txrx_peer *txrx_peer;
  1878. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1879. if (!peer)
  1880. return NULL;
  1881. txrx_peer = dp_get_txrx_peer(peer);
  1882. if (txrx_peer) {
  1883. *handle = (dp_txrx_ref_handle)peer;
  1884. return txrx_peer;
  1885. }
  1886. dp_peer_unref_delete(peer, mod_id);
  1887. return NULL;
  1888. }
/**
 * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
 *
 * @soc: core DP soc context
 *
 * Return: void
 */
void dp_print_mlo_ast_stats_be(struct dp_soc *soc);
#else
/*
 * Non-MLO build (WLAN_FEATURE_11BE_MLO undefined): MLO peer
 * classification collapses to compile-time constants so callers need
 * no #ifdef at the call site.
 */
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false
#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) true
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL
/* Non-MLO: resolve the peer directly via the regular hash find */
static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
					  struct cdp_peer_info *peer_info,
					  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_info->mac_addr,
				      peer_info->mac_addr_is_aligned,
				      peer_info->vdev_id,
				      mod_id);
}
/* Non-MLO: target peer is simply the hash-found link peer */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
}
/* Non-MLO: target peer lookup degenerates to a plain ref-by-id */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}
/* Non-MLO: nothing to set up; report success so callers proceed */
static inline
QDF_STATUS dp_peer_mlo_setup(
		struct dp_soc *soc,
		struct dp_peer *peer,
		uint8_t vdev_id,
		struct cdp_peer_setup_info *setup_info)
{
	return QDF_STATUS_SUCCESS;
}
/* Non-MLO stub: no MLD link-peer bookkeeping exists */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}
/* Non-MLO stub: no MLD link-peer bookkeeping to tear down */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}
/* Non-MLO stub: link peers have no MLD peer association */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}
/* Non-MLO stub: no MLO-specific state to delete */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}
/* Non-MLO stub: authorization of MLO link peers not applicable */
static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
}
/* Non-MLO stub: single-chip system, chip id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}
/* Non-MLO stub: chip id is irrelevant; fall back to the plain hash find */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
/* Non-MLO stub: the peer itself is always the target peer */
static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	return peer;
}
/* Non-MLO stub: every peer is its own "primary link" peer */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}
/* Non-MLO stub: no MLD indirection; use the peer's own txrx context */
static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return peer->txrx_peer;
}
/* Non-MLO stub: legacy peers are always considered primary */
static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	return true;
}
/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Non-MLO variant: forwards directly to dp_txrx_peer_get_ref_by_id().
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)
{
	return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}
/* Non-MLO stub: peer id already identifies the (only) link peer */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	return peer_id;
}
/* Non-MLO stub: no MLO AST stats to print */
static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
{
}
#endif /* WLAN_FEATURE_11BE_MLO */
  2029. static inline
  2030. QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
  2031. {
  2032. uint8_t i;
  2033. if (IS_MLO_DP_MLD_PEER(peer)) {
  2034. dp_peer_info("skip for mld peer");
  2035. return QDF_STATUS_SUCCESS;
  2036. }
  2037. if (peer->rx_tid) {
  2038. QDF_BUG(0);
  2039. dp_peer_err("peer rx_tid mem already exist");
  2040. return QDF_STATUS_E_FAILURE;
  2041. }
  2042. peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
  2043. sizeof(struct dp_rx_tid));
  2044. if (!peer->rx_tid) {
  2045. dp_err("fail to alloc tid for peer" QDF_MAC_ADDR_FMT,
  2046. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  2047. return QDF_STATUS_E_NOMEM;
  2048. }
  2049. qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
  2050. for (i = 0; i < DP_MAX_TIDS; i++)
  2051. qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
  2052. return QDF_STATUS_SUCCESS;
  2053. }
/**
 * dp_peer_rx_tids_destroy() - destroy per-TID rx state of a peer
 * @peer: DP peer handle
 *
 * Destroys every tid_lock and frees the rx_tid array, then clears the
 * pointer.
 *
 * NOTE(review): the free is skipped for MLO link peers, while
 * dp_peer_rx_tids_create() skips allocation for MLD peers — presumably
 * the rx_tid array of a link peer is owned/shared by its MLD peer.
 * Confirm against the callers before changing this asymmetry.
 *
 * Return: None
 */
static inline
void dp_peer_rx_tids_destroy(struct dp_peer *peer)
{
	uint8_t i;
	if (!IS_MLO_DP_LINK_PEER(peer)) {
		for (i = 0; i < DP_MAX_TIDS; i++)
			qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);
		qdf_mem_free(peer->rx_tid);
	}
	peer->rx_tid = NULL;
}
  2065. static inline
  2066. void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
  2067. {
  2068. uint8_t i;
  2069. qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
  2070. sizeof(struct dp_rx_tid_defrag));
  2071. for (i = 0; i < DP_MAX_TIDS; i++)
  2072. qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
  2073. }
  2074. static inline
  2075. void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
  2076. {
  2077. uint8_t i;
  2078. for (i = 0; i < DP_MAX_TIDS; i++)
  2079. qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
  2080. }
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_peer_rx_bufq_resources_init() - initialize the cached-rx-packet
 * buffer queue of a txrx peer
 * @txrx_peer: DP txrx peer handle
 *
 * Creates the bufq lock and the cached buffer list, both bounded by
 * DP_RX_CACHED_BUFQ_THRESH.
 *
 * Return: None
 */
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
	qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
	txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
			DP_RX_CACHED_BUFQ_THRESH);
}
/**
 * dp_peer_rx_bufq_resources_deinit() - destroy the cached-rx-packet
 * buffer queue of a txrx peer
 * @txrx_peer: DP txrx peer handle
 *
 * Return: None
 */
static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
	qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
#else
/* PEER_CACHE_RX_PKTS disabled: bufq init/deinit are no-ops */
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}
static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
#endif
  2106. /**
  2107. * dp_peer_update_state() - update dp peer state
  2108. *
  2109. * @soc: core DP soc context
  2110. * @peer: DP peer
  2111. * @state: new state
  2112. *
  2113. * Return: None
  2114. */
  2115. static inline void
  2116. dp_peer_update_state(struct dp_soc *soc,
  2117. struct dp_peer *peer,
  2118. enum dp_peer_state state)
  2119. {
  2120. uint8_t peer_state;
  2121. qdf_spin_lock_bh(&peer->peer_state_lock);
  2122. peer_state = peer->peer_state;
  2123. switch (state) {
  2124. case DP_PEER_STATE_INIT:
  2125. DP_PEER_STATE_ASSERT
  2126. (peer, state, (peer_state != DP_PEER_STATE_ACTIVE) ||
  2127. (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
  2128. break;
  2129. case DP_PEER_STATE_ACTIVE:
  2130. DP_PEER_STATE_ASSERT(peer, state,
  2131. (peer_state == DP_PEER_STATE_INIT));
  2132. break;
  2133. case DP_PEER_STATE_LOGICAL_DELETE:
  2134. DP_PEER_STATE_ASSERT(peer, state,
  2135. (peer_state == DP_PEER_STATE_ACTIVE) ||
  2136. (peer_state == DP_PEER_STATE_INIT));
  2137. break;
  2138. case DP_PEER_STATE_INACTIVE:
  2139. if (IS_MLO_DP_MLD_PEER(peer))
  2140. DP_PEER_STATE_ASSERT
  2141. (peer, state,
  2142. (peer_state == DP_PEER_STATE_ACTIVE));
  2143. else
  2144. DP_PEER_STATE_ASSERT
  2145. (peer, state,
  2146. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  2147. break;
  2148. case DP_PEER_STATE_FREED:
  2149. if (peer->sta_self_peer)
  2150. DP_PEER_STATE_ASSERT
  2151. (peer, state, (peer_state == DP_PEER_STATE_INIT));
  2152. else
  2153. DP_PEER_STATE_ASSERT
  2154. (peer, state,
  2155. (peer_state == DP_PEER_STATE_INACTIVE) ||
  2156. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  2157. break;
  2158. default:
  2159. qdf_spin_unlock_bh(&peer->peer_state_lock);
  2160. dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
  2161. state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  2162. return;
  2163. }
  2164. peer->peer_state = state;
  2165. qdf_spin_unlock_bh(&peer->peer_state_lock);
  2166. dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
  2167. peer_state, state,
  2168. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  2169. }
#ifdef REO_SHARED_QREF_TABLE_EN
/**
 * dp_peer_rx_reo_shared_qaddr_delete() - clear the peer's rx queue
 * address entries in the shared REO qref table
 * @soc: core DP soc context
 * @peer: DP peer
 *
 * Return: None
 */
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
					struct dp_peer *peer);
#else
/* Shared REO qref table disabled: nothing to delete */
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
						      struct dp_peer *peer) {}
#endif
/**
 * dp_peer_check_wds_ext_peer() - Check WDS ext peer
 *
 * @peer: DP peer
 *
 * Return: True for WDS ext peer, false otherwise
 */
bool dp_peer_check_wds_ext_peer(struct dp_peer *peer);
/**
 * dp_gen_ml_peer_id() - Generate MLD peer id for DP
 *
 * @soc: DP soc context
 * @peer_id: mld peer id
 *
 * Return: DP MLD peer id
 */
uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id);
#endif /* _DP_PEER_H_ */