/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_PEER_H_
#define _DP_PEER_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_types.h"

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
#include "hal_reo.h"
#endif

#define DP_INVALID_PEER_ID 0xffff

#define DP_PEER_MAX_MEC_IDX 1024	/* maximum index for MEC table */
#define DP_PEER_MAX_MEC_ENTRY 4096	/* maximum MEC entries in MEC table */

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

#define DP_PEER_HASH_LOAD_MULT 2
#define DP_PEER_HASH_LOAD_SHIFT 0

/* Threshold for peer's cached buf queue beyond which frames are dropped */
#define DP_RX_CACHED_BUFQ_THRESH 64

#define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
#define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)

#ifdef REO_QDESC_HISTORY
enum reo_qdesc_event_type {
	REO_QDESC_UPDATE_CB = 0,
	REO_QDESC_FREE,
};

struct reo_qdesc_event {
	qdf_dma_addr_t qdesc_addr;
	uint64_t ts;
	enum reo_qdesc_event_type type;
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
};
#endif

struct ast_del_ctxt {
	bool age;
	int del_count;
};

typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);

void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);

struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
				       uint8_t *peer_mac_addr,
				       int mac_addr_is_aligned,
				       uint8_t vdev_id,
				       enum dp_mod_id id);

bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);

/**
 * dp_peer_get_ref() - Acquire a reference on the given peer object
 *
 * @soc : core DP soc context
 * @peer : DP peer
 * @mod_id : id of module requesting the reference
 *
 * Return: QDF_STATUS_SUCCESS if reference held successfully
 *         else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
			   struct dp_peer *peer,
			   enum dp_mod_id mod_id)
{
	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
		return QDF_STATUS_E_INVAL;

	if (mod_id > DP_MOD_ID_RX)
		qdf_atomic_inc(&peer->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}

/**
 * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *
 * @soc : core DP soc context
 * @peer_id : peer id from which peer object can be retrieved
 * @mod_id : module id
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline struct dp_peer *
__dp_peer_get_ref_by_id(struct dp_soc *soc,
			uint16_t peer_id,
			enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peer_id) ? NULL :
		soc->peer_id_to_obj_map[peer_id];
	if (!peer ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}

/**
 * dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *                           if peer state is active
 *
 * @soc : core DP soc context
 * @peer_id : peer id from which peer object can be retrieved
 * @mod_id : ID of module requesting reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline
struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
				      uint16_t peer_id,
				      enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peer_id) ? NULL :
		soc->peer_id_to_obj_map[peer_id];
	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}
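
/*
 * Usage sketch (editor's illustrative example, not part of this header):
 * a caller looks up a peer by id, uses it, and releases the reference
 * with the same mod_id it acquired it under. DP_MOD_ID_CDP is one of the
 * existing dp_mod_id values; the work done on the peer is a placeholder.
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
 *	if (!peer)
 *		return;
 *	// ... read/update peer fields under the held reference ...
 *	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */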

/**
 * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
 *
 * @soc : core DP soc context
 * @peer_id : peer id from which peer object can be retrieved
 * @handle : reference handle
 * @mod_id : ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			   uint16_t peer_id,
			   dp_txrx_ref_handle *handle,
			   enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	if (!peer)
		return NULL;

	if (!peer->txrx_peer) {
		dp_peer_unref_delete(peer, mod_id);
		return NULL;
	}

	*handle = (dp_txrx_ref_handle)peer;
	return peer->txrx_peer;
}
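
/*
 * Usage sketch (editor's illustrative example): the returned handle, not
 * the txrx peer pointer, is what must be passed back when dropping the
 * reference. DP_MOD_ID_TX is assumed to be a valid dp_mod_id value here.
 *
 *	dp_txrx_ref_handle handle = NULL;
 *	struct dp_txrx_peer *txrx_peer;
 *
 *	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, &handle,
 *					       DP_MOD_ID_TX);
 *	if (!txrx_peer)
 *		return;
 *	// ... use txrx_peer ...
 *	dp_txrx_peer_unref_delete(handle, DP_MOD_ID_TX);
 */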

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);
}

/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev : DP vdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
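
/*
 * Usage sketch (editor's illustrative example): a dp_peer_iter_func
 * callback that counts the peers on a vdev. Note that the callback is
 * invoked with vdev->peer_list_lock held, so it must stay lightweight;
 * use the *_lock_safe variants further below otherwise.
 *
 *	static void dp_count_peer_cb(struct dp_soc *soc,
 *				     struct dp_peer *peer, void *arg)
 *	{
 *		uint32_t *count = (uint32_t *)arg;
 *
 *		(*count)++;
 *	}
 *
 *	uint32_t count = 0;
 *
 *	dp_vdev_iterate_peer(vdev, dp_count_peer_cb, &count, DP_MOD_ID_CDP);
 */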

/**
 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
 *
 * @pdev : DP pdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev;

	if (!pdev)
		return;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

/**
 * dp_soc_iterate_peer() - API to iterate through all peers of soc
 *
 * @soc : DP soc context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
		    enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
	}
}

/**
 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it is suggested to use it only
 * when the lock cannot be held.
 *
 * @vdev : DP vdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_peer **peer_array = NULL;
	int i = 0;
	uint32_t num_peers = 0;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	num_peers = vdev->num_peers;

	soc = vdev->pdev->soc;

	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
	if (!peer_array)
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (i >= num_peers)
			break;

		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
			peer_array[i] = peer;
			i = (i + 1);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

	for (i = 0; i < num_peers; i++) {
		peer = peer_array[i];
		if (!peer)
			continue;

		(*func)(soc, peer, arg);
		dp_peer_unref_delete(peer, mod_id);
	}

	qdf_mem_free(peer_array);
}
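
/*
 * Design note with a usage sketch (editor's addition): because the peers
 * are snapshotted into a local array with references held, the callback
 * runs without vdev->peer_list_lock held, so it may itself take peer
 * list locks or do heavier work, at the cost of one allocation. The
 * callback name below is a placeholder.
 *
 *	dp_vdev_iterate_peer_lock_safe(vdev, dp_heavier_peer_cb, NULL,
 *				       DP_MOD_ID_CDP);
 */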

/**
 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it is suggested to use it only
 * when the lock cannot be held.
 *
 * @pdev : DP pdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
	int i = 0;
	int j = 0;
	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};

	if (!pdev || !pdev->soc)
		return;

	soc = pdev->soc;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		num_peers[i] = vdev->num_peers;
		peer_array[i] = qdf_mem_malloc(num_peers[i] *
					       sizeof(struct dp_peer *));
		if (!peer_array[i])
			break;

		j = 0;
		qdf_spin_lock_bh(&vdev->peer_list_lock);
		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
				   peer_list_elem,
				   tmp_peer) {
			if (j >= num_peers[i])
				break;

			if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
				peer_array[i][j] = peer;
				j = (j + 1);
			}
		}
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		i = (i + 1);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
		if (!peer_array[i])
			break;

		for (j = 0; j < num_peers[i]; j++) {
			peer = peer_array[i][j];
			if (!peer)
				continue;

			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}

		qdf_mem_free(peer_array[i]);
	}
}

/**
 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it is suggested to use it only
 * when the lock cannot be held.
 *
 * @soc : DP soc context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
			      dp_peer_iter_func *func,
			      void *arg,
			      enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
	}
}

#ifdef DP_PEER_STATE_DEBUG
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)
#else
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif

/**
 * dp_peer_state_cmp() - compare dp peer state
 *
 * @peer : DP peer
 * @state : state to compare against
 *
 * Return: true if state matches with peer state
 *         false if it does not match
 */
static inline bool
dp_peer_state_cmp(struct dp_peer *peer,
		  enum dp_peer_state state)
{
	bool is_status_equal = false;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	is_status_equal = (peer->peer_state == state);
	qdf_spin_unlock_bh(&peer->peer_state_lock);

	return is_status_equal;
}

/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc : core DP soc context
 * @peer : DP peer
 * @state : new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
		     struct dp_peer *peer,
		     enum dp_peer_state state)
{
	uint8_t peer_state;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	peer_state = peer->peer_state;

	switch (state) {
	case DP_PEER_STATE_INIT:
		DP_PEER_STATE_ASSERT
			(peer, state,
			 (peer_state != DP_PEER_STATE_ACTIVE) &&
			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_ACTIVE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_LOGICAL_DELETE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_ACTIVE) ||
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_INACTIVE:
		DP_PEER_STATE_ASSERT
			(peer, state,
			 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_FREED:
		if (peer->sta_self_peer)
			DP_PEER_STATE_ASSERT
			(peer, state, (peer_state == DP_PEER_STATE_INIT));
		else
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INACTIVE) ||
				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	default:
		qdf_spin_unlock_bh(&peer->peer_state_lock);
		dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return;
	}
	peer->peer_state = state;
	qdf_spin_unlock_bh(&peer->peer_state_lock);
	dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
		peer_state, state,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}
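
/*
 * State machine sketch (editor's addition, derived from the asserts
 * above): the expected peer lifecycle is
 *
 *	INIT -> ACTIVE -> LOGICAL_DELETE -> INACTIVE -> FREED
 *
 * with INIT -> LOGICAL_DELETE allowed for peers torn down before
 * activation, LOGICAL_DELETE -> FREED allowed directly, and
 * INIT -> FREED allowed for STA self peers. A typical caller does:
 *
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
 *	...
 *	if (dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE))
 *		dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
 */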

void dp_print_ast_stats(struct dp_soc *soc);
QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
				  uint16_t hw_peer_id, uint8_t vdev_id,
				  uint8_t *peer_mac_addr, uint16_t ast_hash,
				  uint8_t is_wds);
void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			      uint8_t vdev_id, uint8_t *peer_mac_addr,
			      uint8_t is_wds, uint32_t free_wds_count);

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
 * @soc: generic soc handle
 * @peer_id: ML peer_id from firmware
 * @peer_mac_addr: mac address of the peer
 * @mlo_flow_info: MLO AST flow info
 * @mlo_link_info: MLO link info
 *
 * associate the ML peer_id that firmware provided with peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS
dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t *peer_mac_addr,
			   struct dp_mlo_flow_override_info *mlo_flow_info,
			   struct dp_mlo_link_info *mlo_link_info);

/**
 * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
 * @soc: generic soc handle
 * @peer_id: peer_id from firmware
 *
 * Return: none
 */
void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
#endif

void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
			   enum cdp_sec_type sec_type, int is_unicast,
			   u_int32_t *michael_key, u_int32_t *rx_pn);

QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
				   uint8_t tid, uint16_t win_sz);

uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
				    uint16_t peer_id, uint8_t *peer_mac);

QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
			   uint8_t *mac_addr,
			   enum cdp_txrx_ast_entry_type type,
			   uint32_t flags);

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);

void dp_peer_ast_unmap_handler(struct dp_soc *soc,
			       struct dp_ast_entry *ast_entry);

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags);

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr);

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry);

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry);

void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type);

void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

#ifdef WLAN_FEATURE_MULTI_AST_DEL
void dp_peer_ast_send_multi_wds_del(
		struct dp_soc *soc, uint8_t vdev_id,
		struct peer_del_multi_wds_entries *wds_list);
#endif

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status);

void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase);

void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry);

void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

/**
 * dp_peer_mec_detach_entry() - Detach the MEC entry
 * @soc: SoC handle
 * @mecentry: MEC entry of the node
 * @ptr: pointer to free list
 *
 * The MEC entry is detached from MEC table and added to free_list
 * to free the object outside lock
 *
 * Return: None
 */
void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
			      void *ptr);

/**
 * dp_peer_mec_free_list() - free the MEC entry from free_list
 * @soc: SoC handle
 * @ptr: pointer to free list
 *
 * Return: None
 */
void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);

/**
 * dp_peer_mec_add_entry() - Allocate and add a MEC entry to the MEC table
 * @soc: SoC handle
 * @vdev: vdev to which mec node belongs
 * @mac_addr: MAC address of mec node
 *
 * This function allocates and adds MEC entry to MEC table.
 * It assumes caller has taken the mec lock to protect the access to these
 * tables
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 uint8_t *mac_addr);

/**
 * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
 *				       within pdev
 * @soc: SoC handle
 * @pdev_id: pdev id
 * @mec_mac_addr: MAC address of mec node
 *
 * It assumes caller has taken the mec_lock to protect the access to
 * MEC hash table
 *
 * Return: MEC entry
 */
struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t pdev_id,
						     uint8_t *mec_mac_addr);
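
/*
 * Locking sketch (editor's illustrative example): the MEC add/find APIs
 * above expect the caller to hold the MEC lock. The lock field name
 * soc->mec_lock is an assumption here; the spinlock itself is created
 * via dp_peer_mec_spinlock_create() declared later in this header.
 *
 *	qdf_spin_lock_bh(&soc->mec_lock);	// assumed lock field name
 *	if (!dp_peer_mec_hash_find_by_pdevid(soc, pdev_id, mac))
 *		dp_peer_mec_add_entry(soc, vdev, mac);
 *	qdf_spin_unlock_bh(&soc->mec_lock);
 */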

#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc);\
			QDF_BUG(_condition); \
		} \
	} while (0)
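
/*
 * Note with a usage sketch (editor's addition): DP_AST_ASSERT expands to
 * a reference to a variable named soc, so it can only be used in scopes
 * where a struct dp_soc *soc is visible. The function and field below
 * are placeholders:
 *
 *	void dp_example(struct dp_soc *soc, struct dp_ast_entry *ase)
 *	{
 *		DP_AST_ASSERT(ase->peer_id != DP_INVALID_PEER_ID);
 *	}
 */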

/**
 * dp_peer_update_inactive_time - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf);

#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id: set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey: set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init: initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove: remove peer from special peer list
 * @peer: peer handle
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif

#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl: generic soc handle
 * @is_wds: flag to indicate if peer is wds
 * @peer_id: peer_id from htt peer map message
 * @peer_mac_addr: mac address of the peer
 * @ast_info: ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info);
#endif

/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);

#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer);
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer);
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
static inline
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif

#ifdef WLAN_PEER_JITTER
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer);
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer);
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
static inline
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif

#ifndef CONFIG_SAWF_DEF_QUEUES
static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
					       struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifndef CONFIG_SAWF
static inline
QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
					struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
				       struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}
#endif

struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   enum dp_mod_id mod_id);
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
						struct dp_vdev *vdev,
						enum dp_mod_id mod_id);

void dp_peer_ast_table_detach(struct dp_soc *soc);
void dp_peer_find_map_detach(struct dp_soc *soc);
void dp_soc_wds_detach(struct dp_soc *soc);
QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
void dp_soc_wds_attach(struct dp_soc *soc);
void dp_peer_mec_hash_detach(struct dp_soc *soc);
void dp_peer_ast_hash_detach(struct dp_soc *soc);

#ifdef FEATURE_AST
/*
 * dp_peer_delete_ast_entries() - Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received (for example,
	 * in case of auth disallow due to ACL); in such cases self ast
	 * is not added to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}
#else
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif

#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);

/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);

/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc: dp_soc handle
 * @peer: peer
 *
 * This function is used to send cache flush cmd to reo and
 * to register the callback to handle the dumping of the reo
 * queue stats from DDR
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(
	struct dp_soc *soc, struct dp_peer *peer);

/**
 * dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl: cdp_soc_t handle
 * @vdev_id: vdev id
 *
 * Handler to get rx tid info from DDR after h/w cache is
 * invalidated first using the cache flush cmd.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);

/**
 * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
 * @soc: dp_soc handle
 * @cb_ctxt: callback context
 * @reo_status: REO command status
 *
 * This is the callback function registered after sending the reo cmd
 * to flush the h/w cache and invalidate it. In the callback the reo
 * queue desc info is dumped from DDR.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(
	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
#else /* DUMP_REO_QUEUE_INFO_IN_DDR */

static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */

static inline int dp_peer_find_mac_addr_cmp(
	union dp_align_mac_addr *mac_addr1,
	union dp_align_mac_addr *mac_addr2)
{
	/*
	 * Intentionally use & rather than &&.
	 * because the operands are binary rather than generic boolean,
	 * the functionality is equivalent.
	 * Using && has the advantage of short-circuited evaluation,
	 * but using & has the advantage of no conditional branching,
	 * which is a more significant benefit.
	 */
	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
}
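
/*
 * Semantics note with a usage sketch (editor's addition): like memcmp(),
 * dp_peer_find_mac_addr_cmp() returns 0 when the two addresses match and
 * non-zero otherwise, so a match test is written negated:
 *
 *	if (!dp_peer_find_mac_addr_cmp(&peer1->mac_addr, &peer2->mac_addr)) {
 *		// addresses are equal
 *	}
 */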

/**
 * dp_peer_delete() - delete DP peer
 *
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
void dp_peer_delete(struct dp_soc *soc,
		    struct dp_peer *peer,
		    void *arg);

#ifdef WLAN_FEATURE_11BE_MLO

/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)

/* set peer type */
#define DP_PEER_SET_TYPE(_peer, _type_val) \
	((_peer)->peer_type = (_type_val))

/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))

/* is MLO connection link peer */
#define IS_MLO_DP_LINK_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)

/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_PEER(_peer) \
	((_peer)->peer_type == CDP_MLD_PEER_TYPE)

/* Get Mld peer from link peer */
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
	((link_peer)->mld_peer)

#ifdef WLAN_MLO_MULTI_CHIP
uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);

struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id);
#else
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
#endif

/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
 *				 increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
			       struct dp_peer *mld_peer)
{
	/* increase mld_peer ref_cnt */
	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = mld_peer;
}

/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
 *				 decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = NULL;
}

/**
 * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
	int i;

	qdf_spinlock_create(&mld_peer->link_peers_info_lock);
	mld_peer->num_links = 0;
	for (i = 0; i < DP_MAX_MLO_LINKS; i++)
		mld_peer->link_peers[i].is_valid = false;
}

/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}

/**
 * dp_mld_peer_add_link_peer() - add link peer info to mld peer
 * @mld_peer: mld dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
			       struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (!link_peer_info->is_valid) {
			qdf_mem_copy(link_peer_info->mac_addr.raw,
				     link_peer->mac_addr.raw,
				     QDF_MAC_ADDR_SIZE);
			link_peer_info->is_valid = true;
			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
			link_peer_info->chip_id =
				dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
			mld_peer->num_links++;
			break;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	if (i == DP_MAX_MLO_LINKS)
		dp_err("fail to add link peer " QDF_MAC_ADDR_FMT " to mld peer",
		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
}

/**
 * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
 * @mld_peer: MLD dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: number of links left after deletion
 */
static inline
uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
				  struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	uint8_t num_links;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid &&
		    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
					       &link_peer_info->mac_addr)) {
			link_peer_info->is_valid = false;
			mld_peer->num_links--;
			break;
		}
	}
	num_links = mld_peer->num_links;
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	if (i == DP_MAX_MLO_LINKS)
		dp_err("fail to del link peer " QDF_MAC_ADDR_FMT " from mld peer",
		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));

	return num_links;
}

/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
 *					   increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that holds link peers pointer array and number
 * @mod_id: id of module requesting reference
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
				struct dp_soc *soc,
				struct dp_peer *mld_peer,
				struct dp_mld_link_peers *mld_link_peers,
				enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i = 0, j = 0;
	struct dp_peer_link_info *link_peer_info;

	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid) {
			peer = dp_link_peer_hash_find_by_chip_id(
						soc,
						link_peer_info->mac_addr.raw,
						true,
						link_peer_info->vdev_id,
						link_peer_info->chip_id,
						mod_id);
			if (peer)
				mld_link_peers->link_peers[j++] = peer;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	mld_link_peers->num_links = j;
}

/**
 * dp_release_link_peers_ref() - release all link peers reference
 * @mld_link_peers: structure that holds link peers pointer array and number
 * @mod_id: id of module requesting reference
 *
 * Return: None.
 */
static inline
void dp_release_link_peers_ref(
			struct dp_mld_link_peers *mld_link_peers,
			enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i;

	for (i = 0; i < mld_link_peers->num_links; i++) {
		peer = mld_link_peers->link_peers[i];
		if (peer)
			dp_peer_unref_delete(peer, mod_id);
		mld_link_peers->link_peers[i] = NULL;
	}

	mld_link_peers->num_links = 0;
}
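
/*
 * Usage sketch (editor's illustrative example): walking the link peers
 * of an MLD peer; dp_get_link_peer_id_by_lmac_id() below follows the
 * same get-ref/release pattern.
 *
 *	struct dp_mld_link_peers link_peers_info;
 *	uint8_t i;
 *
 *	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, &link_peers_info,
 *					    DP_MOD_ID_CDP);
 *	for (i = 0; i < link_peers_info.num_links; i++) {
 *		struct dp_peer *link_peer = link_peers_info.link_peers[i];
 *		// ... per-link work ...
 *	}
 *	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
 */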

/**
 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
 * @soc: Datapath soc handle
 * @peer_id: peer id
 * @lmac_id: lmac id to find the link peer on given lmac
 *
 * Return: peer_id of link peer if found
 *         else return HTT_INVALID_PEER
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	uint8_t i;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_soc *link_peer_soc;
	struct dp_mld_link_peers link_peers_info;
	uint16_t link_peer_id = HTT_INVALID_PEER;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);

	if (!peer)
		return HTT_INVALID_PEER;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer,
						    &link_peers_info,
						    DP_MOD_ID_CDP);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			if ((link_peer_soc == soc) &&
			    (link_peer->vdev->pdev->lmac_id == lmac_id)) {
				link_peer_id = link_peer->peer_id;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		link_peer_id = peer_id;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return link_peer_id;
}

/**
 * dp_peer_get_tgt_peer_hash_find() - get MLD dp_peer handle
 *				      for processing
 * @soc: soc handle
 * @peer_mac: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * for MLO connection, get corresponding MLD peer,
 * otherwise get link peer for non-MLO case.
 *
 * return: peer in success
 *         NULL in failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find(soc,
						      peer_mac, 0, vdev_id,
						      mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;

			/* release peer reference added by hash find */
			dp_peer_unref_delete(peer, mod_id);
		} else {
			/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	}

	return ta_peer;
}

/**
 * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
 * @soc : core DP soc context
 * @peer_id : peer id from which peer object can be retrieved
 * @mod_id : ID of module requesting reference
 *
 * for MLO connection, get corresponding MLD peer,
 * otherwise get link peer for non-MLO case.
 *
 * return: peer in success
 *         NULL in failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;

			/* release peer reference added by id lookup */
			dp_peer_unref_delete(peer, mod_id);
		} else {
			/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	}

	return ta_peer;
}

/**
 * dp_peer_mlo_delete() - peer MLO related delete operation
 * @peer: DP peer handle
 *
 * Return: None
 */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
	struct dp_peer *ml_peer;
	struct dp_soc *soc;

	/* MLO connection link peer */
	if (IS_MLO_DP_LINK_PEER(peer)) {
		ml_peer = peer->mld_peer;
		soc = ml_peer->vdev->pdev->soc;

		/* if last link peer deletion, delete MLD peer */
		if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
			dp_peer_delete(soc, peer->mld_peer, NULL);
	}
}
  1371. /**
  1372. * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
  1373. * @soc: Soc handle
  1374. * @vdev_id: Vdev ID
  1375. * @peer_setup_info: peer setup information for MLO
  1376. */
  1377. QDF_STATUS dp_peer_mlo_setup(
  1378. struct dp_soc *soc,
  1379. struct dp_peer *peer,
  1380. uint8_t vdev_id,
  1381. struct cdp_peer_setup_info *setup_info);
  1382. /**
  1383. * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
  1384. * @peer: datapath peer
  1385. *
  1386. * Return: MLD peer in case of MLO Link peer
  1387. * Peer itself in other cases
  1388. */
  1389. static inline
  1390. struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
  1391. {
  1392. return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
  1393. }
/**
 * dp_get_primary_link_peer_by_id() - Get primary link peer from the given
 *                                    peer id
 * @soc: core DP soc context
 * @peer_id: peer id
 * @mod_id: ID of module requesting reference
 *
 * Return: primary link peer for the MLO peer
 *         legacy peer itself in case of legacy peer
 */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
                                               uint16_t peer_id,
                                               enum dp_mod_id mod_id)
{
        uint8_t i;
        struct dp_mld_link_peers link_peers_info;
        struct dp_peer *peer;
        struct dp_peer *link_peer;
        struct dp_peer *primary_peer = NULL;

        peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
        if (!peer)
                return NULL;

        if (IS_MLO_DP_MLD_PEER(peer)) {
                /* get link peers with reference */
                dp_get_link_peers_ref_from_mld_peer(soc, peer,
                                                    &link_peers_info, mod_id);
                for (i = 0; i < link_peers_info.num_links; i++) {
                        link_peer = link_peers_info.link_peers[i];
                        if (link_peer->primary_link) {
                                primary_peer = link_peer;
                                /*
                                 * Take an additional reference over the
                                 * primary link peer.
                                 */
                                dp_peer_get_ref(NULL, primary_peer, mod_id);
                                break;
                        }
                }
                /* release link peers reference */
                dp_release_link_peers_ref(&link_peers_info, mod_id);
                dp_peer_unref_delete(peer, mod_id);
        } else {
                primary_peer = peer;
        }

        return primary_peer;
}
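
/*
 * Example (illustrative sketch, not part of this header): a caller that
 * needs primary-link-specific state follows the same get-ref/unref
 * discipline. The helper name, the DP_MOD_ID_CDP mod_id, and the read of
 * the peer's authorize flag below are assumptions for illustration only.
 *
 *      static bool example_peer_is_authorized(struct dp_soc *soc,
 *                                             uint16_t peer_id)
 *      {
 *              struct dp_peer *peer;
 *              bool authorized;
 *
 *              peer = dp_get_primary_link_peer_by_id(soc, peer_id,
 *                                                    DP_MOD_ID_CDP);
 *              if (!peer)
 *                      return false;
 *
 *              authorized = peer->authorize;
 *              dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *
 *              return authorized;
 *      }
 */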
/**
 * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
 * @peer: Datapath peer
 *
 * Return: dp_txrx_peer from MLD peer if peer type is link peer
 *         dp_txrx_peer from peer itself for other cases
 */
static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
        return IS_MLO_DP_LINK_PEER(peer) ?
                        peer->mld_peer->txrx_peer : peer->txrx_peer;
}
/**
 * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
 * @peer: Datapath peer
 *
 * Return: true if peer is primary link peer or legacy peer
 *         false otherwise
 */
static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
        if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
                return true;
        else if (IS_DP_LEGACY_PEER(peer))
                return true;
        else
                return false;
}
/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Get tgt txrx peer for the given peer id
 * @soc: core DP soc context
 * @peer_id: peer id of the peer to be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
                               uint16_t peer_id,
                               dp_txrx_ref_handle *handle,
                               enum dp_mod_id mod_id)
{
        struct dp_peer *peer;
        struct dp_txrx_peer *txrx_peer;

        peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
        if (!peer)
                return NULL;

        txrx_peer = dp_get_txrx_peer(peer);
        if (txrx_peer) {
                *handle = (dp_txrx_ref_handle)peer;
                return txrx_peer;
        }

        dp_peer_unref_delete(peer, mod_id);
        return NULL;
}
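
/*
 * Example (illustrative sketch, not part of this header): the returned
 * handle wraps the underlying dp_peer reference, which the caller releases
 * through a matching call such as dp_txrx_peer_unref_delete(). The helper
 * name and the DP_MOD_ID_TX mod_id below are assumptions for illustration
 * only.
 *
 *      static void example_use_txrx_peer(struct dp_soc *soc,
 *                                        uint16_t peer_id)
 *      {
 *              dp_txrx_ref_handle handle = NULL;
 *              struct dp_txrx_peer *txrx_peer;
 *
 *              txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
 *                                                         &handle,
 *                                                         DP_MOD_ID_TX);
 *              if (!txrx_peer)
 *                      return;
 *
 *              // per-peer tx/rx state is accessed via txrx_peer here
 *              dp_txrx_peer_unref_delete(handle, DP_MOD_ID_TX);
 *      }
 */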
#else

#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false

#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */

/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) true
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL

static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
                                               uint8_t *peer_mac,
                                               int mac_addr_is_aligned,
                                               uint8_t vdev_id,
                                               enum dp_mod_id mod_id)
{
        return dp_peer_find_hash_find(soc, peer_mac,
                                      mac_addr_is_aligned, vdev_id,
                                      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
                                           uint16_t peer_id,
                                           enum dp_mod_id mod_id)
{
        return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
QDF_STATUS dp_peer_mlo_setup(
                struct dp_soc *soc,
                struct dp_peer *peer,
                uint8_t vdev_id,
                struct cdp_peer_setup_info *setup_info)
{
        return QDF_STATUS_SUCCESS;
}

static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
                           struct dp_peer *link_peer)
{
}

static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
        return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
                                  uint8_t *peer_mac_addr,
                                  int mac_addr_is_aligned,
                                  uint8_t vdev_id,
                                  uint8_t chip_id,
                                  enum dp_mod_id mod_id)
{
        return dp_peer_find_hash_find(soc, peer_mac_addr,
                                      mac_addr_is_aligned,
                                      vdev_id, mod_id);
}

static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
        return peer;
}

static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
                                               uint16_t peer_id,
                                               enum dp_mod_id mod_id)
{
        return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
        return peer->txrx_peer;
}

static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
        return true;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Get tgt txrx peer for the given peer id
 * @soc: core DP soc context
 * @peer_id: peer id of the peer to be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
                               uint16_t peer_id,
                               dp_txrx_ref_handle *handle,
                               enum dp_mod_id mod_id)
{
        return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}

static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
                                        uint8_t lmac_id)
{
        return peer_id;
}
#endif /* WLAN_FEATURE_11BE_MLO */
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/**
 * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
 * @soc: Soc handle
 * @peer: DP peer handle for ML peer
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_mlo_partner_chips_map(struct dp_soc *soc,
                              struct dp_peer *peer,
                              uint16_t peer_id);

/**
 * dp_mlo_partner_chips_unmap() - Unmap MLO peers from partner SOCs
 * @soc: Soc handle
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
                                uint16_t peer_id);
#else
static inline void dp_mlo_partner_chips_map(struct dp_soc *soc,
                                            struct dp_peer *peer,
                                            uint16_t peer_id)
{
}

static inline void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
                                              uint16_t peer_id)
{
}
#endif
static inline
QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
{
        uint8_t i;

        if (IS_MLO_DP_MLD_PEER(peer)) {
                dp_peer_info("skip for mld peer");
                return QDF_STATUS_SUCCESS;
        }

        if (peer->rx_tid) {
                QDF_BUG(0);
                dp_peer_err("peer rx_tid memory already exists");
                return QDF_STATUS_E_FAILURE;
        }

        peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
                                      sizeof(struct dp_rx_tid));
        if (!peer->rx_tid) {
                dp_err("failed to alloc tid for peer " QDF_MAC_ADDR_FMT,
                       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
                return QDF_STATUS_E_NOMEM;
        }

        qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
        for (i = 0; i < DP_MAX_TIDS; i++)
                qdf_spinlock_create(&peer->rx_tid[i].tid_lock);

        return QDF_STATUS_SUCCESS;
}
static inline
void dp_peer_rx_tids_destroy(struct dp_peer *peer)
{
        uint8_t i;

        if (!IS_MLO_DP_LINK_PEER(peer)) {
                for (i = 0; i < DP_MAX_TIDS; i++)
                        qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);

                qdf_mem_free(peer->rx_tid);
        }

        peer->rx_tid = NULL;
}
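
/*
 * Example (illustrative sketch, not part of this header): each entry of
 * the rx_tid array created above carries its own tid_lock, and per-TID
 * reorder state is accessed under it. The helper below is hypothetical
 * and only demonstrates the locking pattern.
 *
 *      static void example_with_tid_lock(struct dp_peer *peer, uint8_t tid)
 *      {
 *              struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
 *
 *              qdf_spin_lock_bh(&rx_tid->tid_lock);
 *              // per-TID reorder state is read/updated here
 *              qdf_spin_unlock_bh(&rx_tid->tid_lock);
 *      }
 */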
static inline
void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
{
        uint8_t i;

        qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
                     sizeof(struct dp_rx_tid_defrag));
        for (i = 0; i < DP_MAX_TIDS; i++)
                qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
}

static inline
void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
{
        uint8_t i;

        for (i = 0; i < DP_MAX_TIDS; i++)
                qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
}
#ifdef PEER_CACHE_RX_PKTS
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
        qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
        txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
        qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
                        DP_RX_CACHED_BUFQ_THRESH);
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
        qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
        qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
#else
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
#endif
#ifdef REO_SHARED_QREF_TABLE_EN
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
                                        struct dp_peer *peer);
#else
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
                                                      struct dp_peer *peer) {}
#endif

#endif /* _DP_PEER_H_ */