/* dp_peer.h */
  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #ifndef _DP_PEER_H_
  20. #define _DP_PEER_H_
  21. #include <qdf_types.h>
  22. #include <qdf_lock.h>
  23. #include "dp_types.h"
  24. #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
  25. #include "hal_reo.h"
  26. #endif
  27. #define DP_INVALID_PEER_ID 0xffff
  28. #define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
  29. #define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */
  30. #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
  31. #define DP_PEER_HASH_LOAD_MULT 2
  32. #define DP_PEER_HASH_LOAD_SHIFT 0
  33. /* Threshold for peer's cached buf queue beyond which frames are dropped */
  34. #define DP_RX_CACHED_BUFQ_THRESH 64
  35. #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
  36. #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
  37. #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
  38. #define dp_peer_info(params...) \
  39. __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
  40. #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
  41. #ifdef REO_QDESC_HISTORY
  42. enum reo_qdesc_event_type {
  43. REO_QDESC_UPDATE_CB = 0,
  44. REO_QDESC_FREE,
  45. };
  46. struct reo_qdesc_event {
  47. qdf_dma_addr_t qdesc_addr;
  48. uint64_t ts;
  49. enum reo_qdesc_event_type type;
  50. uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
  51. };
  52. #endif
  53. struct ast_del_ctxt {
  54. bool age;
  55. int del_count;
  56. };
  57. typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
  58. void *arg);
  59. void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
  60. void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
  61. struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
  62. uint8_t *peer_mac_addr,
  63. int mac_addr_is_aligned,
  64. uint8_t vdev_id,
  65. enum dp_mod_id id);
  66. bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
  67. /**
  68. * dp_peer_get_ref() - Returns peer object given the peer id
  69. *
  70. * @soc : core DP soc context
  71. * @peer : DP peer
  72. * @mod_id : id of module requesting the reference
  73. *
  74. * Return: QDF_STATUS_SUCCESS if reference held successfully
  75. * else QDF_STATUS_E_INVAL
  76. */
  77. static inline
  78. QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
  79. struct dp_peer *peer,
  80. enum dp_mod_id mod_id)
  81. {
  82. if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
  83. return QDF_STATUS_E_INVAL;
  84. if (mod_id > DP_MOD_ID_RX)
  85. qdf_atomic_inc(&peer->mod_refs[mod_id]);
  86. return QDF_STATUS_SUCCESS;
  87. }
  88. /**
  89. * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
  90. *
  91. * @soc : core DP soc context
  92. * @peer_id : peer id from peer object can be retrieved
  93. * @mod_id : module id
  94. *
  95. * Return: struct dp_peer*: Pointer to DP peer object
  96. */
  97. static inline struct dp_peer *
  98. __dp_peer_get_ref_by_id(struct dp_soc *soc,
  99. uint16_t peer_id,
  100. enum dp_mod_id mod_id)
  101. {
  102. struct dp_peer *peer;
  103. qdf_spin_lock_bh(&soc->peer_map_lock);
  104. peer = (peer_id >= soc->max_peer_id) ? NULL :
  105. soc->peer_id_to_obj_map[peer_id];
  106. if (!peer ||
  107. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  108. qdf_spin_unlock_bh(&soc->peer_map_lock);
  109. return NULL;
  110. }
  111. qdf_spin_unlock_bh(&soc->peer_map_lock);
  112. return peer;
  113. }
  114. /**
  115. * dp_peer_get_ref_by_id() - Returns peer object given the peer id
  116. * if peer state is active
  117. *
  118. * @soc : core DP soc context
  119. * @peer_id : peer id from peer object can be retrieved
  120. * @mod_id : ID ot module requesting reference
  121. *
  122. * Return: struct dp_peer*: Pointer to DP peer object
  123. */
  124. static inline
  125. struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
  126. uint16_t peer_id,
  127. enum dp_mod_id mod_id)
  128. {
  129. struct dp_peer *peer;
  130. qdf_spin_lock_bh(&soc->peer_map_lock);
  131. peer = (peer_id >= soc->max_peer_id) ? NULL :
  132. soc->peer_id_to_obj_map[peer_id];
  133. if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
  134. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  135. qdf_spin_unlock_bh(&soc->peer_map_lock);
  136. return NULL;
  137. }
  138. qdf_spin_unlock_bh(&soc->peer_map_lock);
  139. return peer;
  140. }
  141. /**
  142. * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
  143. *
  144. * @soc : core DP soc context
  145. * @peer_id : peer id from peer object can be retrieved
  146. * @handle : reference handle
  147. * @mod_id : ID ot module requesting reference
  148. *
  149. * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
  150. */
  151. static inline struct dp_txrx_peer *
  152. dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
  153. uint16_t peer_id,
  154. dp_txrx_ref_handle *handle,
  155. enum dp_mod_id mod_id)
  156. {
  157. struct dp_peer *peer;
  158. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  159. if (!peer)
  160. return NULL;
  161. if (!peer->txrx_peer) {
  162. dp_peer_unref_delete(peer, mod_id);
  163. return NULL;
  164. }
  165. *handle = (dp_txrx_ref_handle)peer;
  166. return peer->txrx_peer;
  167. }
  168. #ifdef PEER_CACHE_RX_PKTS
  169. /**
  170. * dp_rx_flush_rx_cached() - flush cached rx frames
  171. * @peer: peer
  172. * @drop: set flag to drop frames
  173. *
  174. * Return: None
  175. */
  176. void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
  177. #else
  178. static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
  179. {
  180. }
  181. #endif
  182. static inline void
  183. dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
  184. {
  185. qdf_spin_lock_bh(&peer->peer_info_lock);
  186. peer->state = OL_TXRX_PEER_STATE_DISC;
  187. qdf_spin_unlock_bh(&peer->peer_info_lock);
  188. dp_rx_flush_rx_cached(peer, true);
  189. }
  190. /**
  191. * dp_vdev_iterate_peer() - API to iterate through vdev peer list
  192. *
  193. * @vdev : DP vdev context
  194. * @func : function to be called for each peer
  195. * @arg : argument need to be passed to func
  196. * @mod_id : module_id
  197. *
  198. * Return: void
  199. */
  200. static inline void
  201. dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
  202. enum dp_mod_id mod_id)
  203. {
  204. struct dp_peer *peer;
  205. struct dp_peer *tmp_peer;
  206. struct dp_soc *soc = NULL;
  207. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  208. return;
  209. soc = vdev->pdev->soc;
  210. qdf_spin_lock_bh(&vdev->peer_list_lock);
  211. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  212. peer_list_elem,
  213. tmp_peer) {
  214. if (dp_peer_get_ref(soc, peer, mod_id) ==
  215. QDF_STATUS_SUCCESS) {
  216. (*func)(soc, peer, arg);
  217. dp_peer_unref_delete(peer, mod_id);
  218. }
  219. }
  220. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  221. }
  222. /**
  223. * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
  224. *
  225. * @pdev : DP pdev context
  226. * @func : function to be called for each peer
  227. * @arg : argument need to be passed to func
  228. * @mod_id : module_id
  229. *
  230. * Return: void
  231. */
  232. static inline void
  233. dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
  234. enum dp_mod_id mod_id)
  235. {
  236. struct dp_vdev *vdev;
  237. if (!pdev)
  238. return;
  239. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  240. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
  241. dp_vdev_iterate_peer(vdev, func, arg, mod_id);
  242. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  243. }
  244. /**
  245. * dp_soc_iterate_peer() - API to iterate through all peers of soc
  246. *
  247. * @soc : DP soc context
  248. * @func : function to be called for each peer
  249. * @arg : argument need to be passed to func
  250. * @mod_id : module_id
  251. *
  252. * Return: void
  253. */
  254. static inline void
  255. dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
  256. enum dp_mod_id mod_id)
  257. {
  258. struct dp_pdev *pdev;
  259. int i;
  260. if (!soc)
  261. return;
  262. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  263. pdev = soc->pdev_list[i];
  264. dp_pdev_iterate_peer(pdev, func, arg, mod_id);
  265. }
  266. }
  267. /**
  268. * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
  269. *
  270. * This API will cache the peers in local allocated memory and calls
  271. * iterate function outside the lock.
  272. *
  273. * As this API is allocating new memory it is suggested to use this
  274. * only when lock cannot be held
  275. *
  276. * @vdev : DP vdev context
  277. * @func : function to be called for each peer
  278. * @arg : argument need to be passed to func
  279. * @mod_id : module_id
  280. *
  281. * Return: void
  282. */
  283. static inline void
  284. dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
  285. dp_peer_iter_func *func,
  286. void *arg,
  287. enum dp_mod_id mod_id)
  288. {
  289. struct dp_peer *peer;
  290. struct dp_peer *tmp_peer;
  291. struct dp_soc *soc = NULL;
  292. struct dp_peer **peer_array = NULL;
  293. int i = 0;
  294. uint32_t num_peers = 0;
  295. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  296. return;
  297. num_peers = vdev->num_peers;
  298. soc = vdev->pdev->soc;
  299. peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
  300. if (!peer_array)
  301. return;
  302. qdf_spin_lock_bh(&vdev->peer_list_lock);
  303. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  304. peer_list_elem,
  305. tmp_peer) {
  306. if (i >= num_peers)
  307. break;
  308. if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
  309. peer_array[i] = peer;
  310. i = (i + 1);
  311. }
  312. }
  313. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  314. for (i = 0; i < num_peers; i++) {
  315. peer = peer_array[i];
  316. if (!peer)
  317. continue;
  318. (*func)(soc, peer, arg);
  319. dp_peer_unref_delete(peer, mod_id);
  320. }
  321. qdf_mem_free(peer_array);
  322. }
  323. /**
  324. * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
  325. *
  326. * This API will cache the peers in local allocated memory and calls
  327. * iterate function outside the lock.
  328. *
  329. * As this API is allocating new memory it is suggested to use this
  330. * only when lock cannot be held
  331. *
  332. * @pdev : DP pdev context
  333. * @func : function to be called for each peer
  334. * @arg : argument need to be passed to func
  335. * @mod_id : module_id
  336. *
  337. * Return: void
  338. */
  339. static inline void
  340. dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
  341. dp_peer_iter_func *func,
  342. void *arg,
  343. enum dp_mod_id mod_id)
  344. {
  345. struct dp_peer *peer;
  346. struct dp_peer *tmp_peer;
  347. struct dp_soc *soc = NULL;
  348. struct dp_vdev *vdev = NULL;
  349. struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
  350. int i = 0;
  351. int j = 0;
  352. uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
  353. if (!pdev || !pdev->soc)
  354. return;
  355. soc = pdev->soc;
  356. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  357. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  358. num_peers[i] = vdev->num_peers;
  359. peer_array[i] = qdf_mem_malloc(num_peers[i] *
  360. sizeof(struct dp_peer *));
  361. if (!peer_array[i])
  362. break;
  363. qdf_spin_lock_bh(&vdev->peer_list_lock);
  364. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  365. peer_list_elem,
  366. tmp_peer) {
  367. if (j >= num_peers[i])
  368. break;
  369. if (dp_peer_get_ref(soc, peer, mod_id) ==
  370. QDF_STATUS_SUCCESS) {
  371. peer_array[i][j] = peer;
  372. j = (j + 1);
  373. }
  374. }
  375. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  376. i = (i + 1);
  377. }
  378. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  379. for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
  380. if (!peer_array[i])
  381. break;
  382. for (j = 0; j < num_peers[i]; j++) {
  383. peer = peer_array[i][j];
  384. if (!peer)
  385. continue;
  386. (*func)(soc, peer, arg);
  387. dp_peer_unref_delete(peer, mod_id);
  388. }
  389. qdf_mem_free(peer_array[i]);
  390. }
  391. }
  392. /**
  393. * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
  394. *
  395. * This API will cache the peers in local allocated memory and calls
  396. * iterate function outside the lock.
  397. *
  398. * As this API is allocating new memory it is suggested to use this
  399. * only when lock cannot be held
  400. *
  401. * @soc : DP soc context
  402. * @func : function to be called for each peer
  403. * @arg : argument need to be passed to func
  404. * @mod_id : module_id
  405. *
  406. * Return: void
  407. */
  408. static inline void
  409. dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
  410. dp_peer_iter_func *func,
  411. void *arg,
  412. enum dp_mod_id mod_id)
  413. {
  414. struct dp_pdev *pdev;
  415. int i;
  416. if (!soc)
  417. return;
  418. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  419. pdev = soc->pdev_list[i];
  420. dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
  421. }
  422. }
  423. #ifdef DP_PEER_STATE_DEBUG
  424. #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
  425. do { \
  426. if (!(_condition)) { \
  427. dp_alert("Invalid state shift from %u to %u peer " \
  428. QDF_MAC_ADDR_FMT, \
  429. (_peer)->peer_state, (_new_state), \
  430. QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
  431. QDF_ASSERT(0); \
  432. } \
  433. } while (0)
  434. #else
  435. #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
  436. do { \
  437. if (!(_condition)) { \
  438. dp_alert("Invalid state shift from %u to %u peer " \
  439. QDF_MAC_ADDR_FMT, \
  440. (_peer)->peer_state, (_new_state), \
  441. QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
  442. } \
  443. } while (0)
  444. #endif
  445. /**
  446. * dp_peer_state_cmp() - compare dp peer state
  447. *
  448. * @peer : DP peer
  449. * @state : state
  450. *
  451. * Return: true if state matches with peer state
  452. * false if it does not match
  453. */
  454. static inline bool
  455. dp_peer_state_cmp(struct dp_peer *peer,
  456. enum dp_peer_state state)
  457. {
  458. bool is_status_equal = false;
  459. qdf_spin_lock_bh(&peer->peer_state_lock);
  460. is_status_equal = (peer->peer_state == state);
  461. qdf_spin_unlock_bh(&peer->peer_state_lock);
  462. return is_status_equal;
  463. }
  464. void dp_print_ast_stats(struct dp_soc *soc);
  465. QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  466. uint16_t hw_peer_id, uint8_t vdev_id,
  467. uint8_t *peer_mac_addr, uint16_t ast_hash,
  468. uint8_t is_wds);
  469. void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
  470. uint8_t vdev_id, uint8_t *peer_mac_addr,
  471. uint8_t is_wds, uint32_t free_wds_count);
  472. #ifdef DP_RX_UDP_OVER_PEER_ROAM
  473. /**
  474. * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
  475. * @soc - dp soc pointer
  476. * @vdev_id - vdev id
  477. * @peer_mac_addr - mac address of the peer
  478. *
  479. * This function resets the roamed peer auth status and mac address
  480. * after peer map indication of same peer is received from firmware.
  481. *
  482. * Return: None
  483. */
  484. void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
  485. uint8_t *peer_mac_addr);
  486. #else
  487. static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
  488. uint8_t *peer_mac_addr)
  489. {
  490. }
  491. #endif
  492. #ifdef WLAN_FEATURE_11BE_MLO
  493. /**
  494. * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
  495. * @soc_handle - genereic soc handle
  496. * @peer_id - ML peer_id from firmware
  497. * @peer_mac_addr - mac address of the peer
  498. * @mlo_ast_flow_info: MLO AST flow info
  499. * @mlo_link_info - MLO link info
  500. *
  501. * associate the ML peer_id that firmware provided with peer entry
  502. * and update the ast table in the host with the hw_peer_id.
  503. *
  504. * Return: QDF_STATUS code
  505. */
  506. QDF_STATUS
  507. dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  508. uint8_t *peer_mac_addr,
  509. struct dp_mlo_flow_override_info *mlo_flow_info,
  510. struct dp_mlo_link_info *mlo_link_info);
  511. /**
  512. * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
  513. * @soc_handle - genereic soc handle
  514. * @peeri_id - peer_id from firmware
  515. *
  516. * Return: none
  517. */
  518. void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
  519. #endif
  520. void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
  521. enum cdp_sec_type sec_type, int is_unicast,
  522. u_int32_t *michael_key, u_int32_t *rx_pn);
  523. QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
  524. uint8_t tid, uint16_t win_sz);
  525. uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
  526. uint16_t peer_id, uint8_t *peer_mac);
  527. QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
  528. uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
  529. uint32_t flags);
  530. void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
  531. void dp_peer_ast_unmap_handler(struct dp_soc *soc,
  532. struct dp_ast_entry *ast_entry);
  533. int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
  534. struct dp_ast_entry *ast_entry, uint32_t flags);
  535. struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
  536. uint8_t *ast_mac_addr,
  537. uint8_t pdev_id);
  538. struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
  539. uint8_t *ast_mac_addr,
  540. uint8_t vdev_id);
  541. struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
  542. uint8_t *ast_mac_addr);
  543. uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
  544. struct dp_ast_entry *ast_entry);
  545. uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
  546. struct dp_ast_entry *ast_entry);
  547. void dp_peer_ast_set_type(struct dp_soc *soc,
  548. struct dp_ast_entry *ast_entry,
  549. enum cdp_txrx_ast_entry_type type);
  550. void dp_peer_ast_send_wds_del(struct dp_soc *soc,
  551. struct dp_ast_entry *ast_entry,
  552. struct dp_peer *peer);
  553. #ifdef WLAN_FEATURE_MULTI_AST_DEL
  554. void dp_peer_ast_send_multi_wds_del(
  555. struct dp_soc *soc, uint8_t vdev_id,
  556. struct peer_del_multi_wds_entries *wds_list);
  557. #endif
  558. void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  559. struct cdp_soc *dp_soc,
  560. void *cookie,
  561. enum cdp_ast_free_status status);
  562. void dp_peer_ast_hash_remove(struct dp_soc *soc,
  563. struct dp_ast_entry *ase);
  564. void dp_peer_free_ast_entry(struct dp_soc *soc,
  565. struct dp_ast_entry *ast_entry);
  566. void dp_peer_unlink_ast_entry(struct dp_soc *soc,
  567. struct dp_ast_entry *ast_entry,
  568. struct dp_peer *peer);
  569. /**
  570. * dp_peer_mec_detach_entry() - Detach the MEC entry
  571. * @soc: SoC handle
  572. * @mecentry: MEC entry of the node
  573. * @ptr: pointer to free list
  574. *
  575. * The MEC entry is detached from MEC table and added to free_list
  576. * to free the object outside lock
  577. *
  578. * Return: None
  579. */
  580. void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
  581. void *ptr);
  582. /**
  583. * dp_peer_mec_free_list() - free the MEC entry from free_list
  584. * @soc: SoC handle
  585. * @ptr: pointer to free list
  586. *
  587. * Return: None
  588. */
  589. void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
  590. /**
  591. * dp_peer_mec_add_entry()
  592. * @soc: SoC handle
  593. * @vdev: vdev to which mec node belongs
  594. * @mac_addr: MAC address of mec node
  595. *
  596. * This function allocates and adds MEC entry to MEC table.
  597. * It assumes caller has taken the mec lock to protect the access to these
  598. * tables
  599. *
  600. * Return: QDF_STATUS
  601. */
  602. QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
  603. struct dp_vdev *vdev,
  604. uint8_t *mac_addr);
  605. /**
  606. * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
  607. * within pdev
  608. * @soc: SoC handle
  609. *
  610. * It assumes caller has taken the mec_lock to protect the access to
  611. * MEC hash table
  612. *
  613. * Return: MEC entry
  614. */
  615. struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
  616. uint8_t pdev_id,
  617. uint8_t *mec_mac_addr);
  618. #define DP_AST_ASSERT(_condition) \
  619. do { \
  620. if (!(_condition)) { \
  621. dp_print_ast_stats(soc);\
  622. QDF_BUG(_condition); \
  623. } \
  624. } while (0)
  625. /**
  626. * dp_peer_update_inactive_time - Update inactive time for peer
  627. * @pdev: pdev object
  628. * @tag_type: htt_tlv_tag type
  629. * #tag_buf: buf message
  630. */
  631. void
  632. dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
  633. uint32_t *tag_buf);
  634. #ifndef QCA_MULTIPASS_SUPPORT
  635. /**
  636. * dp_peer_set_vlan_id: set vlan_id for this peer
  637. * @cdp_soc: soc handle
  638. * @vdev_id: id of vdev object
  639. * @peer_mac: mac address
  640. * @vlan_id: vlan id for peer
  641. *
  642. * return: void
  643. */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
	/* no-op stub for builds without this feature */
}

/**
 * dp_set_vlan_groupkey() - set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * Return: QDF_STATUS_SUCCESS always (stub; feature compiled out)
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init() - initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
	/* no-op stub for builds without this feature */
}

/**
 * dp_peer_multipass_list_remove() - remove peer from special peer list
 * @peer: peer handle
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
	/* no-op stub for builds without this feature */
}
#else
/* Real implementations, provided when the feature is enabled. */
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif

#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
	/* no-op: peer multi-queue support compiled out */
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl: generic soc handle
 * @is_wds: flag to indicate if peer is wds
 * @peer_id: peer_id from htt peer map message
 * @peer_mac_addr: mac address of the peer
 * @ast_info: ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
	/* no-op: peer multi-queue support compiled out */
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info);
#endif
/**
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 *			   after deleting the entries (ie., setting valid=0)
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);

#ifdef QCA_PEER_EXT_STATS
/* Per-peer delay stats context: alloc/dealloc/clear implementations. */
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer);
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer);
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
/* Delay stats compiled out: alloc trivially succeeds, rest are no-ops. */
static inline
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif

#ifdef WLAN_PEER_JITTER
/* Per-peer jitter stats context: alloc/dealloc/clear implementations. */
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer);
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer);
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
/* Jitter stats compiled out: alloc trivially succeeds, rest are no-ops. */
static inline
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif

#ifndef CONFIG_SAWF_DEF_QUEUES
/* SAWF default-queue support compiled out: ctx alloc/free trivially succeed. */
static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
					       struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifndef CONFIG_SAWF
/* SAWF stats compiled out: stats ctx alloc/free trivially succeed. */
static inline
QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
					struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
				       struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}
#endif
/* BSS peer lookup on a vdev; takes a reference tagged with @mod_id. */
struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   enum dp_mod_id mod_id);
/* Self peer lookup on an STA vdev; takes a reference tagged with @mod_id. */
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
						struct dp_vdev *vdev,
						enum dp_mod_id mod_id);

/* Attach/detach helpers for the peer-find map, AST/MEC tables and WDS. */
void dp_peer_ast_table_detach(struct dp_soc *soc);
void dp_peer_find_map_detach(struct dp_soc *soc);
void dp_soc_wds_detach(struct dp_soc *soc);
QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
void dp_soc_wds_attach(struct dp_soc *soc);
void dp_peer_mec_hash_detach(struct dp_soc *soc);
void dp_peer_ast_hash_detach(struct dp_soc *soc);

#ifdef FEATURE_AST
/**
 * dp_peer_delete_ast_entries() - Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received(for ex in case
	 * of auth disallow due to ACL) in such cases self ast is not added
	 * to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}
	/*
	 * NOTE(review): entries are deleted while walking the list;
	 * DP_PEER_ITERATE_ASE_LIST presumably uses temp_ast_entry as a
	 * deletion-safe cursor - confirm against the macro definition.
	 */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}

/* Debug dump of a peer's AST entries; @arg is the iterator context. */
void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);
#else
static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
					     struct dp_peer *peer, void *arg)
{
	/* no-op: AST support compiled out */
}

static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	/* no-op: AST support compiled out */
}
#endif

#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);

/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);

/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
/* MEC support compiled out: all MEC helpers are no-ops. */
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc: dp_soc handle
 * @peer: peer
 *
 * This function is used to send cache flush cmd to reo and
 * to register the callback to handle the dumping of the reo
 * queue stats from DDR
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(
	struct dp_soc *soc, struct dp_peer *peer);

/**
 * dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl: cdp_soc_t handle
 * @vdev_id: vdev id
 *
 * Handler to get rx tid info from DDR after h/w cache is
 * invalidated first using the cache flush cmd.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);

/**
 * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
 * @soc: dp_soc handle
 * @cb_ctxt: callback context
 * @reo_status: REO command status
 *
 * This is the callback function registered after sending the reo cmd
 * to flush the h/w cache and invalidate it. In the callback the reo
 * queue desc info is dumped from DDR.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(
	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
#else /* DUMP_REO_QUEUE_INFO_IN_DDR */
static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
	/* no-op: REO queue DDR dump support compiled out */
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
  942. static inline int dp_peer_find_mac_addr_cmp(
  943. union dp_align_mac_addr *mac_addr1,
  944. union dp_align_mac_addr *mac_addr2)
  945. {
  946. /*
  947. * Intentionally use & rather than &&.
  948. * because the operands are binary rather than generic boolean,
  949. * the functionality is equivalent.
  950. * Using && has the advantage of short-circuited evaluation,
  951. * but using & has the advantage of no conditional branching,
  952. * which is a more significant benefit.
  953. */
  954. return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
  955. & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
  956. }
/**
 * dp_peer_delete() - delete DP peer
 *
 * @soc: Datatpath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
void dp_peer_delete(struct dp_soc *soc,
		    struct dp_peer *peer,
		    void *arg);

/**
 * dp_mlo_peer_delete() - delete MLO DP peer
 *
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);

#ifdef WLAN_FEATURE_11BE_MLO
/* is the txrx peer backed by an MLD peer (MLO connection)? */
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)

/* set peer type */
#define DP_PEER_SET_TYPE(_peer, _type_val) \
	((_peer)->peer_type = (_type_val))

/* is legacy (non-MLO) peer: link-type peer with no MLD peer attached */
#define IS_DP_LEGACY_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))

/* is MLO connection link peer: link-type peer with an MLD peer attached */
#define IS_MLO_DP_LINK_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)

/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_PEER(_peer) \
	((_peer)->peer_type == CDP_MLD_PEER_TYPE)

/* Get Mld peer from link peer (no reference taken) */
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
	((link_peer)->mld_peer)

#ifdef WLAN_MLO_MULTI_CHIP
/* Chip id of @soc within the multi-chip MLO group. */
uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);

/* Link peer hash lookup constrained to a specific chip. */
struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id);
#else
/* Single-chip build: the chip id is always 0. */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

/* Single-chip build: @chip_id is ignored, plain hash find is used. */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
#endif
  1024. /*
  1025. * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
  1026. * matching mac_address
  1027. * @soc: soc handle
  1028. * @peer_mac_addr: mld peer mac address
  1029. * @mac_addr_is_aligned: is mac addr alligned
  1030. * @vdev_id: vdev_id
  1031. * @mod_id: id of module requesting reference
  1032. *
  1033. * return: peer in sucsess
  1034. * NULL in failure
  1035. */
  1036. static inline
  1037. struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
  1038. uint8_t *peer_mac_addr,
  1039. int mac_addr_is_aligned,
  1040. uint8_t vdev_id,
  1041. enum dp_mod_id mod_id)
  1042. {
  1043. if (soc->arch_ops.mlo_peer_find_hash_find)
  1044. return soc->arch_ops.mlo_peer_find_hash_find(soc,
  1045. peer_mac_addr,
  1046. mac_addr_is_aligned,
  1047. mod_id, vdev_id);
  1048. return NULL;
  1049. }
  1050. /**
  1051. * dp_peer_hash_find_wrapper() - find link peer or mld per according to
  1052. peer_type
  1053. * @soc: DP SOC handle
  1054. * @peer_info: peer information for hash find
  1055. * @mod_id: ID of module requesting reference
  1056. *
  1057. * Return: peer hanlde
  1058. */
  1059. static inline
  1060. struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
  1061. struct cdp_peer_info *peer_info,
  1062. enum dp_mod_id mod_id)
  1063. {
  1064. struct dp_peer *peer = NULL;
  1065. if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
  1066. peer_info->peer_type == CDP_WILD_PEER_TYPE) {
  1067. peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
  1068. peer_info->mac_addr_is_aligned,
  1069. peer_info->vdev_id,
  1070. mod_id);
  1071. if (peer)
  1072. return peer;
  1073. }
  1074. if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
  1075. peer_info->peer_type == CDP_WILD_PEER_TYPE)
  1076. peer = dp_mld_peer_find_hash_find(
  1077. soc, peer_info->mac_addr,
  1078. peer_info->mac_addr_is_aligned,
  1079. peer_info->vdev_id,
  1080. mod_id);
  1081. return peer;
  1082. }
/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
 *				 increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
			       struct dp_peer *mld_peer)
{
	/*
	 * increase mld_peer ref_cnt; dropped in dp_link_peer_del_mld_peer().
	 * NOTE(review): the return value of dp_peer_get_ref() is ignored
	 * and the soc argument is NULL - confirm the ref cannot fail on
	 * this path.
	 */
	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = mld_peer;
}

/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
 *				 decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
	/* drop the reference taken in dp_link_peer_add_mld_peer() */
	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = NULL;
}

/**
 * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Creates the link-table spinlock and marks all DP_MAX_MLO_LINKS slots
 * invalid.
 *
 * Return: None
 */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
	int i;

	qdf_spinlock_create(&mld_peer->link_peers_info_lock);
	mld_peer->num_links = 0;
	for (i = 0; i < DP_MAX_MLO_LINKS; i++)
		mld_peer->link_peers[i].is_valid = false;
}

/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}
  1138. /**
  1139. * dp_mld_peer_add_link_peer() - add link peer info to mld peer
  1140. * @mld_peer: mld dp peer pointer
  1141. * @link_peer: link dp peer pointer
  1142. *
  1143. * Return: None
  1144. */
  1145. static inline
  1146. void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
  1147. struct dp_peer *link_peer)
  1148. {
  1149. int i;
  1150. struct dp_peer_link_info *link_peer_info;
  1151. qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
  1152. for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
  1153. link_peer_info = &mld_peer->link_peers[i];
  1154. if (!link_peer_info->is_valid) {
  1155. qdf_mem_copy(link_peer_info->mac_addr.raw,
  1156. link_peer->mac_addr.raw,
  1157. QDF_MAC_ADDR_SIZE);
  1158. link_peer_info->is_valid = true;
  1159. link_peer_info->vdev_id = link_peer->vdev->vdev_id;
  1160. link_peer_info->chip_id =
  1161. dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
  1162. mld_peer->num_links++;
  1163. break;
  1164. }
  1165. }
  1166. qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
  1167. if (i == DP_MAX_MLO_LINKS)
  1168. dp_err("fail to add link peer" QDF_MAC_ADDR_FMT "to mld peer",
  1169. QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
  1170. }
  1171. /**
  1172. * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
  1173. * @mld_peer: MLD dp peer pointer
  1174. * @link_peer: link dp peer pointer
  1175. *
  1176. * Return: number of links left after deletion
  1177. */
  1178. static inline
  1179. uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
  1180. struct dp_peer *link_peer)
  1181. {
  1182. int i;
  1183. struct dp_peer_link_info *link_peer_info;
  1184. uint8_t num_links;
  1185. qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
  1186. for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
  1187. link_peer_info = &mld_peer->link_peers[i];
  1188. if (link_peer_info->is_valid &&
  1189. !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
  1190. &link_peer_info->mac_addr)) {
  1191. link_peer_info->is_valid = false;
  1192. mld_peer->num_links--;
  1193. break;
  1194. }
  1195. }
  1196. num_links = mld_peer->num_links;
  1197. qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
  1198. if (i == DP_MAX_MLO_LINKS)
  1199. dp_err("fail to del link peer" QDF_MAC_ADDR_FMT "to mld peer",
  1200. QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
  1201. return num_links;
  1202. }
/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
 *					   increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: out param; holds the link peers pointer array and count
 * @mod_id: id of module requesting reference
 *
 * Every returned link peer carries a reference tagged with @mod_id; the
 * caller must release them with dp_release_link_peers_ref().
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
		struct dp_soc *soc,
		struct dp_peer *mld_peer,
		struct dp_mld_link_peers *mld_link_peers,
		enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i = 0, j = 0;
	struct dp_peer_link_info *link_peer_info;

	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid) {
			/* chip-aware lookup; table MAC is passed as aligned */
			peer = dp_link_peer_hash_find_by_chip_id(
						soc,
						link_peer_info->mac_addr.raw,
						true,
						link_peer_info->vdev_id,
						link_peer_info->chip_id,
						mod_id);
			/* slots whose peer can't be found are skipped */
			if (peer)
				mld_link_peers->link_peers[j++] = peer;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	mld_link_peers->num_links = j;
}
  1242. /**
  1243. * dp_release_link_peers_ref() - release all link peers reference
  1244. * @mld_link_peers: structure that hold links peers ponter array and number
  1245. * @mod_id: id of module requesting reference
  1246. *
  1247. * Return: None.
  1248. */
  1249. static inline
  1250. void dp_release_link_peers_ref(
  1251. struct dp_mld_link_peers *mld_link_peers,
  1252. enum dp_mod_id mod_id)
  1253. {
  1254. struct dp_peer *peer;
  1255. uint8_t i;
  1256. for (i = 0; i < mld_link_peers->num_links; i++) {
  1257. peer = mld_link_peers->link_peers[i];
  1258. if (peer)
  1259. dp_peer_unref_delete(peer, mod_id);
  1260. mld_link_peers->link_peers[i] = NULL;
  1261. }
  1262. mld_link_peers->num_links = 0;
  1263. }
/**
 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
 * @soc: Datapath soc handle
 * @peer_id: peer id
 * @lmac_id: lmac id to find the link peer on given lmac
 *
 * Return: peer_id of link peer if found
 *	   else return HTT_INVALID_PEER
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	uint8_t i;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_soc *link_peer_soc;
	struct dp_mld_link_peers link_peers_info;
	uint16_t link_peer_id = HTT_INVALID_PEER;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
	if (!peer)
		return HTT_INVALID_PEER;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    DP_MOD_ID_CDP);
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			/* must match both this soc and the lmac within it */
			if ((link_peer_soc == soc) &&
			    (link_peer->vdev->pdev->lmac_id == lmac_id)) {
				link_peer_id = link_peer->peer_id;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		/* non-MLD peer: its own id is the link peer id */
		link_peer_id = peer_id;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return link_peer_id;
}
  1307. /**
  1308. * dp_peer_get_tgt_peer_hash_find() - get dp_peer handle
  1309. * @soc: soc handle
  1310. * @peer_mac_addr: peer mac address
  1311. * @mac_addr_is_aligned: is mac addr alligned
  1312. * @vdev_id: vdev_id
  1313. * @mod_id: id of module requesting reference
  1314. *
  1315. * for MLO connection, get corresponding MLD peer,
  1316. * otherwise get link peer for non-MLO case.
  1317. *
  1318. * return: peer in success
  1319. * NULL in failure
  1320. */
  1321. static inline
  1322. struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
  1323. uint8_t *peer_mac,
  1324. int mac_addr_is_aligned,
  1325. uint8_t vdev_id,
  1326. enum dp_mod_id mod_id)
  1327. {
  1328. struct dp_peer *ta_peer = NULL;
  1329. struct dp_peer *peer = dp_peer_find_hash_find(soc,
  1330. peer_mac, 0, vdev_id,
  1331. mod_id);
  1332. if (peer) {
  1333. /* mlo connection link peer, get mld peer with reference */
  1334. if (IS_MLO_DP_LINK_PEER(peer)) {
  1335. /* increase mld peer ref_cnt */
  1336. if (QDF_STATUS_SUCCESS ==
  1337. dp_peer_get_ref(soc, peer->mld_peer, mod_id))
  1338. ta_peer = peer->mld_peer;
  1339. else
  1340. ta_peer = NULL;
  1341. /* relese peer reference that added by hash find */
  1342. dp_peer_unref_delete(peer, mod_id);
  1343. } else {
  1344. /* mlo MLD peer or non-mlo link peer */
  1345. ta_peer = peer;
  1346. }
  1347. } else {
  1348. dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT,
  1349. QDF_MAC_ADDR_REF(peer_mac));
  1350. }
  1351. return ta_peer;
  1352. }
/**
 * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
 * @soc: core DP soc context
 * @peer_id: peer id from peer object can be retrieved
 * @mod_id: ID of module requesting reference
 *
 * for MLO connection, get corresponding MLD peer,
 * otherwise get link peer for non-MLO case.
 *
 * Return: peer in success
 *	   NULL in failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;
			/* release peer reference that was added by id lookup */
			dp_peer_unref_delete(peer, mod_id);
		} else {
			/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	}

	return ta_peer;
}
  1390. /**
  1391. * dp_peer_mlo_delete() - peer MLO related delete operation
  1392. * @peer: DP peer handle
  1393. * Return: None
  1394. */
  1395. static inline
  1396. void dp_peer_mlo_delete(struct dp_peer *peer)
  1397. {
  1398. struct dp_peer *ml_peer;
  1399. struct dp_soc *soc;
  1400. /* MLO connection link peer */
  1401. if (IS_MLO_DP_LINK_PEER(peer)) {
  1402. ml_peer = peer->mld_peer;
  1403. soc = ml_peer->vdev->pdev->soc;
  1404. /* if last link peer deletion, delete MLD peer */
  1405. if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
  1406. dp_peer_delete(soc, peer->mld_peer, NULL);
  1407. }
  1408. }
/**
 * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
 * @soc: Soc handle
 * @peer: DP peer handle
 * @vdev_id: Vdev ID
 * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mlo_setup(
		struct dp_soc *soc,
		struct dp_peer *peer,
		uint8_t vdev_id,
		struct cdp_peer_setup_info *setup_info);
  1420. /**
  1421. * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
  1422. * @peer: datapath peer
  1423. *
  1424. * Return: MLD peer in case of MLO Link peer
  1425. * Peer itself in other cases
  1426. */
  1427. static inline
  1428. struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
  1429. {
  1430. return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
  1431. }
/**
 * dp_get_primary_link_peer_by_id() - Get primary link peer from the given
 *				      peer id
 * @soc: core DP soc context
 * @peer_id: peer id
 * @mod_id: ID of module requesting reference
 *
 * The returned peer carries a reference tagged with @mod_id which the
 * caller must release.
 *
 * Return: primary link peer for the MLO peer
 *	   legacy peer itself in case of legacy peer
 */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	uint8_t i;
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_peer *primary_peer = NULL;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	if (!peer)
		return NULL;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    mod_id);
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			if (link_peer->primary_link) {
				primary_peer = link_peer;
				/*
				 * Take additional reference over
				 * primary link peer.
				 */
				dp_peer_get_ref(NULL, primary_peer, mod_id);
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, mod_id);
		/* drop the MLD lookup ref; only the primary link ref is kept */
		dp_peer_unref_delete(peer, mod_id);
	} else {
		/* legacy peer: returned with the lookup reference held */
		primary_peer = peer;
	}

	return primary_peer;
}
  1479. /**
  1480. * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
  1481. * @peer: Datapath peer
  1482. *
  1483. * Return: dp_txrx_peer from MLD peer if peer type is link peer
  1484. * dp_txrx_peer from peer itself for other cases
  1485. */
  1486. static inline
  1487. struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
  1488. {
  1489. return IS_MLO_DP_LINK_PEER(peer) ?
  1490. peer->mld_peer->txrx_peer : peer->txrx_peer;
  1491. }
  1492. /**
  1493. * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
  1494. * @peer: Datapath peer
  1495. *
  1496. * Return: true if peer is primary link peer or legacy peer
  1497. * false otherwise
  1498. */
  1499. static inline
  1500. bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
  1501. {
  1502. if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
  1503. return true;
  1504. else if (IS_DP_LEGACY_PEER(peer))
  1505. return true;
  1506. else
  1507. return false;
  1508. }
  1509. /**
  1510. * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
  1511. *
  1512. * @soc : core DP soc context
  1513. * @peer_id : peer id from peer object can be retrieved
  1514. * @handle : reference handle
  1515. * @mod_id : ID ot module requesting reference
  1516. *
  1517. * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
  1518. */
  1519. static inline struct dp_txrx_peer *
  1520. dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
  1521. uint16_t peer_id,
  1522. dp_txrx_ref_handle *handle,
  1523. enum dp_mod_id mod_id)
  1524. {
  1525. struct dp_peer *peer;
  1526. struct dp_txrx_peer *txrx_peer;
  1527. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1528. if (!peer)
  1529. return NULL;
  1530. txrx_peer = dp_get_txrx_peer(peer);
  1531. if (txrx_peer) {
  1532. *handle = (dp_txrx_ref_handle)peer;
  1533. return txrx_peer;
  1534. }
  1535. dp_peer_unref_delete(peer, mod_id);
  1536. return NULL;
  1537. }
  1538. /**
  1539. * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
  1540. *
  1541. * @soc : core DP soc context
  1542. *
  1543. * Return: void
  1544. */
  1545. void dp_print_mlo_ast_stats_be(struct dp_soc *soc);
  1546. #else
  1547. #define IS_MLO_DP_MLD_TXRX_PEER(_peer) false
  1548. #define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
  1549. /* is legacy peer */
  1550. #define IS_DP_LEGACY_PEER(_peer) true
  1551. #define IS_MLO_DP_LINK_PEER(_peer) false
  1552. #define IS_MLO_DP_MLD_PEER(_peer) false
  1553. #define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL
  1554. static inline
  1555. struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
  1556. struct cdp_peer_info *peer_info,
  1557. enum dp_mod_id mod_id)
  1558. {
  1559. return dp_peer_find_hash_find(soc, peer_info->mac_addr,
  1560. peer_info->mac_addr_is_aligned,
  1561. peer_info->vdev_id,
  1562. mod_id);
  1563. }
  1564. static inline
  1565. struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
  1566. uint8_t *peer_mac,
  1567. int mac_addr_is_aligned,
  1568. uint8_t vdev_id,
  1569. enum dp_mod_id mod_id)
  1570. {
  1571. return dp_peer_find_hash_find(soc, peer_mac,
  1572. mac_addr_is_aligned, vdev_id,
  1573. mod_id);
  1574. }
  1575. static inline
  1576. struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
  1577. uint16_t peer_id,
  1578. enum dp_mod_id mod_id)
  1579. {
  1580. return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1581. }
  1582. static inline
  1583. QDF_STATUS dp_peer_mlo_setup(
  1584. struct dp_soc *soc,
  1585. struct dp_peer *peer,
  1586. uint8_t vdev_id,
  1587. struct cdp_peer_setup_info *setup_info)
  1588. {
  1589. return QDF_STATUS_SUCCESS;
  1590. }
  1591. static inline
  1592. void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
  1593. {
  1594. }
  1595. static inline
  1596. void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
  1597. {
  1598. }
  1599. static inline
  1600. void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
  1601. {
  1602. }
  1603. static inline
  1604. void dp_peer_mlo_delete(struct dp_peer *peer)
  1605. {
  1606. }
  1607. static inline
  1608. void dp_mlo_peer_authorize(struct dp_soc *soc,
  1609. struct dp_peer *link_peer)
  1610. {
  1611. }
  1612. static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
  1613. {
  1614. return 0;
  1615. }
  1616. static inline struct dp_peer *
  1617. dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
  1618. uint8_t *peer_mac_addr,
  1619. int mac_addr_is_aligned,
  1620. uint8_t vdev_id,
  1621. uint8_t chip_id,
  1622. enum dp_mod_id mod_id)
  1623. {
  1624. return dp_peer_find_hash_find(soc, peer_mac_addr,
  1625. mac_addr_is_aligned,
  1626. vdev_id, mod_id);
  1627. }
  1628. static inline
  1629. struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
  1630. {
  1631. return peer;
  1632. }
  1633. static inline
  1634. struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
  1635. uint16_t peer_id,
  1636. enum dp_mod_id mod_id)
  1637. {
  1638. return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1639. }
/**
 * dp_get_txrx_peer() - get the txrx peer associated with a dp peer
 * @peer: DP peer handle; must be valid (no NULL check is done here)
 *
 * Return: peer->txrx_peer (may be NULL if not yet attached)
 */
static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return peer->txrx_peer;
}
/**
 * dp_peer_is_primary_link_peer() - check if peer is the primary link peer
 * @peer: DP peer handle
 *
 * Without 11BE MLO every peer is trivially its own primary link peer.
 *
 * Return: true always
 */
static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	return true;
}
/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for the given peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @handle: reference handle, filled for later reference release
 * @mod_id: ID of module requesting the reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)
{
	return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}
/**
 * dp_get_link_peer_id_by_lmac_id() - get link peer id for an lmac id
 * @soc: core DP soc context
 * @peer_id: peer id
 * @lmac_id: lmac id (unused in this configuration)
 *
 * Without 11BE MLO there is no per-lmac link peer mapping.
 *
 * Return: the input peer_id unchanged
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	return peer_id;
}
/**
 * dp_print_mlo_ast_stats_be() - print MLO AST stats
 * @soc: core DP soc context
 *
 * No-op stub: MLO AST stats exist only when 11BE MLO is enabled.
 *
 * Return: None
 */
static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
{
}
  1677. #endif /* WLAN_FEATURE_11BE_MLO */
  1678. #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
  1679. /**
  1680. * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
  1681. * @soc: Soc handle
  1682. * @peer: DP peer handle for ML peer
  1683. * @peer_id: peer_id
  1684. * Return: None
  1685. */
  1686. void dp_mlo_partner_chips_map(struct dp_soc *soc,
  1687. struct dp_peer *peer,
  1688. uint16_t peer_id);
  1689. /**
  1690. * dp_mlo_partner_chips_unmap() - Unmap MLO peers to partner SOCs
  1691. * @soc: Soc handle
  1692. * @peer_id: peer_id
  1693. * Return: None
  1694. */
  1695. void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
  1696. uint16_t peer_id);
  1697. #else
/* No-op stub: partner-chip peer mapping applies only to multi-chip MLO */
static inline void dp_mlo_partner_chips_map(struct dp_soc *soc,
					    struct dp_peer *peer,
					    uint16_t peer_id)
{
}
/* No-op stub: partner-chip peer unmapping applies only to multi-chip MLO */
static inline void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
					      uint16_t peer_id)
{
}
  1707. #endif
  1708. static inline
  1709. QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
  1710. {
  1711. uint8_t i;
  1712. if (IS_MLO_DP_MLD_PEER(peer)) {
  1713. dp_peer_info("skip for mld peer");
  1714. return QDF_STATUS_SUCCESS;
  1715. }
  1716. if (peer->rx_tid) {
  1717. QDF_BUG(0);
  1718. dp_peer_err("peer rx_tid mem already exist");
  1719. return QDF_STATUS_E_FAILURE;
  1720. }
  1721. peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
  1722. sizeof(struct dp_rx_tid));
  1723. if (!peer->rx_tid) {
  1724. dp_err("fail to alloc tid for peer" QDF_MAC_ADDR_FMT,
  1725. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  1726. return QDF_STATUS_E_NOMEM;
  1727. }
  1728. qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
  1729. for (i = 0; i < DP_MAX_TIDS; i++)
  1730. qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
  1731. return QDF_STATUS_SUCCESS;
  1732. }
  1733. static inline
  1734. void dp_peer_rx_tids_destroy(struct dp_peer *peer)
  1735. {
  1736. uint8_t i;
  1737. if (!IS_MLO_DP_LINK_PEER(peer)) {
  1738. for (i = 0; i < DP_MAX_TIDS; i++)
  1739. qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);
  1740. qdf_mem_free(peer->rx_tid);
  1741. }
  1742. peer->rx_tid = NULL;
  1743. }
  1744. static inline
  1745. void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
  1746. {
  1747. uint8_t i;
  1748. qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
  1749. sizeof(struct dp_rx_tid_defrag));
  1750. for (i = 0; i < DP_MAX_TIDS; i++)
  1751. qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
  1752. }
  1753. static inline
  1754. void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
  1755. {
  1756. uint8_t i;
  1757. for (i = 0; i < DP_MAX_TIDS; i++)
  1758. qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
  1759. }
  1760. #ifdef PEER_CACHE_RX_PKTS
  1761. static inline
  1762. void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
  1763. {
  1764. qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
  1765. txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
  1766. qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
  1767. DP_RX_CACHED_BUFQ_THRESH);
  1768. }
/**
 * dp_peer_rx_bufq_resources_deinit() - free cached-rx buffer queue resources
 * @txrx_peer: DP txrx_peer handle
 *
 * Destroys the cached buffer list first, then the lock protecting it.
 *
 * Return: None
 */
static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
	qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
  1775. #else
/* No-op stub: rx packet caching (PEER_CACHE_RX_PKTS) is disabled */
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}
/* No-op stub: rx packet caching (PEER_CACHE_RX_PKTS) is disabled */
static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
  1784. #endif
  1785. /**
  1786. * dp_peer_update_state() - update dp peer state
  1787. *
  1788. * @soc : core DP soc context
  1789. * @peer : DP peer
  1790. * @state : new state
  1791. *
  1792. * Return: None
  1793. */
  1794. static inline void
  1795. dp_peer_update_state(struct dp_soc *soc,
  1796. struct dp_peer *peer,
  1797. enum dp_peer_state state)
  1798. {
  1799. uint8_t peer_state;
  1800. qdf_spin_lock_bh(&peer->peer_state_lock);
  1801. peer_state = peer->peer_state;
  1802. switch (state) {
  1803. case DP_PEER_STATE_INIT:
  1804. DP_PEER_STATE_ASSERT
  1805. (peer, state, (peer_state != DP_PEER_STATE_ACTIVE) ||
  1806. (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
  1807. break;
  1808. case DP_PEER_STATE_ACTIVE:
  1809. DP_PEER_STATE_ASSERT(peer, state,
  1810. (peer_state == DP_PEER_STATE_INIT));
  1811. break;
  1812. case DP_PEER_STATE_LOGICAL_DELETE:
  1813. DP_PEER_STATE_ASSERT(peer, state,
  1814. (peer_state == DP_PEER_STATE_ACTIVE) ||
  1815. (peer_state == DP_PEER_STATE_INIT));
  1816. break;
  1817. case DP_PEER_STATE_INACTIVE:
  1818. if (IS_MLO_DP_MLD_PEER(peer))
  1819. DP_PEER_STATE_ASSERT
  1820. (peer, state,
  1821. (peer_state == DP_PEER_STATE_ACTIVE));
  1822. else
  1823. DP_PEER_STATE_ASSERT
  1824. (peer, state,
  1825. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  1826. break;
  1827. case DP_PEER_STATE_FREED:
  1828. if (peer->sta_self_peer)
  1829. DP_PEER_STATE_ASSERT
  1830. (peer, state, (peer_state == DP_PEER_STATE_INIT));
  1831. else
  1832. DP_PEER_STATE_ASSERT
  1833. (peer, state,
  1834. (peer_state == DP_PEER_STATE_INACTIVE) ||
  1835. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  1836. break;
  1837. default:
  1838. qdf_spin_unlock_bh(&peer->peer_state_lock);
  1839. dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
  1840. state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  1841. return;
  1842. }
  1843. peer->peer_state = state;
  1844. qdf_spin_unlock_bh(&peer->peer_state_lock);
  1845. dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
  1846. peer_state, state,
  1847. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  1848. }
  1849. #ifdef REO_SHARED_QREF_TABLE_EN
  1850. void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
  1851. struct dp_peer *peer);
  1852. #else
/* No-op stub: shared REO queue-ref table (REO_SHARED_QREF_TABLE_EN) disabled */
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
						      struct dp_peer *peer) {}
  1855. #endif
  1856. #endif /* _DP_PEER_H_ */