dp_peer.h
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_PEER_H_
#define _DP_PEER_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_types.h"

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
#include "hal_reo.h"
#endif

#define DP_INVALID_PEER_ID 0xffff

#define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
#define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

#define DP_PEER_HASH_LOAD_MULT 2
#define DP_PEER_HASH_LOAD_SHIFT 0

/* Threshold for peer's cached buf queue beyond which frames are dropped */
#define DP_RX_CACHED_BUFQ_THRESH 64

#define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_info(params...) \
    __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
#define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)

#ifdef REO_QDESC_HISTORY
enum reo_qdesc_event_type {
    REO_QDESC_UPDATE_CB = 0,
    REO_QDESC_FREE,
};

struct reo_qdesc_event {
    qdf_dma_addr_t qdesc_addr;
    uint64_t ts;
    enum reo_qdesc_event_type type;
    uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
};
#endif

struct ast_del_ctxt {
    bool age;
    int del_count;
};

typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
                               void *arg);

void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);

struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
                                       uint8_t *peer_mac_addr,
                                       int mac_addr_is_aligned,
                                       uint8_t vdev_id,
                                       enum dp_mod_id id);

bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);

#ifdef DP_UMAC_HW_RESET_SUPPORT
void dp_reset_tid_q_setup(struct dp_soc *soc);
#endif

/**
 * dp_peer_get_ref() - Acquire a reference on the given peer object
 *
 * @soc: core DP soc context
 * @peer: DP peer
 * @mod_id: id of module requesting the reference
 *
 * Return: QDF_STATUS_SUCCESS if the reference was held successfully,
 *         else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
                           struct dp_peer *peer,
                           enum dp_mod_id mod_id)
{
    if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
        return QDF_STATUS_E_INVAL;

    if (mod_id > DP_MOD_ID_RX)
        qdf_atomic_inc(&peer->mod_refs[mod_id]);

    return QDF_STATUS_SUCCESS;
}
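
/*
 * Usage sketch (illustrative only, not part of this API): every successful
 * dp_peer_get_ref() must be balanced by dp_peer_unref_delete() with the
 * same module id, otherwise the peer can never be freed. Assumes valid
 * 'soc' and 'peer' pointers in scope.
 *
 *    if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CDP) == QDF_STATUS_SUCCESS) {
 *        // ... safely use 'peer' here ...
 *        dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *    }
 */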

/**
 * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @mod_id: module id
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline struct dp_peer *
__dp_peer_get_ref_by_id(struct dp_soc *soc,
                        uint16_t peer_id,
                        enum dp_mod_id mod_id)
{
    struct dp_peer *peer;

    qdf_spin_lock_bh(&soc->peer_map_lock);
    peer = (peer_id >= soc->max_peer_id) ? NULL :
            soc->peer_id_to_obj_map[peer_id];
    if (!peer ||
        (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
        qdf_spin_unlock_bh(&soc->peer_map_lock);
        return NULL;
    }
    qdf_spin_unlock_bh(&soc->peer_map_lock);

    return peer;
}

/**
 * dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *                           if peer state is active
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline
struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
                                      uint16_t peer_id,
                                      enum dp_mod_id mod_id)
{
    struct dp_peer *peer;

    qdf_spin_lock_bh(&soc->peer_map_lock);
    peer = (peer_id >= soc->max_peer_id) ? NULL :
            soc->peer_id_to_obj_map[peer_id];
    if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
        (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
        qdf_spin_unlock_bh(&soc->peer_map_lock);
        return NULL;
    }
    qdf_spin_unlock_bh(&soc->peer_map_lock);

    return peer;
}
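
/*
 * Usage sketch (illustrative only): look a peer up by id, use it, then
 * drop the reference. The returned pointer is NULL if the id is out of
 * range, the peer is being logically deleted, or the reference could
 * not be taken.
 *
 *    struct dp_peer *peer;
 *
 *    peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
 *    if (peer) {
 *        // ... use 'peer' ...
 *        dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *    }
 */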

/**
 * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
                           uint16_t peer_id,
                           dp_txrx_ref_handle *handle,
                           enum dp_mod_id mod_id)
{
    struct dp_peer *peer;

    peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
    if (!peer)
        return NULL;

    if (!peer->txrx_peer) {
        dp_peer_unref_delete(peer, mod_id);
        return NULL;
    }

    *handle = (dp_txrx_ref_handle)peer;

    return peer->txrx_peer;
}
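
/*
 * Usage sketch (illustrative only): the reference is held on the backing
 * dp_peer, so the opaque handle filled in by this call, not the returned
 * txrx peer, is what must be passed to dp_txrx_peer_unref_delete().
 *
 *    struct dp_txrx_peer *txrx_peer;
 *    dp_txrx_ref_handle ref_handle = NULL;
 *
 *    txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, &ref_handle,
 *                                           DP_MOD_ID_CDP);
 *    if (txrx_peer) {
 *        // ... use 'txrx_peer' ...
 *        dp_txrx_peer_unref_delete(ref_handle, DP_MOD_ID_CDP);
 *    }
 */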

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
    qdf_spin_lock_bh(&peer->peer_info_lock);
    peer->state = OL_TXRX_PEER_STATE_DISC;
    qdf_spin_unlock_bh(&peer->peer_info_lock);

    dp_rx_flush_rx_cached(peer, true);
}

/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev: DP vdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
                     enum dp_mod_id mod_id)
{
    struct dp_peer *peer;
    struct dp_peer *tmp_peer;
    struct dp_soc *soc = NULL;

    if (!vdev || !vdev->pdev || !vdev->pdev->soc)
        return;

    soc = vdev->pdev->soc;

    qdf_spin_lock_bh(&vdev->peer_list_lock);
    TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
                       peer_list_elem,
                       tmp_peer) {
        if (dp_peer_get_ref(soc, peer, mod_id) ==
                QDF_STATUS_SUCCESS) {
            (*func)(soc, peer, arg);
            dp_peer_unref_delete(peer, mod_id);
        }
    }
    qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
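
/*
 * Usage sketch (illustrative only): callbacks match the dp_peer_iter_func
 * typedef and run with the vdev peer-list spinlock held, so they must not
 * sleep or take a lock that can invert with it. 'count_peer' below is a
 * hypothetical example callback, not an existing helper.
 *
 *    static void count_peer(struct dp_soc *soc, struct dp_peer *peer,
 *                           void *arg)
 *    {
 *        (*(uint32_t *)arg)++;
 *    }
 *
 *    uint32_t num = 0;
 *
 *    dp_vdev_iterate_peer(vdev, count_peer, &num, DP_MOD_ID_CDP);
 */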

/**
 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
 *
 * @pdev: DP pdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
                     enum dp_mod_id mod_id)
{
    struct dp_vdev *vdev;

    if (!pdev)
        return;

    qdf_spin_lock_bh(&pdev->vdev_list_lock);
    DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
        dp_vdev_iterate_peer(vdev, func, arg, mod_id);
    qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

/**
 * dp_soc_iterate_peer() - API to iterate through all peers of soc
 *
 * @soc: DP soc context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
                    enum dp_mod_id mod_id)
{
    struct dp_pdev *pdev;
    int i;

    if (!soc)
        return;

    for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
        pdev = soc->pdev_list[i];
        dp_pdev_iterate_peer(pdev, func, arg, mod_id);
    }
}

/**
 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev peer list
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterator function outside the lock.
 *
 * As this API allocates new memory, it should be used only when the
 * lock cannot be held across the callback.
 *
 * @vdev: DP vdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
                               dp_peer_iter_func *func,
                               void *arg,
                               enum dp_mod_id mod_id)
{
    struct dp_peer *peer;
    struct dp_peer *tmp_peer;
    struct dp_soc *soc = NULL;
    struct dp_peer **peer_array = NULL;
    int i = 0;
    uint32_t num_peers = 0;

    if (!vdev || !vdev->pdev || !vdev->pdev->soc)
        return;

    num_peers = vdev->num_peers;

    soc = vdev->pdev->soc;

    peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
    if (!peer_array)
        return;

    qdf_spin_lock_bh(&vdev->peer_list_lock);
    TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
                       peer_list_elem,
                       tmp_peer) {
        if (i >= num_peers)
            break;

        if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
            peer_array[i] = peer;
            i = (i + 1);
        }
    }
    qdf_spin_unlock_bh(&vdev->peer_list_lock);

    for (i = 0; i < num_peers; i++) {
        peer = peer_array[i];
        if (!peer)
            continue;

        (*func)(soc, peer, arg);
        dp_peer_unref_delete(peer, mod_id);
    }

    qdf_mem_free(peer_array);
}

/**
 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterator function outside the lock.
 *
 * As this API allocates new memory, it should be used only when the
 * lock cannot be held across the callback.
 *
 * @pdev: DP pdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
                               dp_peer_iter_func *func,
                               void *arg,
                               enum dp_mod_id mod_id)
{
    struct dp_peer *peer;
    struct dp_peer *tmp_peer;
    struct dp_soc *soc = NULL;
    struct dp_vdev *vdev = NULL;
    struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
    int i = 0;
    int j = 0;
    uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};

    if (!pdev || !pdev->soc)
        return;

    soc = pdev->soc;

    qdf_spin_lock_bh(&pdev->vdev_list_lock);
    DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
        /* restart the slot index for this vdev's cache; carrying j
         * across vdevs would skip peers of subsequent vdevs
         */
        j = 0;
        num_peers[i] = vdev->num_peers;
        peer_array[i] = qdf_mem_malloc(num_peers[i] *
                                       sizeof(struct dp_peer *));
        if (!peer_array[i])
            break;

        qdf_spin_lock_bh(&vdev->peer_list_lock);
        TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
                           peer_list_elem,
                           tmp_peer) {
            if (j >= num_peers[i])
                break;

            if (dp_peer_get_ref(soc, peer, mod_id) ==
                    QDF_STATUS_SUCCESS) {
                peer_array[i][j] = peer;
                j = (j + 1);
            }
        }
        qdf_spin_unlock_bh(&vdev->peer_list_lock);
        i = (i + 1);
    }
    qdf_spin_unlock_bh(&pdev->vdev_list_lock);

    for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
        if (!peer_array[i])
            break;

        for (j = 0; j < num_peers[i]; j++) {
            peer = peer_array[i][j];
            if (!peer)
                continue;

            (*func)(soc, peer, arg);
            dp_peer_unref_delete(peer, mod_id);
        }

        qdf_mem_free(peer_array[i]);
    }
}

/**
 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterator function outside the lock.
 *
 * As this API allocates new memory, it should be used only when the
 * lock cannot be held across the callback.
 *
 * @soc: DP soc context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
                              dp_peer_iter_func *func,
                              void *arg,
                              enum dp_mod_id mod_id)
{
    struct dp_pdev *pdev;
    int i;

    if (!soc)
        return;

    for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
        pdev = soc->pdev_list[i];
        dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
    }
}
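
/*
 * Usage sketch (illustrative only): the *_lock_safe variants are for
 * callbacks that may sleep, take the peer-list locks themselves, or
 * delete peers, since the callback runs outside the list locks on a
 * referenced snapshot. 'flush_peer' below is a hypothetical callback.
 *
 *    static void flush_peer(struct dp_soc *soc, struct dp_peer *peer,
 *                           void *arg)
 *    {
 *        // ... work that must run outside the peer-list lock ...
 *    }
 *
 *    dp_soc_iterate_peer_lock_safe(soc, flush_peer, NULL, DP_MOD_ID_CDP);
 */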

#ifdef DP_PEER_STATE_DEBUG
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
    do { \
        if (!(_condition)) { \
            dp_alert("Invalid state shift from %u to %u peer " \
                     QDF_MAC_ADDR_FMT, \
                     (_peer)->peer_state, (_new_state), \
                     QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
            QDF_ASSERT(0); \
        } \
    } while (0)
#else
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
    do { \
        if (!(_condition)) { \
            dp_alert("Invalid state shift from %u to %u peer " \
                     QDF_MAC_ADDR_FMT, \
                     (_peer)->peer_state, (_new_state), \
                     QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
        } \
    } while (0)
#endif

/**
 * dp_peer_state_cmp() - compare dp peer state
 *
 * @peer: DP peer
 * @state: state to compare against
 *
 * Return: true if the given state matches the peer state,
 *         false if it does not match
 */
static inline bool
dp_peer_state_cmp(struct dp_peer *peer,
                  enum dp_peer_state state)
{
    bool is_status_equal = false;

    qdf_spin_lock_bh(&peer->peer_state_lock);
    is_status_equal = (peer->peer_state == state);
    qdf_spin_unlock_bh(&peer->peer_state_lock);

    return is_status_equal;
}
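
/*
 * Usage sketch (illustrative only): read the state under the peer state
 * lock rather than peeking at peer->peer_state directly, e.g.:
 *
 *    if (dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
 *        // ... peer was active at the time of the check ...
 *    }
 *
 * DP_PEER_STATE_ACTIVE is assumed here to be one of the dp_peer_state
 * values defined alongside DP_PEER_STATE_LOGICAL_DELETE in dp_types.h.
 */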

void dp_print_ast_stats(struct dp_soc *soc);

QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
                                  uint16_t hw_peer_id, uint8_t vdev_id,
                                  uint8_t *peer_mac_addr, uint16_t ast_hash,
                                  uint8_t is_wds);
void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
                              uint8_t vdev_id, uint8_t *peer_mac_addr,
                              uint8_t is_wds, uint32_t free_wds_count);

#ifdef DP_RX_UDP_OVER_PEER_ROAM
/**
 * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
 * @soc: dp soc pointer
 * @vdev_id: vdev id
 * @peer_mac_addr: mac address of the peer
 *
 * This function resets the roamed peer auth status and mac address
 * after a peer map indication for the same peer is received from firmware.
 *
 * Return: None
 */
void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
                              uint8_t *peer_mac_addr);
#else
static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
                                            uint8_t *peer_mac_addr)
{
}
#endif

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
 * @soc: generic soc handle
 * @peer_id: ML peer_id from firmware
 * @peer_mac_addr: mac address of the peer
 * @mlo_flow_info: MLO AST flow override info
 * @mlo_link_info: MLO link info
 *
 * Associate the ML peer_id that firmware provided with the peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS
dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
                           uint8_t *peer_mac_addr,
                           struct dp_mlo_flow_override_info *mlo_flow_info,
                           struct dp_mlo_link_info *mlo_link_info);

/**
 * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
 * @soc: generic soc handle
 * @peer_id: peer_id from firmware
 *
 * Return: none
 */
void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
#endif

void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
                           enum cdp_sec_type sec_type, int is_unicast,
                           u_int32_t *michael_key, u_int32_t *rx_pn);

QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
                                   uint8_t tid, uint16_t win_sz);

uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
                                    uint16_t peer_id, uint8_t *peer_mac);

QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
                           uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
                           uint32_t flags);

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);

void dp_peer_ast_unmap_handler(struct dp_soc *soc,
                               struct dp_ast_entry *ast_entry);

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
                       struct dp_ast_entry *ast_entry, uint32_t flags);

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
                                                     uint8_t *ast_mac_addr,
                                                     uint8_t pdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
                                                     uint8_t *ast_mac_addr,
                                                     uint8_t vdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
                                               uint8_t *ast_mac_addr);

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
                                struct dp_ast_entry *ast_entry);

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
                                 struct dp_ast_entry *ast_entry);

void dp_peer_ast_set_type(struct dp_soc *soc,
                          struct dp_ast_entry *ast_entry,
                          enum cdp_txrx_ast_entry_type type);

void dp_peer_ast_send_wds_del(struct dp_soc *soc,
                              struct dp_ast_entry *ast_entry,
                              struct dp_peer *peer);

#ifdef WLAN_FEATURE_MULTI_AST_DEL
void dp_peer_ast_send_multi_wds_del(
        struct dp_soc *soc, uint8_t vdev_id,
        struct peer_del_multi_wds_entries *wds_list);
#endif

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
                           struct cdp_soc *dp_soc,
                           void *cookie,
                           enum cdp_ast_free_status status);

void dp_peer_ast_hash_remove(struct dp_soc *soc,
                             struct dp_ast_entry *ase);

void dp_peer_free_ast_entry(struct dp_soc *soc,
                            struct dp_ast_entry *ast_entry);

void dp_peer_unlink_ast_entry(struct dp_soc *soc,
                              struct dp_ast_entry *ast_entry,
                              struct dp_peer *peer);

/**
 * dp_peer_mec_detach_entry() - Detach the MEC entry
 * @soc: SoC handle
 * @mecentry: MEC entry of the node
 * @ptr: pointer to free list
 *
 * The MEC entry is detached from the MEC table and added to free_list
 * so that the object can be freed outside the lock
 *
 * Return: None
 */
void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
                              void *ptr);

/**
 * dp_peer_mec_free_list() - free the MEC entries from free_list
 * @soc: SoC handle
 * @ptr: pointer to free list
 *
 * Return: None
 */
void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);

/**
 * dp_peer_mec_add_entry() - allocate and add a MEC entry to the MEC table
 * @soc: SoC handle
 * @vdev: vdev to which mec node belongs
 * @mac_addr: MAC address of mec node
 *
 * This function allocates and adds a MEC entry to the MEC table.
 * It assumes the caller has taken the mec lock to protect access to these
 * tables.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
                                 struct dp_vdev *vdev,
                                 uint8_t *mac_addr);

/**
 * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
 *                                     within pdev
 * @soc: SoC handle
 * @pdev_id: pdev id
 * @mec_mac_addr: MAC address of mec node
 *
 * It assumes the caller has taken the mec_lock to protect access to
 * the MEC hash table.
 *
 * Return: MEC entry
 */
struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
                                                     uint8_t pdev_id,
                                                     uint8_t *mec_mac_addr);

#define DP_AST_ASSERT(_condition) \
    do { \
        if (!(_condition)) { \
            dp_print_ast_stats(soc);\
            QDF_BUG(_condition); \
        } \
    } while (0)
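
/*
 * Note (illustrative): DP_AST_ASSERT references a local variable named
 * 'soc' in its body, so it can only be used inside functions where a
 * 'struct dp_soc *soc' is in scope. A hypothetical check might read:
 *
 *    DP_AST_ASSERT(ast_entry->peer_id != DP_INVALID_PEER_ID);
 */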

/**
 * dp_peer_update_inactive_time() - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
                             uint32_t *tag_buf);

#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id() - set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * Return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
                         uint8_t vdev_id, uint8_t *peer_mac,
                         uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey() - set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * Return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
                                uint16_t vlan_id, uint16_t group_key)
{
    return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init() - initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove() - remove peer from special peer list
 * @peer: peer handle
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
                         uint8_t vdev_id, uint8_t *peer_mac,
                         uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
                                uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif

#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl: generic soc handle
 * @is_wds: flag to indicate if peer is wds
 * @peer_id: peer_id from htt peer map message
 * @peer_mac_addr: mac address of the peer
 * @ast_info: ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
                bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
                struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
                bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
                struct dp_ast_flow_override_info *ast_info);
#endif

/**
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 *                         after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
                         void *cb_ctxt,
                         union hal_reo_status *reo_status);

#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
                                         struct dp_txrx_peer *txrx_peer);
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
                                     struct dp_txrx_peer *txrx_peer);
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
static inline
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
                                         struct dp_txrx_peer *txrx_peer)
{
    return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
                                     struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif

#ifdef WLAN_PEER_JITTER
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
                                          struct dp_txrx_peer *txrx_peer);
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
                                      struct dp_txrx_peer *txrx_peer);
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
static inline
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
                                          struct dp_txrx_peer *txrx_peer)
{
    return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
                                      struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif

#ifndef CONFIG_SAWF_DEF_QUEUES
static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
                                                struct dp_peer *peer)
{
    return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
                                               struct dp_peer *peer)
{
    return QDF_STATUS_SUCCESS;
}
#endif

#ifndef CONFIG_SAWF
static inline
QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
                                        struct dp_txrx_peer *txrx_peer)
{
    return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
                                       struct dp_txrx_peer *txrx_peer)
{
    return QDF_STATUS_SUCCESS;
}
#endif

struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
                                           struct dp_vdev *vdev,
                                           enum dp_mod_id mod_id);
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
                                                struct dp_vdev *vdev,
                                                enum dp_mod_id mod_id);

void dp_peer_ast_table_detach(struct dp_soc *soc);
void dp_peer_find_map_detach(struct dp_soc *soc);
void dp_soc_wds_detach(struct dp_soc *soc);
QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
void dp_soc_wds_attach(struct dp_soc *soc);
void dp_peer_mec_hash_detach(struct dp_soc *soc);
void dp_peer_ast_hash_detach(struct dp_soc *soc);

#ifdef FEATURE_AST
/**
 * dp_peer_delete_ast_entries() - Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
                                              struct dp_peer *peer)
{
    struct dp_ast_entry *ast_entry, *temp_ast_entry;

    dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
    /*
     * Delete the peer self ast entry. This is done to handle scenarios
     * where the peer is freed before the peer map is received (e.g., in
     * case of auth disallow due to ACL); in such cases the self ast is
     * not added to peer->ast_list.
     */
    if (peer->self_ast_entry) {
        dp_peer_del_ast(soc, peer->self_ast_entry);
        peer->self_ast_entry = NULL;
    }

    DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
        dp_peer_del_ast(soc, ast_entry);
}

void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
                               void *arg);
#else
static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
                                             struct dp_peer *peer, void *arg)
{
}

static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
                                              struct dp_peer *peer)
{
}
#endif

#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);

/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);

/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc: dp_soc handle
 * @peer: peer
 *
 * This function is used to send a cache flush cmd to REO and
 * to register the callback that handles dumping of the REO
 * queue stats from DDR
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(
        struct dp_soc *soc, struct dp_peer *peer);

/**
 * dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl: cdp_soc_t handle
 * @vdev_id: vdev id
 *
 * Handler to get rx tid info from DDR after the h/w cache is
 * invalidated first using the cache flush cmd.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(
        struct cdp_soc_t *soc_hdl, uint8_t vdev_id);

/**
 * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
 * @soc: dp_soc handle
 * @cb_ctxt: callback context
 * @reo_status: REO command status
 *
 * This is the callback function registered after sending the reo cmd
 * to flush the h/w cache and invalidate it. In the callback the reo
 * queue desc info is dumped from DDR.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(
        struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
#else /* DUMP_REO_QUEUE_INFO_IN_DDR */
static inline void dp_get_rx_reo_queue_info(
        struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */

static inline int dp_peer_find_mac_addr_cmp(
        union dp_align_mac_addr *mac_addr1,
        union dp_align_mac_addr *mac_addr2)
{
    /*
     * Intentionally use & rather than &&.
     * Because the operands are binary rather than generic boolean,
     * the functionality is equivalent.
     * Using && has the advantage of short-circuited evaluation,
     * but using & has the advantage of no conditional branching,
     * which is a more significant benefit.
     */
    return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
             & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
}
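
/*
 * Usage sketch (illustrative only): like memcmp(), this returns 0 when
 * the two addresses match, so a match test reads:
 *
 *    if (!dp_peer_find_mac_addr_cmp(&peer->mac_addr, &other->mac_addr)) {
 *        // ... addresses are equal ...
 *    }
 *
 * ('other' here is a hypothetical second peer, used only for the example.)
 */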

/**
 * dp_peer_delete() - delete DP peer
 *
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
void dp_peer_delete(struct dp_soc *soc,
                    struct dp_peer *peer,
                    void *arg);

/**
 * dp_mlo_peer_delete() - delete MLO DP peer
 *
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);

#ifdef WLAN_FEATURE_11BE_MLO
/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)

/* set peer type */
#define DP_PEER_SET_TYPE(_peer, _type_val) \
    ((_peer)->peer_type = (_type_val))

/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) \
    ((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))
/* is MLO connection link peer */
#define IS_MLO_DP_LINK_PEER(_peer) \
    ((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_PEER(_peer) \
    ((_peer)->peer_type == CDP_MLD_PEER_TYPE)
/* Get Mld peer from link peer */
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
    ((link_peer)->mld_peer)
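
/*
 * Illustrative classification (not part of the API): under
 * WLAN_FEATURE_11BE_MLO every dp_peer falls into exactly one of these
 * classes, e.g.:
 *
 *    if (IS_DP_LEGACY_PEER(peer))
 *        // non-MLO peer, no mld_peer attached
 *    else if (IS_MLO_DP_LINK_PEER(peer))
 *        // per-link peer of an MLO connection, mld_peer is set
 *    else if (IS_MLO_DP_MLD_PEER(peer))
 *        // the MLD (multi-link device) peer itself
 */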

#ifdef WLAN_MLO_MULTI_CHIP
uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);

struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
                                  uint8_t *peer_mac_addr,
                                  int mac_addr_is_aligned,
                                  uint8_t vdev_id,
                                  uint8_t chip_id,
                                  enum dp_mod_id mod_id);
#else
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
    return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
                                  uint8_t *peer_mac_addr,
                                  int mac_addr_is_aligned,
                                  uint8_t vdev_id,
                                  uint8_t chip_id,
                                  enum dp_mod_id mod_id)
{
    return dp_peer_find_hash_find(soc, peer_mac_addr,
                                  mac_addr_is_aligned,
                                  vdev_id, mod_id);
}
#endif

/**
 * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
 *                                matching mac_address
 * @soc: soc handle
 * @peer_mac_addr: mld peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * Return: peer on success,
 *         NULL on failure
 */
static inline
struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
                                           uint8_t *peer_mac_addr,
                                           int mac_addr_is_aligned,
                                           uint8_t vdev_id,
                                           enum dp_mod_id mod_id)
{
    if (soc->arch_ops.mlo_peer_find_hash_find)
        return soc->arch_ops.mlo_peer_find_hash_find(soc,
                                                     peer_mac_addr,
                                                     mac_addr_is_aligned,
                                                     mod_id, vdev_id);
    return NULL;
}

/**
 * dp_peer_hash_find_wrapper() - find link peer or mld peer according to
 *                               peer_type
 * @soc: DP SOC handle
 * @peer_info: peer information for hash find
 * @mod_id: ID of module requesting reference
 *
 * Return: peer handle
 */
static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
                                          struct cdp_peer_info *peer_info,
                                          enum dp_mod_id mod_id)
{
    struct dp_peer *peer = NULL;

    if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
        peer_info->peer_type == CDP_WILD_PEER_TYPE) {
        peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
                                      peer_info->mac_addr_is_aligned,
                                      peer_info->vdev_id,
                                      mod_id);
        if (peer)
            return peer;
    }

    if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
        peer_info->peer_type == CDP_WILD_PEER_TYPE)
        peer = dp_mld_peer_find_hash_find(
                    soc, peer_info->mac_addr,
                    peer_info->mac_addr_is_aligned,
                    peer_info->vdev_id,
                    mod_id);
    return peer;
}
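
/*
 * Usage sketch (illustrative only): with CDP_WILD_PEER_TYPE the wrapper
 * tries the link-peer hash first and falls back to the MLD hash. Field
 * names follow struct cdp_peer_info exactly as used above; 'mac' and
 * 'vdev_id' are assumed locals.
 *
 *    struct cdp_peer_info peer_info = {0};
 *    struct dp_peer *peer;
 *
 *    peer_info.vdev_id = vdev_id;
 *    peer_info.mac_addr = mac;
 *    peer_info.mac_addr_is_aligned = false;
 *    peer_info.peer_type = CDP_WILD_PEER_TYPE;
 *
 *    peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
 *    if (peer)
 *        dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */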

/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
 *                               increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
                               struct dp_peer *mld_peer)
{
    /* increase mld_peer ref_cnt */
    dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
    link_peer->mld_peer = mld_peer;
}

/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
 *                               decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
    dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
    link_peer->mld_peer = NULL;
}

/**
 * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
    int i;

    qdf_spinlock_create(&mld_peer->link_peers_info_lock);
    mld_peer->num_links = 0;
    for (i = 0; i < DP_MAX_MLO_LINKS; i++)
        mld_peer->link_peers[i].is_valid = false;
}

/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
    qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}

/**
 * dp_mld_peer_add_link_peer() - add link peer info to mld peer
 * @mld_peer: mld dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
                               struct dp_peer *link_peer)
{
    int i;
    struct dp_peer_link_info *link_peer_info;

    qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
    for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
        link_peer_info = &mld_peer->link_peers[i];
        if (!link_peer_info->is_valid) {
            qdf_mem_copy(link_peer_info->mac_addr.raw,
                         link_peer->mac_addr.raw,
                         QDF_MAC_ADDR_SIZE);
            link_peer_info->is_valid = true;
            link_peer_info->vdev_id = link_peer->vdev->vdev_id;
            link_peer_info->chip_id =
                dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
            mld_peer->num_links++;
            break;
        }
    }
    qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

    if (i == DP_MAX_MLO_LINKS)
        dp_err("fail to add link peer " QDF_MAC_ADDR_FMT " to mld peer",
               QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
}

/**
 * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
 * @mld_peer: MLD dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: number of links left after deletion
 */
static inline
uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
                                  struct dp_peer *link_peer)
{
    int i;
    struct dp_peer_link_info *link_peer_info;
    uint8_t num_links;

    qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
    for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
        link_peer_info = &mld_peer->link_peers[i];
        if (link_peer_info->is_valid &&
            !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
                                       &link_peer_info->mac_addr)) {
            link_peer_info->is_valid = false;
            mld_peer->num_links--;
            break;
        }
    }
    num_links = mld_peer->num_links;
    qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

    if (i == DP_MAX_MLO_LINKS)
        dp_err("fail to del link peer " QDF_MAC_ADDR_FMT " from mld peer",
               QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));

    return num_links;
}

/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peer pointers and
 *                                         increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that holds the link peer pointer array and count
 * @mod_id: id of module requesting reference
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
            struct dp_soc *soc,
            struct dp_peer *mld_peer,
            struct dp_mld_link_peers *mld_link_peers,
            enum dp_mod_id mod_id)
{
    struct dp_peer *peer;
    uint8_t i = 0, j = 0;
    struct dp_peer_link_info *link_peer_info;

    qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
    qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
    for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
        link_peer_info = &mld_peer->link_peers[i];
        if (link_peer_info->is_valid) {
            peer = dp_link_peer_hash_find_by_chip_id(
                        soc,
                        link_peer_info->mac_addr.raw,
                        true,
                        link_peer_info->vdev_id,
                        link_peer_info->chip_id,
                        mod_id);
            if (peer)
                mld_link_peers->link_peers[j++] = peer;
        }
    }
    qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

    mld_link_peers->num_links = j;
}

/**
 * dp_release_link_peers_ref() - release all link peer references
 * @mld_link_peers: structure that holds the link peer pointer array and count
 * @mod_id: id of module requesting reference
 *
 * Return: None.
 */
static inline
void dp_release_link_peers_ref(
            struct dp_mld_link_peers *mld_link_peers,
            enum dp_mod_id mod_id)
{
    struct dp_peer *peer;
    uint8_t i;

    for (i = 0; i < mld_link_peers->num_links; i++) {
        peer = mld_link_peers->link_peers[i];
        if (peer)
            dp_peer_unref_delete(peer, mod_id);
        mld_link_peers->link_peers[i] = NULL;
    }

    mld_link_peers->num_links = 0;
}
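
/*
 * Usage sketch (illustrative only): snapshot the link peers of an MLD
 * peer with references held, walk them, then release the whole batch.
 * This is the same pattern dp_get_link_peer_id_by_lmac_id() uses below.
 *
 *    struct dp_mld_link_peers link_peers_info;
 *    uint8_t i;
 *
 *    dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, &link_peers_info,
 *                                        DP_MOD_ID_CDP);
 *    for (i = 0; i < link_peers_info.num_links; i++) {
 *        // ... use link_peers_info.link_peers[i] ...
 *    }
 *    dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
 */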

/**
 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
 * @soc: Datapath soc handle
 * @peer_id: peer id
 * @lmac_id: lmac id to find the link peer on the given lmac
 *
 * Return: peer_id of the link peer if found,
 *         else HTT_INVALID_PEER
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
                                        uint8_t lmac_id)
{
    uint8_t i;
    struct dp_peer *peer;
    struct dp_peer *link_peer;
    struct dp_soc *link_peer_soc;
    struct dp_mld_link_peers link_peers_info;
    uint16_t link_peer_id = HTT_INVALID_PEER;

    peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);

    if (!peer)
        return HTT_INVALID_PEER;

    if (IS_MLO_DP_MLD_PEER(peer)) {
        /* get link peers with reference */
        dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
                                            DP_MOD_ID_CDP);

        for (i = 0; i < link_peers_info.num_links; i++) {
            link_peer = link_peers_info.link_peers[i];
            link_peer_soc = link_peer->vdev->pdev->soc;
            if ((link_peer_soc == soc) &&
                (link_peer->vdev->pdev->lmac_id == lmac_id)) {
                link_peer_id = link_peer->peer_id;
                break;
            }
        }
        /* release link peers reference */
        dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
    } else {
        link_peer_id = peer_id;
    }

    dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

    return link_peer_id;
}

/**
 * dp_peer_get_tgt_peer_hash_find() - get dp_peer handle
 * @soc: soc handle
 * @peer_mac: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * For an MLO connection, get the corresponding MLD peer;
 * otherwise get the link peer for the non-MLO case.
 *
 * Return: peer on success,
 *         NULL on failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
                                               uint8_t *peer_mac,
                                               int mac_addr_is_aligned,
                                               uint8_t vdev_id,
                                               enum dp_mod_id mod_id)
{
    struct dp_peer *ta_peer = NULL;
    struct dp_peer *peer = dp_peer_find_hash_find(soc,
                                                  peer_mac, 0, vdev_id,
                                                  mod_id);

    if (peer) {
        /* mlo connection link peer, get mld peer with reference */
        if (IS_MLO_DP_LINK_PEER(peer)) {
            /* increase mld peer ref_cnt */
            if (QDF_STATUS_SUCCESS ==
                dp_peer_get_ref(soc, peer->mld_peer, mod_id))
                ta_peer = peer->mld_peer;
            else
                ta_peer = NULL;

            /* release the peer reference added by the hash find */
            dp_peer_unref_delete(peer, mod_id);
        } else {
            /* mlo MLD peer or non-mlo link peer */
            ta_peer = peer;
        }
    } else {
        dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT,
                    QDF_MAC_ADDR_REF(peer_mac));
    }

    return ta_peer;
}

/**
 * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @mod_id: ID of module requesting reference
 *
 * For an MLO connection, get the corresponding MLD peer;
 * otherwise get the link peer for the non-MLO case.
 *
 * Return: peer on success,
 *         NULL on failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
                                           uint16_t peer_id,
                                           enum dp_mod_id mod_id)
{
    struct dp_peer *ta_peer = NULL;
    struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

    if (peer) {
        /* mlo connection link peer, get mld peer with reference */
        if (IS_MLO_DP_LINK_PEER(peer)) {
            /* increase mld peer ref_cnt */
            if (QDF_STATUS_SUCCESS ==
                dp_peer_get_ref(soc, peer->mld_peer, mod_id))
                ta_peer = peer->mld_peer;
            else
                ta_peer = NULL;

            /* release the peer reference added by the id lookup */
            dp_peer_unref_delete(peer, mod_id);
        } else {
            /* mlo MLD peer or non-mlo link peer */
            ta_peer = peer;
        }
    }

    return ta_peer;
}

/**
 * dp_peer_mlo_delete() - peer MLO related delete operation
 * @peer: DP peer handle
 *
 * Return: None
 */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
    struct dp_peer *ml_peer;
    struct dp_soc *soc;

    /* MLO connection link peer */
    if (IS_MLO_DP_LINK_PEER(peer)) {
        ml_peer = peer->mld_peer;
        soc = ml_peer->vdev->pdev->soc;

        /* if last link peer deletion, delete MLD peer */
        if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
            dp_peer_delete(soc, peer->mld_peer, NULL);
    }
}

/**
 * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
 * @soc: Soc handle
 * @peer: DP peer handle
 * @vdev_id: Vdev ID
 * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mlo_setup(
        struct dp_soc *soc,
        struct dp_peer *peer,
        uint8_t vdev_id,
        struct cdp_peer_setup_info *setup_info);

/**
 * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
 * @peer: datapath peer
 *
 * Return: MLD peer in case of MLO Link peer,
 *         Peer itself in other cases
 */
static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
    return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
}

/**
 * dp_get_primary_link_peer_by_id() - Get primary link peer from the given
 *                                    peer id
 * @soc: core DP soc context
 * @peer_id: peer id
 * @mod_id: ID of module requesting reference
 *
 * Return: primary link peer for the MLO peer
 *         legacy peer itself in case of legacy peer
 */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
                                               uint16_t peer_id,
                                               enum dp_mod_id mod_id)
{
        uint8_t i;
        struct dp_mld_link_peers link_peers_info;
        struct dp_peer *peer;
        struct dp_peer *link_peer;
        struct dp_peer *primary_peer = NULL;

        peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
        if (!peer)
                return NULL;

        if (IS_MLO_DP_MLD_PEER(peer)) {
                /* get link peers with reference */
                dp_get_link_peers_ref_from_mld_peer(soc, peer,
                                                    &link_peers_info, mod_id);
                for (i = 0; i < link_peers_info.num_links; i++) {
                        link_peer = link_peers_info.link_peers[i];
                        if (link_peer->primary_link) {
                                primary_peer = link_peer;
                                /*
                                 * Take an additional reference on the
                                 * primary link peer.
                                 */
                                dp_peer_get_ref(NULL, primary_peer, mod_id);
                                break;
                        }
                }
                /* release link peers reference */
                dp_release_link_peers_ref(&link_peers_info, mod_id);
                dp_peer_unref_delete(peer, mod_id);
        } else {
                primary_peer = peer;
        }

        return primary_peer;
}
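
/*
 * Usage sketch (illustrative only): callers of
 * dp_get_primary_link_peer_by_id() own the returned reference, whether it
 * is a primary link peer or a legacy peer. The helper name
 * dp_example_primary_peer_access and DP_MOD_ID_CDP are assumptions.
 */
static inline void dp_example_primary_peer_access(struct dp_soc *soc,
                                                  uint16_t peer_id)
{
        struct dp_peer *peer;

        peer = dp_get_primary_link_peer_by_id(soc, peer_id, DP_MOD_ID_CDP);
        if (!peer)
                return;

        /* ... access state valid only on the primary link / legacy peer ... */

        /* release the reference taken on the returned peer */
        dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
}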

/**
 * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
 * @peer: Datapath peer
 *
 * Return: dp_txrx_peer from MLD peer if peer type is link peer
 *         dp_txrx_peer from peer itself for other cases
 */
static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
        return IS_MLO_DP_LINK_PEER(peer) ?
                        peer->mld_peer->txrx_peer : peer->txrx_peer;
}

/**
 * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
 * @peer: Datapath peer
 *
 * Return: true if peer is primary link peer or legacy peer
 *         false otherwise
 */
static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
        if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
                return true;
        else if (IS_DP_LEGACY_PEER(peer))
                return true;
        else
                return false;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Get target txrx peer for the given
 *                                    peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
                               uint16_t peer_id,
                               dp_txrx_ref_handle *handle,
                               enum dp_mod_id mod_id)
{
        struct dp_peer *peer;
        struct dp_txrx_peer *txrx_peer;

        peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
        if (!peer)
                return NULL;

        txrx_peer = dp_get_txrx_peer(peer);
        if (txrx_peer) {
                *handle = (dp_txrx_ref_handle)peer;
                return txrx_peer;
        }

        dp_peer_unref_delete(peer, mod_id);
        return NULL;
}
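
/*
 * Usage sketch (illustrative only): the returned txrx peer stays valid
 * until the reference handle is released. dp_txrx_peer_unref_delete() is
 * assumed to be the matching release routine declared elsewhere in this
 * header; the helper name dp_example_txrx_peer_access and DP_MOD_ID_RX
 * are assumptions made for the example.
 */
static inline void dp_example_txrx_peer_access(struct dp_soc *soc,
                                               uint16_t peer_id)
{
        dp_txrx_ref_handle handle = NULL;
        struct dp_txrx_peer *txrx_peer;

        txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id, &handle,
                                                   DP_MOD_ID_RX);
        if (!txrx_peer)
                return;

        /* ... use txrx_peer (stats, security state, ...) ... */

        /* release via the handle, not the txrx peer pointer */
        dp_txrx_peer_unref_delete(handle, DP_MOD_ID_RX);
}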

/**
 * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
 * @soc: core DP soc context
 *
 * Return: void
 */
void dp_print_mlo_ast_stats_be(struct dp_soc *soc);

#else
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false

#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) true
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL

static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
                                          struct cdp_peer_info *peer_info,
                                          enum dp_mod_id mod_id)
{
        return dp_peer_find_hash_find(soc, peer_info->mac_addr,
                                      peer_info->mac_addr_is_aligned,
                                      peer_info->vdev_id,
                                      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
                                               uint8_t *peer_mac,
                                               int mac_addr_is_aligned,
                                               uint8_t vdev_id,
                                               enum dp_mod_id mod_id)
{
        return dp_peer_find_hash_find(soc, peer_mac,
                                      mac_addr_is_aligned, vdev_id,
                                      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
                                           uint16_t peer_id,
                                           enum dp_mod_id mod_id)
{
        return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
QDF_STATUS dp_peer_mlo_setup(
                struct dp_soc *soc,
                struct dp_peer *peer,
                uint8_t vdev_id,
                struct cdp_peer_setup_info *setup_info)
{
        return QDF_STATUS_SUCCESS;
}

static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
                           struct dp_peer *link_peer)
{
}

static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
        return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
                                  uint8_t *peer_mac_addr,
                                  int mac_addr_is_aligned,
                                  uint8_t vdev_id,
                                  uint8_t chip_id,
                                  enum dp_mod_id mod_id)
{
        return dp_peer_find_hash_find(soc, peer_mac_addr,
                                      mac_addr_is_aligned,
                                      vdev_id, mod_id);
}

static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
        return peer;
}

static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
                                               uint16_t peer_id,
                                               enum dp_mod_id mod_id)
{
        return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
        return peer->txrx_peer;
}

static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
        return true;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Get target txrx peer for the given
 *                                    peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
                               uint16_t peer_id,
                               dp_txrx_ref_handle *handle,
                               enum dp_mod_id mod_id)
{
        return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}

static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
                                        uint8_t lmac_id)
{
        return peer_id;
}

static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
{
}
#endif /* WLAN_FEATURE_11BE_MLO */

static inline
QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
{
        uint8_t i;

        if (IS_MLO_DP_MLD_PEER(peer)) {
                dp_peer_info("skip for mld peer");
                return QDF_STATUS_SUCCESS;
        }

        if (peer->rx_tid) {
                QDF_BUG(0);
                dp_peer_err("peer rx_tid mem already exists");
                return QDF_STATUS_E_FAILURE;
        }

        peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
                                      sizeof(struct dp_rx_tid));
        if (!peer->rx_tid) {
                dp_err("fail to alloc tid for peer " QDF_MAC_ADDR_FMT,
                       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
                return QDF_STATUS_E_NOMEM;
        }

        qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));

        for (i = 0; i < DP_MAX_TIDS; i++)
                qdf_spinlock_create(&peer->rx_tid[i].tid_lock);

        return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_rx_tids_destroy(struct dp_peer *peer)
{
        uint8_t i;

        if (!IS_MLO_DP_LINK_PEER(peer)) {
                for (i = 0; i < DP_MAX_TIDS; i++)
                        qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);

                qdf_mem_free(peer->rx_tid);
        }

        peer->rx_tid = NULL;
}
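
/*
 * Usage sketch (illustrative only): the expected create/destroy pairing for
 * the rx TID array, assuming TID setup happens on the peer init path and is
 * unwound on the matching deinit path. The helper name
 * dp_example_peer_rx_init is hypothetical.
 */
static inline QDF_STATUS dp_example_peer_rx_init(struct dp_peer *peer)
{
        QDF_STATUS status;

        /* allocates DP_MAX_TIDS dp_rx_tid entries and their tid_locks;
         * a no-op success for MLD peers, which are skipped by create
         */
        status = dp_peer_rx_tids_create(peer);
        if (QDF_IS_STATUS_ERROR(status))
                return status;

        /* ... further rx-side init; if a later step fails, unwind with
         * dp_peer_rx_tids_destroy(peer);
         */

        return QDF_STATUS_SUCCESS;
}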

static inline
void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
{
        uint8_t i;

        qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
                     sizeof(struct dp_rx_tid_defrag));

        for (i = 0; i < DP_MAX_TIDS; i++)
                qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
}

static inline
void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
{
        uint8_t i;

        for (i = 0; i < DP_MAX_TIDS; i++)
                qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
}

#ifdef PEER_CACHE_RX_PKTS
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
        qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
        txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
        qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
                        DP_RX_CACHED_BUFQ_THRESH);
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
        qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
        qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
#else
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
#endif

/**
 * dp_peer_update_state() - update dp peer state
 * @soc: core DP soc context
 * @peer: DP peer
 * @state: new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
                     struct dp_peer *peer,
                     enum dp_peer_state state)
{
        uint8_t peer_state;

        qdf_spin_lock_bh(&peer->peer_state_lock);
        peer_state = peer->peer_state;

        switch (state) {
        case DP_PEER_STATE_INIT:
                DP_PEER_STATE_ASSERT
                        (peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
                         (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
                break;

        case DP_PEER_STATE_ACTIVE:
                DP_PEER_STATE_ASSERT(peer, state,
                                     (peer_state == DP_PEER_STATE_INIT));
                break;

        case DP_PEER_STATE_LOGICAL_DELETE:
                DP_PEER_STATE_ASSERT(peer, state,
                                     (peer_state == DP_PEER_STATE_ACTIVE) ||
                                     (peer_state == DP_PEER_STATE_INIT));
                break;

        case DP_PEER_STATE_INACTIVE:
                if (IS_MLO_DP_MLD_PEER(peer))
                        DP_PEER_STATE_ASSERT
                                (peer, state,
                                 (peer_state == DP_PEER_STATE_ACTIVE));
                else
                        DP_PEER_STATE_ASSERT
                                (peer, state,
                                 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
                break;

        case DP_PEER_STATE_FREED:
                if (peer->sta_self_peer)
                        DP_PEER_STATE_ASSERT
                                (peer, state,
                                 (peer_state == DP_PEER_STATE_INIT));
                else
                        DP_PEER_STATE_ASSERT
                                (peer, state,
                                 (peer_state == DP_PEER_STATE_INACTIVE) ||
                                 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
                break;

        default:
                qdf_spin_unlock_bh(&peer->peer_state_lock);
                dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
                         state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
                return;
        }

        peer->peer_state = state;
        qdf_spin_unlock_bh(&peer->peer_state_lock);
        dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
                peer_state, state,
                QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}
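
/*
 * Editorial summary of the transitions enforced above (derived from the
 * asserts in dp_peer_update_state(), not an authoritative state-machine
 * specification):
 *
 *   INIT           <- any state other than ACTIVE or LOGICAL_DELETE
 *   ACTIVE         <- INIT
 *   LOGICAL_DELETE <- ACTIVE or INIT
 *   INACTIVE       <- ACTIVE (MLD peer) / LOGICAL_DELETE (other peers)
 *   FREED          <- INIT (STA self peer) /
 *                     INACTIVE or LOGICAL_DELETE (other peers)
 */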

#ifdef REO_SHARED_QREF_TABLE_EN
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
                                        struct dp_peer *peer);
#else
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
                                                      struct dp_peer *peer) {}
#endif

#endif /* _DP_PEER_H_ */