dp_peer.h

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_PEER_H_
#define _DP_PEER_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_types.h"

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
#include "hal_reo.h"
#endif

#define DP_INVALID_PEER_ID 0xffff

#define DP_PEER_MAX_MEC_IDX 1024	/* maximum index for MEC table */
#define DP_PEER_MAX_MEC_ENTRY 4096	/* maximum MEC entries in MEC table */

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

#define DP_PEER_HASH_LOAD_MULT  2
#define DP_PEER_HASH_LOAD_SHIFT 0

/* Threshold for peer's cached buf queue beyond which frames are dropped */
#define DP_RX_CACHED_BUFQ_THRESH 64

#define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
#define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
#ifdef REO_QDESC_HISTORY
enum reo_qdesc_event_type {
	REO_QDESC_UPDATE_CB = 0,
	REO_QDESC_FREE,
};

struct reo_qdesc_event {
	qdf_dma_addr_t qdesc_addr;
	uint64_t ts;
	enum reo_qdesc_event_type type;
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
};
#endif

struct ast_del_ctxt {
	bool age;
	int del_count;
};

typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);

void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);

struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
				       uint8_t *peer_mac_addr,
				       int mac_addr_is_aligned,
				       uint8_t vdev_id,
				       enum dp_mod_id id);

bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
/**
 * dp_peer_get_ref() - Take a reference on the given peer object
 *
 * @soc: core DP soc context
 * @peer: DP peer
 * @mod_id: id of module requesting the reference
 *
 * Return: QDF_STATUS_SUCCESS if reference held successfully
 *	   else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
			   struct dp_peer *peer,
			   enum dp_mod_id mod_id)
{
	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
		return QDF_STATUS_E_INVAL;

	if (mod_id > DP_MOD_ID_RX)
		qdf_atomic_inc(&peer->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}
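
/*
 * Example: typical acquire/release pairing for dp_peer_get_ref(). This is
 * an illustrative sketch only; the peer pointer is assumed to be protected
 * (e.g. by peer_map_lock) until the reference is taken, as done in
 * __dp_peer_get_ref_by_id() below, and use_peer() is a hypothetical helper.
 *
 *	if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CDP) == QDF_STATUS_SUCCESS) {
 *		use_peer(peer);
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *	}
 */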
/**
 * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object is retrieved
 * @mod_id: module id
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline struct dp_peer *
__dp_peer_get_ref_by_id(struct dp_soc *soc,
			uint16_t peer_id,
			enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peer_id) ? NULL :
				soc->peer_id_to_obj_map[peer_id];
	if (!peer ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}
/**
 * dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *			     if peer state is active
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object is retrieved
 * @mod_id: ID of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline
struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
				      uint16_t peer_id,
				      enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peer_id) ? NULL :
				soc->peer_id_to_obj_map[peer_id];

	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}
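
/*
 * Example: lookup by peer id with mandatory release. A minimal sketch;
 * read_peer_stats() is a hypothetical helper and DP_MOD_ID_CDP is just
 * one possible module id. A NULL return means the peer was deleted,
 * never mapped, or no longer active.
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
 *	if (!peer)
 *		return;
 *	read_peer_stats(peer);
 *	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */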
/**
 * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object is retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting the reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			   uint16_t peer_id,
			   dp_txrx_ref_handle *handle,
			   enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	if (!peer)
		return NULL;

	if (!peer->txrx_peer) {
		dp_peer_unref_delete(peer, mod_id);
		return NULL;
	}

	*handle = (dp_txrx_ref_handle)peer;
	return peer->txrx_peer;
}
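
/*
 * Example: the returned dp_txrx_peer borrows the dp_peer reference, so the
 * caller releases through the handle, not the txrx peer. A sketch only;
 * update_tx_stats() is a hypothetical helper.
 *
 *	dp_txrx_ref_handle handle = NULL;
 *	struct dp_txrx_peer *txrx_peer;
 *
 *	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, &handle,
 *					       DP_MOD_ID_CDP);
 *	if (txrx_peer) {
 *		update_tx_stats(txrx_peer);
 *		dp_txrx_peer_unref_delete(handle, DP_MOD_ID_CDP);
 *	}
 */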
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);
}
/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev: DP vdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
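
/*
 * Example: a dp_peer_iter_func callback and its invocation. A sketch;
 * dp_peer_count_cb is a hypothetical callback that counts the peers of
 * a vdev through the opaque arg pointer.
 *
 *	static void dp_peer_count_cb(struct dp_soc *soc,
 *				     struct dp_peer *peer, void *arg)
 *	{
 *		(*(uint32_t *)arg)++;
 *	}
 *
 *	uint32_t count = 0;
 *
 *	dp_vdev_iterate_peer(vdev, dp_peer_count_cb, &count, DP_MOD_ID_CDP);
 */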
/**
 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
 *
 * @pdev: DP pdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev;

	if (!pdev)
		return;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

/**
 * dp_soc_iterate_peer() - API to iterate through all peers of soc
 *
 * @soc: DP soc context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
		    enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
	}
}
/**
 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev peer list
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it should be used only when the
 * lock cannot be held across the iterate function.
 *
 * @vdev: DP vdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_peer **peer_array = NULL;
	int i = 0;
	uint32_t num_peers = 0;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	num_peers = vdev->num_peers;

	soc = vdev->pdev->soc;

	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
	if (!peer_array)
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (i >= num_peers)
			break;

		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
			peer_array[i] = peer;
			i = (i + 1);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

	for (i = 0; i < num_peers; i++) {
		peer = peer_array[i];

		if (!peer)
			continue;

		(*func)(soc, peer, arg);
		dp_peer_unref_delete(peer, mod_id);
	}

	qdf_mem_free(peer_array);
}
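
/*
 * Example: prefer the lock-safe variant when the callback itself may take
 * peer_list_lock or block. A sketch; dp_peer_flush_cb is a hypothetical
 * dp_peer_iter_func.
 *
 *	dp_vdev_iterate_peer_lock_safe(vdev, dp_peer_flush_cb, NULL,
 *				       DP_MOD_ID_CDP);
 */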
/**
 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it should be used only when the
 * lock cannot be held across the iterate function.
 *
 * @pdev: DP pdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
	int i = 0;
	int j = 0;
	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};

	if (!pdev || !pdev->soc)
		return;

	soc = pdev->soc;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		/* restart the per-vdev index into peer_array[i] */
		j = 0;
		num_peers[i] = vdev->num_peers;
		peer_array[i] = qdf_mem_malloc(num_peers[i] *
					       sizeof(struct dp_peer *));
		if (!peer_array[i])
			break;

		qdf_spin_lock_bh(&vdev->peer_list_lock);
		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
				   peer_list_elem,
				   tmp_peer) {
			if (j >= num_peers[i])
				break;

			if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
				peer_array[i][j] = peer;
				j = (j + 1);
			}
		}
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		i = (i + 1);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
		if (!peer_array[i])
			break;

		for (j = 0; j < num_peers[i]; j++) {
			peer = peer_array[i][j];

			if (!peer)
				continue;

			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}

		qdf_mem_free(peer_array[i]);
	}
}

/**
 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it should be used only when the
 * lock cannot be held across the iterate function.
 *
 * @soc: DP soc context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
			      dp_peer_iter_func *func,
			      void *arg,
			      enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
	}
}
#ifdef DP_PEER_STATE_DEBUG
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)
#else
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif

/**
 * dp_peer_state_cmp() - compare dp peer state
 *
 * @peer: DP peer
 * @state: state
 *
 * Return: true if state matches with peer state
 *	   false if it does not match
 */
static inline bool
dp_peer_state_cmp(struct dp_peer *peer,
		  enum dp_peer_state state)
{
	bool is_status_equal = false;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	is_status_equal = (peer->peer_state == state);
	qdf_spin_unlock_bh(&peer->peer_state_lock);

	return is_status_equal;
}
/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc: core DP soc context
 * @peer: DP peer
 * @state: new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
		     struct dp_peer *peer,
		     enum dp_peer_state state)
{
	uint8_t peer_state;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	peer_state = peer->peer_state;

	switch (state) {
	case DP_PEER_STATE_INIT:
		DP_PEER_STATE_ASSERT
			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_ACTIVE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_LOGICAL_DELETE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_ACTIVE) ||
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_INACTIVE:
		DP_PEER_STATE_ASSERT
			(peer, state,
			 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_FREED:
		if (peer->sta_self_peer)
			DP_PEER_STATE_ASSERT
			(peer, state, (peer_state == DP_PEER_STATE_INIT));
		else
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INACTIVE) ||
				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	default:
		qdf_spin_unlock_bh(&peer->peer_state_lock);
		dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return;
	}
	peer->peer_state = state;
	qdf_spin_unlock_bh(&peer->peer_state_lock);
	dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
		peer_state, state,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}
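
/*
 * Example: the legal peer lifecycle enforced by the asserts above, in
 * call order (a sketch with error paths elided, not a complete sequence):
 *
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
 */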
void dp_print_ast_stats(struct dp_soc *soc);
QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
				  uint16_t hw_peer_id, uint8_t vdev_id,
				  uint8_t *peer_mac_addr, uint16_t ast_hash,
				  uint8_t is_wds);
void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			      uint8_t vdev_id, uint8_t *peer_mac_addr,
			      uint8_t is_wds, uint32_t free_wds_count);

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
 * @soc: generic soc handle
 * @peer_id: ML peer_id from firmware
 * @peer_mac_addr: mac address of the peer
 * @mlo_flow_info: MLO AST flow info
 *
 * associate the ML peer_id that firmware provided with peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS
dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t *peer_mac_addr,
			   struct dp_mlo_flow_override_info *mlo_flow_info);

/**
 * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
 * @soc: generic soc handle
 * @peer_id: peer_id from firmware
 *
 * Return: none
 */
void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
#endif
void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
			   enum cdp_sec_type sec_type, int is_unicast,
			   u_int32_t *michael_key, u_int32_t *rx_pn);

QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
				   uint8_t tid, uint16_t win_sz);

uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
				    uint16_t peer_id, uint8_t *peer_mac);

QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
			   uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
			   uint32_t flags);

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);

void dp_peer_ast_unmap_handler(struct dp_soc *soc,
			       struct dp_ast_entry *ast_entry);

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags);

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr);

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry);

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry);

void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type);

void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

#ifdef WLAN_FEATURE_MULTI_AST_DEL
void dp_peer_ast_send_multi_wds_del(
		struct dp_soc *soc, uint8_t vdev_id,
		struct peer_del_multi_wds_entries *wds_list);
#endif

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status);

void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase);

void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry);

void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);
/**
 * dp_peer_mec_detach_entry() - Detach the MEC entry
 * @soc: SoC handle
 * @mecentry: MEC entry of the node
 * @ptr: pointer to free list
 *
 * The MEC entry is detached from MEC table and added to free_list
 * to free the object outside lock
 *
 * Return: None
 */
void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
			      void *ptr);

/**
 * dp_peer_mec_free_list() - free the MEC entry from free_list
 * @soc: SoC handle
 * @ptr: pointer to free list
 *
 * Return: None
 */
void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);

/**
 * dp_peer_mec_add_entry() - allocate and add MEC entry to MEC table
 * @soc: SoC handle
 * @vdev: vdev to which mec node belongs
 * @mac_addr: MAC address of mec node
 *
 * This function allocates and adds MEC entry to MEC table.
 * It assumes caller has taken the mec lock to protect the access to these
 * tables
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 uint8_t *mac_addr);

/**
 * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
 *				       within pdev
 * @soc: SoC handle
 * @pdev_id: pdev id
 * @mec_mac_addr: MAC address of mec node
 *
 * It assumes caller has taken the mec_lock to protect the access to
 * MEC hash table
 *
 * Return: MEC entry
 */
struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t pdev_id,
						     uint8_t *mec_mac_addr);
#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc); \
			QDF_BUG(_condition); \
		} \
	} while (0)

/**
 * dp_peer_update_inactive_time() - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf);
#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id() - set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * Return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey() - set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * Return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init() - initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove() - remove peer from special peer list
 * @peer: peer handle
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif
#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl: generic soc handle
 * @is_wds: flag to indicate if peer is wds
 * @peer_id: peer_id from htt peer map message
 * @peer_mac_addr: mac address of the peer
 * @ast_info: ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info);
#endif
/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 *			   after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);

#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer);
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer);
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
static inline
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif
#ifdef WLAN_PEER_JITTER
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer);
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer);
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
static inline
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif

#ifndef CONFIG_SAWF_DEF_QUEUES
static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
					       struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifndef CONFIG_SAWF
static inline
QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
					struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
				       struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}
#endif
struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   enum dp_mod_id mod_id);
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
						struct dp_vdev *vdev,
						enum dp_mod_id mod_id);

void dp_peer_ast_table_detach(struct dp_soc *soc);
void dp_peer_find_map_detach(struct dp_soc *soc);
void dp_soc_wds_detach(struct dp_soc *soc);
QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
void dp_soc_wds_attach(struct dp_soc *soc);
void dp_peer_mec_hash_detach(struct dp_soc *soc);
void dp_peer_ast_hash_detach(struct dp_soc *soc);

#ifdef FEATURE_AST
/*
 * dp_peer_delete_ast_entries() - Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received (for example in
	 * case of auth disallow due to ACL); in such cases self ast is not
	 * added to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}
#else
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif
#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);

/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);

/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif
#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc: dp_soc handle
 * @peer: peer
 *
 * This function is used to send cache flush cmd to reo and
 * to register the callback to handle the dumping of the reo
 * queue stats from DDR
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(
	struct dp_soc *soc, struct dp_peer *peer);

/**
 * dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl: cdp_soc_t handle
 * @vdev_id: vdev id
 *
 * Handler to get rx tid info from DDR after h/w cache is
 * invalidated first using the cache flush cmd.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);

/**
 * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
 * @soc: dp_soc handle
 * @cb_ctxt: callback context
 * @reo_status: REO command status
 *
 * This is the callback function registered after sending the reo cmd
 * to flush the h/w cache and invalidate it. In the callback the reo
 * queue desc info is dumped from DDR.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(
	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
#else /* DUMP_REO_QUEUE_INFO_IN_DDR */
static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
static inline int dp_peer_find_mac_addr_cmp(
	union dp_align_mac_addr *mac_addr1,
	union dp_align_mac_addr *mac_addr2)
{
	/*
	 * Intentionally use & rather than &&.
	 * because the operands are binary rather than generic boolean,
	 * the functionality is equivalent.
	 * Using && has the advantage of short-circuited evaluation,
	 * but using & has the advantage of no conditional branching,
	 * which is a more significant benefit.
	 */
	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
}
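
/*
 * Note the memcmp-style return convention of the comparator above:
 * 0 means the addresses are equal. For example (target_mac being a
 * hypothetical operand):
 *
 *	if (!dp_peer_find_mac_addr_cmp(&peer->mac_addr, &target_mac))
 *		num_matches++;
 */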
/**
 * dp_peer_delete() - delete DP peer
 *
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
void dp_peer_delete(struct dp_soc *soc,
		    struct dp_peer *peer,
		    void *arg);

#ifdef WLAN_FEATURE_11BE_MLO
/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)

/* set peer type */
#define DP_PEER_SET_TYPE(_peer, _type_val) \
	((_peer)->peer_type = (_type_val))

/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))
/* is MLO connection link peer */
#define IS_MLO_DP_LINK_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_PEER(_peer) \
	((_peer)->peer_type == CDP_MLD_PEER_TYPE)
/* Get Mld peer from link peer */
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
	((link_peer)->mld_peer)
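
/*
 * Example: how the classification macros above partition peers; for any
 * given peer exactly one of the following holds:
 *
 *	IS_DP_LEGACY_PEER(peer)   - link-type peer with no mld_peer attached
 *	IS_MLO_DP_LINK_PEER(peer) - link-type peer attached to an MLD peer
 *	IS_MLO_DP_MLD_PEER(peer)  - the MLD peer itself
 */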
#ifdef WLAN_MLO_MULTI_CHIP
uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);

struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id);
#else
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
#endif
/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
 *				 increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
			       struct dp_peer *mld_peer)
{
	/* increase mld_peer ref_cnt */
	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = mld_peer;
}

/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
 *				 decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = NULL;
}

/**
 * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
	int i;

	qdf_spinlock_create(&mld_peer->link_peers_info_lock);
	mld_peer->num_links = 0;
	for (i = 0; i < DP_MAX_MLO_LINKS; i++)
		mld_peer->link_peers[i].is_valid = false;
}

/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}
/**
 * dp_mld_peer_add_link_peer() - add link peer info to mld peer
 * @mld_peer: mld dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
			       struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (!link_peer_info->is_valid) {
			qdf_mem_copy(link_peer_info->mac_addr.raw,
				     link_peer->mac_addr.raw,
				     QDF_MAC_ADDR_SIZE);
			link_peer_info->is_valid = true;
			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
			link_peer_info->chip_id =
				dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
			mld_peer->num_links++;
			break;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	if (i == DP_MAX_MLO_LINKS)
		dp_err("fail to add link peer " QDF_MAC_ADDR_FMT " to mld peer",
		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
}
/**
 * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
 * @mld_peer: MLD dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: number of links left after deletion
 */
static inline
uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
				  struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	uint8_t num_links;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid &&
		    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
					       &link_peer_info->mac_addr)) {
			link_peer_info->is_valid = false;
			mld_peer->num_links--;
			break;
		}
	}
	num_links = mld_peer->num_links;
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	if (i == DP_MAX_MLO_LINKS)
		dp_err("fail to del link peer " QDF_MAC_ADDR_FMT " from mld peer",
		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));

	return num_links;
}
/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointers and
 *					   increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that holds the link peers pointer array and count
 * @mod_id: id of module requesting reference
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
		struct dp_soc *soc,
		struct dp_peer *mld_peer,
		struct dp_mld_link_peers *mld_link_peers,
		enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i = 0, j = 0;
	struct dp_peer_link_info *link_peer_info;

	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++)  {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid) {
			peer = dp_link_peer_hash_find_by_chip_id(
						soc,
						link_peer_info->mac_addr.raw,
						true,
						link_peer_info->vdev_id,
						link_peer_info->chip_id,
						mod_id);
			if (peer)
				mld_link_peers->link_peers[j++] = peer;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	mld_link_peers->num_links = j;
}

/**
 * dp_release_link_peers_ref() - release all link peers reference
 * @mld_link_peers: structure that holds the link peers pointer array and count
 * @mod_id: id of module requesting reference
 *
 * Return: None.
 */
static inline
void dp_release_link_peers_ref(
		struct dp_mld_link_peers *mld_link_peers,
		enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i;

	for (i = 0; i < mld_link_peers->num_links; i++) {
		peer = mld_link_peers->link_peers[i];
		if (peer)
			dp_peer_unref_delete(peer, mod_id);
		mld_link_peers->link_peers[i] = NULL;
	}

	mld_link_peers->num_links = 0;
}
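
/*
 * Example: walking the link peers of an MLD peer with references held.
 * A sketch only; process_link() is a hypothetical per-link handler.
 *
 *	struct dp_mld_link_peers link_peers_info;
 *	uint8_t i;
 *
 *	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, &link_peers_info,
 *					    DP_MOD_ID_CDP);
 *	for (i = 0; i < link_peers_info.num_links; i++)
 *		process_link(link_peers_info.link_peers[i]);
 *	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
 */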
/**
 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
 * @soc: Datapath soc handle
 * @peer_id: peer id
 * @lmac_id: lmac id to find the link peer on given lmac
 *
 * Return: peer_id of link peer if found
 *	   else return HTT_INVALID_PEER
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	uint8_t i;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_soc *link_peer_soc;
	struct dp_mld_link_peers link_peers_info;
	uint16_t link_peer_id = HTT_INVALID_PEER;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);

	if (!peer)
		return HTT_INVALID_PEER;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    DP_MOD_ID_CDP);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			if ((link_peer_soc == soc) &&
			    (link_peer->vdev->pdev->lmac_id == lmac_id)) {
				link_peer_id = link_peer->peer_id;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		link_peer_id = peer_id;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return link_peer_id;
}
/**
 * dp_peer_get_tgt_peer_hash_find() - get MLD dp_peer handle for processing
 * @soc: soc handle
 * @peer_mac: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * For an MLO connection, get the corresponding MLD peer;
 * otherwise get the link peer for the non-MLO case.
 *
 * Return: peer on success
 *	   NULL on failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find(soc,
						      peer_mac, 0, vdev_id,
						      mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;

			/* release peer reference that was added by hash find */
			dp_peer_unref_delete(peer, mod_id);
		} else {
			/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	}

	return ta_peer;
}
/**
 * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object is retrieved
 * @mod_id: ID of module requesting the reference
 *
 * For an MLO connection, get the corresponding MLD peer;
 * otherwise get the link peer for the non-MLO case.
 *
 * Return: peer on success
 *	   NULL on failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;

			/* release peer reference that was added by id lookup */
			dp_peer_unref_delete(peer, mod_id);
		} else {
			/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	}

	return ta_peer;
}
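
/*
 * Example: resolving the target (MLD or legacy) peer for a received
 * peer id. A sketch; the returned reference must still be released by
 * the caller, and process_ta_peer() is a hypothetical helper.
 *
 *	struct dp_peer *ta_peer;
 *
 *	ta_peer = dp_peer_get_tgt_peer_by_id(soc, peer_id, DP_MOD_ID_RX);
 *	if (ta_peer) {
 *		process_ta_peer(ta_peer);
 *		dp_peer_unref_delete(ta_peer, DP_MOD_ID_RX);
 *	}
 */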
/**
 * dp_peer_mlo_delete() - peer MLO related delete operation
 * @peer: DP peer handle
 *
 * Return: None
 */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
	struct dp_peer *ml_peer;
	struct dp_soc *soc;

	/* MLO connection link peer */
	if (IS_MLO_DP_LINK_PEER(peer)) {
		ml_peer = peer->mld_peer;
		soc = ml_peer->vdev->pdev->soc;

		/* if last link peer deletion, delete MLD peer */
		if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
			dp_peer_delete(soc, peer->mld_peer, NULL);
	}
}

/**
 * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
 * @soc: Soc handle
 * @peer: DP peer handle
 * @vdev_id: Vdev ID
 * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mlo_setup(
		struct dp_soc *soc,
		struct dp_peer *peer,
		uint8_t vdev_id,
		struct cdp_peer_setup_info *setup_info);
  1380. /**
  1381. * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
  1382. * @peer: datapath peer
  1383. *
  1384. * Return: MLD peer in case of MLO Link peer
  1385. * Peer itself in other cases
  1386. */
  1387. static inline
  1388. struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
  1389. {
  1390. return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
  1391. }
  1392. /**
  1393. * dp_get_primary_link_peer_by_id(): Get primary link peer from the given
  1394. * peer id
  1395. * @soc: core DP soc context
  1396. * @peer_id: peer id
  1397. * @mod_id: ID of module requesting reference
  1398. *
  1399. * Return: primary link peer for the MLO peer
  1400. * legacy peer itself in case of legacy peer
  1401. */
  1402. static inline
  1403. struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
  1404. uint16_t peer_id,
  1405. enum dp_mod_id mod_id)
  1406. {
  1407. uint8_t i;
  1408. struct dp_mld_link_peers link_peers_info;
  1409. struct dp_peer *peer;
  1410. struct dp_peer *link_peer;
  1411. struct dp_peer *primary_peer = NULL;
  1412. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1413. if (!peer)
  1414. return NULL;
  1415. if (IS_MLO_DP_MLD_PEER(peer)) {
  1416. /* get link peers with reference */
  1417. dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
  1418. mod_id);
  1419. for (i = 0; i < link_peers_info.num_links; i++) {
  1420. link_peer = link_peers_info.link_peers[i];
  1421. if (link_peer->primary_link) {
  1422. primary_peer = link_peer;
  1423. /*
  1424. * Take additional reference over
  1425. * primary link peer.
  1426. */
  1427. dp_peer_get_ref(NULL, primary_peer, mod_id);
  1428. break;
  1429. }
  1430. }
  1431. /* release link peers reference */
  1432. dp_release_link_peers_ref(&link_peers_info, mod_id);
  1433. dp_peer_unref_delete(peer, mod_id);
  1434. } else {
  1435. primary_peer = peer;
  1436. }
  1437. return primary_peer;
  1438. }
/**
 * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
 * @peer: Datapath peer
 *
 * Return: dp_txrx_peer from MLD peer if peer type is link peer
 *	   dp_txrx_peer from peer itself for other cases
 */
static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return IS_MLO_DP_LINK_PEER(peer) ?
				peer->mld_peer->txrx_peer : peer->txrx_peer;
}
/**
 * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
 * @peer: Datapath peer
 *
 * Return: true if peer is primary link peer or legacy peer
 *	   false otherwise
 */
static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
		return true;
	else if (IS_DP_LEGACY_PEER(peer))
		return true;
	else
		return false;
}
/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Get target txrx peer for the given
 *				      peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_txrx_peer *txrx_peer;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	if (!peer)
		return NULL;

	txrx_peer = dp_get_txrx_peer(peer);
	if (txrx_peer) {
		*handle = (dp_txrx_ref_handle)peer;
		return txrx_peer;
	}

	dp_peer_unref_delete(peer, mod_id);
	return NULL;
}
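/*
 * Illustrative usage (a sketch): on success the opaque handle wraps the
 * dp_peer whose reference protects the returned txrx peer; release it with
 * its dp_txrx_peer_unref_delete() counterpart once the txrx peer is no
 * longer in use.
 *
 *	dp_txrx_ref_handle handle;
 *	struct dp_txrx_peer *txrx_peer;
 *
 *	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id, &handle,
 *						   DP_MOD_ID_CDP);
 *	if (txrx_peer) {
 *		... update txrx peer stats ...
 *		dp_txrx_peer_unref_delete(handle, DP_MOD_ID_CDP);
 *	}
 */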
#else

#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false

#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */

/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) true
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL

static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
}

static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}

static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	return peer;
}

static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return peer->txrx_peer;
}

static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	return true;
}
/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Get target txrx peer for the given
 *				      peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)
{
	return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	return peer_id;
}
#endif /* WLAN_FEATURE_11BE_MLO */
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/**
 * dp_mlo_partner_chips_map() - Map MLO peer to partner SOCs
 * @soc: Soc handle
 * @peer: DP peer handle for ML peer
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_mlo_partner_chips_map(struct dp_soc *soc,
			      struct dp_peer *peer,
			      uint16_t peer_id);

/**
 * dp_mlo_partner_chips_unmap() - Unmap MLO peer from partner SOCs
 * @soc: Soc handle
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
				uint16_t peer_id);
#else
static inline void dp_mlo_partner_chips_map(struct dp_soc *soc,
					    struct dp_peer *peer,
					    uint16_t peer_id)
{
}

static inline void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
					      uint16_t peer_id)
{
}
#endif
static inline
QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
{
	uint8_t i;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		dp_peer_info("skip for mld peer");
		return QDF_STATUS_SUCCESS;
	}

	if (peer->rx_tid) {
		QDF_BUG(0);
		dp_peer_err("peer rx_tid mem already exists");
		return QDF_STATUS_E_FAILURE;
	}

	peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
				      sizeof(struct dp_rx_tid));
	if (!peer->rx_tid) {
		dp_err("fail to alloc tid for peer " QDF_MAC_ADDR_FMT,
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));

	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);

	return QDF_STATUS_SUCCESS;
}
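/*
 * Per-TID locking sketch: once dp_peer_rx_tids_create() has succeeded, each
 * of the DP_MAX_TIDS dp_rx_tid entries is guarded by its own tid_lock, e.g.
 *
 *	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
 *
 *	qdf_spin_lock_bh(&rx_tid->tid_lock);
 *	... update per-TID reorder state ...
 *	qdf_spin_unlock_bh(&rx_tid->tid_lock);
 */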
static inline
void dp_peer_rx_tids_destroy(struct dp_peer *peer)
{
	uint8_t i;

	if (!IS_MLO_DP_LINK_PEER(peer)) {
		for (i = 0; i < DP_MAX_TIDS; i++)
			qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);

		qdf_mem_free(peer->rx_tid);
	}

	peer->rx_tid = NULL;
}
static inline
void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
{
	uint8_t i;

	qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
		     sizeof(struct dp_rx_tid_defrag));

	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
}

static inline
void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
{
	uint8_t i;

	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
}
#ifdef PEER_CACHE_RX_PKTS
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
	qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
	txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
			DP_RX_CACHED_BUFQ_THRESH);
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
	qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
#else
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
#endif
#ifdef REO_SHARED_QREF_TABLE_EN
/**
 * dp_peer_rx_reo_shared_qaddr_delete() - Delete peer's entries from the
 *					  REO shared queue address table
 * @soc: core DP soc context
 * @peer: DP peer handle
 *
 * Return: None
 */
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
					struct dp_peer *peer);
#else
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
						      struct dp_peer *peer) {}
#endif

#endif /* _DP_PEER_H_ */