  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #ifndef _DP_PEER_H_
  20. #define _DP_PEER_H_
  21. #include <qdf_types.h>
  22. #include <qdf_lock.h>
  23. #include "dp_types.h"
  24. #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
  25. #include "hal_reo.h"
  26. #endif
  27. #define DP_INVALID_PEER_ID 0xffff
  28. #define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
  29. #define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */
  30. #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
  31. #define DP_PEER_HASH_LOAD_MULT 2
  32. #define DP_PEER_HASH_LOAD_SHIFT 0
  33. #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
  34. #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
  35. #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
  36. #define dp_peer_info(params...) \
  37. __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
  38. #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
  39. #ifdef REO_QDESC_HISTORY
  40. enum reo_qdesc_event_type {
  41. REO_QDESC_UPDATE_CB = 0,
  42. REO_QDESC_FREE,
  43. };
  44. struct reo_qdesc_event {
  45. qdf_dma_addr_t qdesc_addr;
  46. uint64_t ts;
  47. enum reo_qdesc_event_type type;
  48. uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
  49. };
  50. #endif
  51. struct ast_del_ctxt {
  52. bool age;
  53. int del_count;
  54. };
  55. typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
  56. void *arg);
  57. void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
  58. struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
  59. uint8_t *peer_mac_addr,
  60. int mac_addr_is_aligned,
  61. uint8_t vdev_id,
  62. enum dp_mod_id id);
  63. /**
  64. * dp_peer_get_ref() - Returns peer object given the peer id
  65. *
  66. * @soc : core DP soc context
  67. * @peer : DP peer
  68. * @mod_id : id of module requesting the reference
  69. *
  70. * Return: QDF_STATUS_SUCCESS if reference held successfully
  71. * else QDF_STATUS_E_INVAL
  72. */
  73. static inline
  74. QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
  75. struct dp_peer *peer,
  76. enum dp_mod_id mod_id)
  77. {
  78. if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
  79. return QDF_STATUS_E_INVAL;
  80. if (mod_id > DP_MOD_ID_RX)
  81. qdf_atomic_inc(&peer->mod_refs[mod_id]);
  82. return QDF_STATUS_SUCCESS;
  83. }
  84. /**
  85. * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
  86. *
  87. * @soc : core DP soc context
  88. * @peer_id : peer id from peer object can be retrieved
  89. * @mod_id : module id
  90. *
  91. * Return: struct dp_peer*: Pointer to DP peer object
  92. */
  93. static inline struct dp_peer *
  94. __dp_peer_get_ref_by_id(struct dp_soc *soc,
  95. uint16_t peer_id,
  96. enum dp_mod_id mod_id)
  97. {
  98. struct dp_peer *peer;
  99. qdf_spin_lock_bh(&soc->peer_map_lock);
  100. peer = (peer_id >= soc->max_peer_id) ? NULL :
  101. soc->peer_id_to_obj_map[peer_id];
  102. if (!peer ||
  103. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  104. qdf_spin_unlock_bh(&soc->peer_map_lock);
  105. return NULL;
  106. }
  107. qdf_spin_unlock_bh(&soc->peer_map_lock);
  108. return peer;
  109. }
  110. /**
  111. * dp_peer_get_ref_by_id() - Returns peer object given the peer id
  112. * if peer state is active
  113. *
  114. * @soc : core DP soc context
  115. * @peer_id : peer id from peer object can be retrieved
  116. * @mod_id : ID ot module requesting reference
  117. *
  118. * Return: struct dp_peer*: Pointer to DP peer object
  119. */
  120. static inline
  121. struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
  122. uint16_t peer_id,
  123. enum dp_mod_id mod_id)
  124. {
  125. struct dp_peer *peer;
  126. qdf_spin_lock_bh(&soc->peer_map_lock);
  127. peer = (peer_id >= soc->max_peer_id) ? NULL :
  128. soc->peer_id_to_obj_map[peer_id];
  129. if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
  130. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  131. qdf_spin_unlock_bh(&soc->peer_map_lock);
  132. return NULL;
  133. }
  134. qdf_spin_unlock_bh(&soc->peer_map_lock);
  135. return peer;
  136. }
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop (rather than deliver) the cached frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
/* No-op when PEER_CACHE_RX_PKTS is disabled: no rx frames are cached. */
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif
/*
 * dp_clear_peer_internal() - mark a peer disconnected and flush any
 * rx frames cached for it.
 * @soc: core DP soc context
 * @peer: peer being cleared
 */
static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	/* State change is published under peer_info_lock */
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	/* drop=true: cached frames are dropped, not delivered */
	dp_rx_flush_rx_cached(peer, true);
}
/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev : DP vdev context
 * @func : function to be called for each peer
 * @arg : argument need to be passed to func
 * @mod_id : module_id
 *
 * NOTE: @func is invoked with vdev->peer_list_lock held; use the
 * _lock_safe variant when the callback cannot run under that lock.
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		/* Skip peers whose refcount has already dropped to zero */
		if (dp_peer_get_ref(soc, peer, mod_id) ==
		    QDF_STATUS_SUCCESS) {
			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
/**
 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
 *
 * @pdev : DP pdev context
 * @func : function to be called for each peer
 * @arg : argument need to be passed to func
 * @mod_id : module_id
 *
 * NOTE: holds pdev->vdev_list_lock across the whole walk; each vdev
 * walk additionally takes its peer_list_lock inside
 * dp_vdev_iterate_peer, so @func runs with both locks held.
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev;

	if (!pdev)
		return;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}
  213. /**
  214. * dp_soc_iterate_peer() - API to iterate through all peers of soc
  215. *
  216. * @soc : DP soc context
  217. * @func : function to be called for each peer
  218. * @arg : argument need to be passed to func
  219. * @mod_id : module_id
  220. *
  221. * Return: void
  222. */
  223. static inline void
  224. dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
  225. enum dp_mod_id mod_id)
  226. {
  227. struct dp_pdev *pdev;
  228. int i;
  229. if (!soc)
  230. return;
  231. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  232. pdev = soc->pdev_list[i];
  233. dp_pdev_iterate_peer(pdev, func, arg, mod_id);
  234. }
  235. }
/**
 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
 *
 * This API will cache the peers in local allocated memory and calls
 * iterate function outside the lock.
 *
 * As this API is allocating new memory it is suggested to use this
 * only when lock cannot be held
 *
 * @vdev : DP vdev context
 * @func : function to be called for each peer
 * @arg : argument need to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_peer **peer_array = NULL;
	int i = 0;
	uint32_t num_peers = 0;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	/* Snapshot of the count; peers added after this are not visited */
	num_peers = vdev->num_peers;

	soc = vdev->pdev->soc;

	/* NOTE(review): relies on qdf_mem_malloc returning zeroed memory
	 * so that unfilled slots stay NULL and are skipped below — confirm.
	 */
	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
	if (!peer_array)
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		/* Never overrun the snapshot-sized array */
		if (i >= num_peers)
			break;

		/* Hold a reference so the peer survives until the
		 * callback loop below runs without the lock.
		 */
		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
			peer_array[i] = peer;
			i = (i + 1);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

	/* Callback runs lock-free; the temporary reference is dropped
	 * immediately afterwards.
	 */
	for (i = 0; i < num_peers; i++) {
		peer = peer_array[i];

		if (!peer)
			continue;

		(*func)(soc, peer, arg);
		dp_peer_unref_delete(peer, mod_id);
	}

	qdf_mem_free(peer_array);
}
  292. /**
  293. * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
  294. *
  295. * This API will cache the peers in local allocated memory and calls
  296. * iterate function outside the lock.
  297. *
  298. * As this API is allocating new memory it is suggested to use this
  299. * only when lock cannot be held
  300. *
  301. * @pdev : DP pdev context
  302. * @func : function to be called for each peer
  303. * @arg : argument need to be passed to func
  304. * @mod_id : module_id
  305. *
  306. * Return: void
  307. */
  308. static inline void
  309. dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
  310. dp_peer_iter_func *func,
  311. void *arg,
  312. enum dp_mod_id mod_id)
  313. {
  314. struct dp_peer *peer;
  315. struct dp_peer *tmp_peer;
  316. struct dp_soc *soc = NULL;
  317. struct dp_vdev *vdev = NULL;
  318. struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
  319. int i = 0;
  320. int j = 0;
  321. uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
  322. if (!pdev || !pdev->soc)
  323. return;
  324. soc = pdev->soc;
  325. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  326. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  327. num_peers[i] = vdev->num_peers;
  328. peer_array[i] = qdf_mem_malloc(num_peers[i] *
  329. sizeof(struct dp_peer *));
  330. if (!peer_array[i])
  331. break;
  332. qdf_spin_lock_bh(&vdev->peer_list_lock);
  333. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  334. peer_list_elem,
  335. tmp_peer) {
  336. if (j >= num_peers[i])
  337. break;
  338. if (dp_peer_get_ref(soc, peer, mod_id) ==
  339. QDF_STATUS_SUCCESS) {
  340. peer_array[i][j] = peer;
  341. j = (j + 1);
  342. }
  343. }
  344. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  345. i = (i + 1);
  346. }
  347. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  348. for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
  349. if (!peer_array[i])
  350. break;
  351. for (j = 0; j < num_peers[i]; j++) {
  352. peer = peer_array[i][j];
  353. if (!peer)
  354. continue;
  355. (*func)(soc, peer, arg);
  356. dp_peer_unref_delete(peer, mod_id);
  357. }
  358. qdf_mem_free(peer_array[i]);
  359. }
  360. }
  361. /**
  362. * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
  363. *
  364. * This API will cache the peers in local allocated memory and calls
  365. * iterate function outside the lock.
  366. *
  367. * As this API is allocating new memory it is suggested to use this
  368. * only when lock cannot be held
  369. *
  370. * @soc : DP soc context
  371. * @func : function to be called for each peer
  372. * @arg : argument need to be passed to func
  373. * @mod_id : module_id
  374. *
  375. * Return: void
  376. */
  377. static inline void
  378. dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
  379. dp_peer_iter_func *func,
  380. void *arg,
  381. enum dp_mod_id mod_id)
  382. {
  383. struct dp_pdev *pdev;
  384. int i;
  385. if (!soc)
  386. return;
  387. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  388. pdev = soc->pdev_list[i];
  389. dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
  390. }
  391. }
#ifdef DP_PEER_STATE_DEBUG
/*
 * DP_PEER_STATE_ASSERT() - validate a peer state transition.
 * Debug build: logs the invalid shift and asserts.
 */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)
#else
/*
 * Non-debug variant: logs the invalid transition but does not assert,
 * so the state update proceeds anyway.
 */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif
  414. /**
  415. * dp_peer_state_cmp() - compare dp peer state
  416. *
  417. * @peer : DP peer
  418. * @state : state
  419. *
  420. * Return: true if state matches with peer state
  421. * false if it does not match
  422. */
  423. static inline bool
  424. dp_peer_state_cmp(struct dp_peer *peer,
  425. enum dp_peer_state state)
  426. {
  427. bool is_status_equal = false;
  428. qdf_spin_lock_bh(&peer->peer_state_lock);
  429. is_status_equal = (peer->peer_state == state);
  430. qdf_spin_unlock_bh(&peer->peer_state_lock);
  431. return is_status_equal;
  432. }
  433. /**
  434. * dp_peer_update_state() - update dp peer state
  435. *
  436. * @soc : core DP soc context
  437. * @peer : DP peer
  438. * @state : new state
  439. *
  440. * Return: None
  441. */
  442. static inline void
  443. dp_peer_update_state(struct dp_soc *soc,
  444. struct dp_peer *peer,
  445. enum dp_peer_state state)
  446. {
  447. uint8_t peer_state;
  448. qdf_spin_lock_bh(&peer->peer_state_lock);
  449. peer_state = peer->peer_state;
  450. switch (state) {
  451. case DP_PEER_STATE_INIT:
  452. DP_PEER_STATE_ASSERT
  453. (peer, state, (peer_state != DP_PEER_STATE_ACTIVE) ||
  454. (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
  455. break;
  456. case DP_PEER_STATE_ACTIVE:
  457. DP_PEER_STATE_ASSERT(peer, state,
  458. (peer_state == DP_PEER_STATE_INIT));
  459. break;
  460. case DP_PEER_STATE_LOGICAL_DELETE:
  461. DP_PEER_STATE_ASSERT(peer, state,
  462. (peer_state == DP_PEER_STATE_ACTIVE) ||
  463. (peer_state == DP_PEER_STATE_INIT));
  464. break;
  465. case DP_PEER_STATE_INACTIVE:
  466. DP_PEER_STATE_ASSERT
  467. (peer, state,
  468. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  469. break;
  470. case DP_PEER_STATE_FREED:
  471. if (peer->sta_self_peer)
  472. DP_PEER_STATE_ASSERT
  473. (peer, state, (peer_state == DP_PEER_STATE_INIT));
  474. else
  475. DP_PEER_STATE_ASSERT
  476. (peer, state,
  477. (peer_state == DP_PEER_STATE_INACTIVE) ||
  478. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  479. break;
  480. default:
  481. qdf_spin_unlock_bh(&peer->peer_state_lock);
  482. dp_alert("Invalid peer state %u for peer "QDF_MAC_ADDR_FMT,
  483. state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  484. return;
  485. }
  486. peer->peer_state = state;
  487. qdf_spin_unlock_bh(&peer->peer_state_lock);
  488. dp_info("Updating peer state from %u to %u mac "QDF_MAC_ADDR_FMT"\n",
  489. peer_state, state,
  490. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  491. }
  492. void dp_print_ast_stats(struct dp_soc *soc);
  493. QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  494. uint16_t hw_peer_id, uint8_t vdev_id,
  495. uint8_t *peer_mac_addr, uint16_t ast_hash,
  496. uint8_t is_wds);
  497. void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
  498. uint8_t vdev_id, uint8_t *peer_mac_addr,
  499. uint8_t is_wds, uint32_t free_wds_count);
  500. #ifdef WLAN_FEATURE_11BE_MLO
  501. /**
  502. * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
* @soc - generic soc handle
* @peer_id - ML peer_id from firmware
  505. * @peer_mac_addr - mac address of the peer
  506. * @mlo_ast_flow_info: MLO AST flow info
  507. *
  508. * associate the ML peer_id that firmware provided with peer entry
  509. * and update the ast table in the host with the hw_peer_id.
  510. *
  511. * Return: QDF_STATUS code
  512. */
  513. QDF_STATUS
  514. dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  515. uint8_t *peer_mac_addr,
  516. struct dp_mlo_flow_override_info *mlo_flow_info);
  517. /**
  518. * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
* @soc - generic soc handle
* @peer_id - peer_id from firmware
  521. *
  522. * Return: none
  523. */
  524. void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
  525. #endif
  526. void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
  527. enum cdp_sec_type sec_type, int is_unicast,
  528. u_int32_t *michael_key, u_int32_t *rx_pn);
  529. QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
  530. uint8_t tid, uint16_t win_sz);
  531. uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
  532. uint16_t peer_id, uint8_t *peer_mac);
  533. QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
  534. uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
  535. uint32_t flags);
  536. void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
  537. void dp_peer_ast_unmap_handler(struct dp_soc *soc,
  538. struct dp_ast_entry *ast_entry);
  539. int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
  540. struct dp_ast_entry *ast_entry, uint32_t flags);
  541. struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
  542. uint8_t *ast_mac_addr,
  543. uint8_t pdev_id);
  544. struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
  545. uint8_t *ast_mac_addr,
  546. uint8_t vdev_id);
  547. struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
  548. uint8_t *ast_mac_addr);
  549. uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
  550. struct dp_ast_entry *ast_entry);
  551. uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
  552. struct dp_ast_entry *ast_entry);
  553. void dp_peer_ast_set_type(struct dp_soc *soc,
  554. struct dp_ast_entry *ast_entry,
  555. enum cdp_txrx_ast_entry_type type);
  556. void dp_peer_ast_send_wds_del(struct dp_soc *soc,
  557. struct dp_ast_entry *ast_entry,
  558. struct dp_peer *peer);
  559. void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  560. struct cdp_soc *dp_soc,
  561. void *cookie,
  562. enum cdp_ast_free_status status);
  563. void dp_peer_ast_hash_remove(struct dp_soc *soc,
  564. struct dp_ast_entry *ase);
  565. void dp_peer_free_ast_entry(struct dp_soc *soc,
  566. struct dp_ast_entry *ast_entry);
  567. void dp_peer_unlink_ast_entry(struct dp_soc *soc,
  568. struct dp_ast_entry *ast_entry,
  569. struct dp_peer *peer);
  570. /**
  571. * dp_peer_mec_detach_entry() - Detach the MEC entry
  572. * @soc: SoC handle
  573. * @mecentry: MEC entry of the node
  574. * @ptr: pointer to free list
  575. *
  576. * The MEC entry is detached from MEC table and added to free_list
  577. * to free the object outside lock
  578. *
  579. * Return: None
  580. */
  581. void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
  582. void *ptr);
  583. /**
  584. * dp_peer_mec_free_list() - free the MEC entry from free_list
  585. * @soc: SoC handle
  586. * @ptr: pointer to free list
  587. *
  588. * Return: None
  589. */
  590. void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
  591. /**
  592. * dp_peer_mec_add_entry()
  593. * @soc: SoC handle
  594. * @vdev: vdev to which mec node belongs
  595. * @mac_addr: MAC address of mec node
  596. *
  597. * This function allocates and adds MEC entry to MEC table.
  598. * It assumes caller has taken the mec lock to protect the access to these
  599. * tables
  600. *
  601. * Return: QDF_STATUS
  602. */
  603. QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
  604. struct dp_vdev *vdev,
  605. uint8_t *mac_addr);
  606. /**
  607. * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
  608. * within pdev
  609. * @soc: SoC handle
  610. *
  611. * It assumes caller has taken the mec_lock to protect the access to
  612. * MEC hash table
  613. *
  614. * Return: MEC entry
  615. */
  616. struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
  617. uint8_t pdev_id,
  618. uint8_t *mec_mac_addr);
/*
 * DP_AST_ASSERT() - dump AST stats and BUG when _condition is false.
 * NOTE(review): the expansion references a variable named `soc`, so
 * callers must have a `struct dp_soc *soc` in scope.
 */
#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc);\
			QDF_BUG(_condition); \
		} \
	} while (0)
  626. /**
  627. * dp_peer_update_inactive_time - Update inactive time for peer
  628. * @pdev: pdev object
  629. * @tag_type: htt_tlv_tag type
* @tag_buf: tag buffer message
  631. */
  632. void
  633. dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
  634. uint32_t *tag_buf);
  635. #ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id() - set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * Return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
	/* No-op: QCA_MULTIPASS_SUPPORT is disabled */
}
/**
 * dp_set_vlan_groupkey() - set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * Return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	/* QCA_MULTIPASS_SUPPORT disabled: nothing to map, report success */
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_peer_multipass_list_init() - initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
	/* No-op: QCA_MULTIPASS_SUPPORT is disabled */
}
/**
 * dp_peer_multipass_list_remove() - remove peer from special peer list
 * @peer: peer handle
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
	/* No-op: QCA_MULTIPASS_SUPPORT is disabled */
}
  686. #else
  687. void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
  688. uint8_t vdev_id, uint8_t *peer_mac,
  689. uint16_t vlan_id);
  690. QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
  691. uint16_t vlan_id, uint16_t group_key);
  692. void dp_peer_multipass_list_init(struct dp_vdev *vdev);
  693. void dp_peer_multipass_list_remove(struct dp_peer *peer);
  694. #endif
  695. #ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
	/* No-op: QCA_PEER_MULTIQ_SUPPORT is disabled */
}
  706. /**
  707. * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
  708. * @soc - genereic soc handle
  709. * @is_wds - flag to indicate if peer is wds
  710. * @peer_id - peer_id from htt peer map message
  711. * @peer_mac_addr - mac address of the peer
  712. * @ast_info - ast flow override information from peer map
  713. *
  714. * Return: none
  715. */
  716. static inline
  717. void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
  718. bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
  719. struct dp_ast_flow_override_info *ast_info)
  720. {
  721. }
  722. #else
  723. void dp_peer_reset_flowq_map(struct dp_peer *peer);
  724. void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
  725. bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
  726. struct dp_ast_flow_override_info *ast_info);
  727. #endif
  728. /*
  729. * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
  730. * after deleting the entries (ie., setting valid=0)
  731. *
  732. * @soc: DP SOC handle
  733. * @cb_ctxt: Callback context
  734. * @reo_status: REO command status
  735. */
  736. void dp_rx_tid_delete_cb(struct dp_soc *soc,
  737. void *cb_ctxt,
  738. union hal_reo_status *reo_status);
  739. #ifdef QCA_PEER_EXT_STATS
  740. QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
  741. struct dp_peer *peer);
  742. void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
  743. struct dp_peer *peer);
  744. #else
/* Stub when QCA_PEER_EXT_STATS is disabled: no context to allocate */
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
						     struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}
/* Stub when QCA_PEER_EXT_STATS is disabled: nothing to free */
static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
						 struct dp_peer *peer)
{
}
  754. #endif
  755. struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
  756. struct dp_vdev *vdev,
  757. enum dp_mod_id mod_id);
  758. struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
  759. struct dp_vdev *vdev,
  760. enum dp_mod_id mod_id);
  761. void dp_peer_ast_table_detach(struct dp_soc *soc);
  762. void dp_peer_find_map_detach(struct dp_soc *soc);
  763. void dp_soc_wds_detach(struct dp_soc *soc);
  764. QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
  765. QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
  766. QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
  767. void dp_soc_wds_attach(struct dp_soc *soc);
  768. void dp_peer_mec_hash_detach(struct dp_soc *soc);
  769. void dp_peer_ast_hash_detach(struct dp_soc *soc);
  770. #ifdef FEATURE_AST
  771. /*
  772. * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
  773. * @soc - datapath soc handle
  774. * @peer - datapath peer handle
  775. *
  776. * Delete the AST entries belonging to a peer
  777. */
  778. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  779. struct dp_peer *peer)
  780. {
  781. struct dp_ast_entry *ast_entry, *temp_ast_entry;
  782. dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
  783. /*
  784. * Delete peer self ast entry. This is done to handle scenarios
  785. * where peer is freed before peer map is received(for ex in case
  786. * of auth disallow due to ACL) in such cases self ast is not added
  787. * to peer->ast_list.
  788. */
  789. if (peer->self_ast_entry) {
  790. dp_peer_del_ast(soc, peer->self_ast_entry);
  791. peer->self_ast_entry = NULL;
  792. }
  793. DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
  794. dp_peer_del_ast(soc, ast_entry);
  795. }
  796. #else
/* FEATURE_AST disabled: peers carry no AST entries, so nothing to delete */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
  801. #endif
  802. #ifdef FEATURE_MEC
  803. /**
  804. * dp_peer_mec_spinlock_create() - Create the MEC spinlock
  805. * @soc: SoC handle
  806. *
  807. * Return: none
  808. */
  809. void dp_peer_mec_spinlock_create(struct dp_soc *soc);
  810. /**
  811. * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
  812. * @soc: SoC handle
  813. *
  814. * Return: none
  815. */
  816. void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);
  817. /**
  818. * dp_peer_mec_flush_entries() - Delete all mec entries in table
  819. * @soc: Datapath SOC
  820. *
  821. * Return: None
  822. */
  823. void dp_peer_mec_flush_entries(struct dp_soc *soc);
  824. #else
/* FEATURE_MEC disabled: the MEC table and its lock are never created,
 * so all MEC maintenance helpers compile out to no-ops.
 */
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
  834. #endif
  835. #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
  836. /**
  837. * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
  838. * @soc : dp_soc handle
  839. * @peer: peer
  840. *
  841. * This function is used to send cache flush cmd to reo and
  842. * to register the callback to handle the dumping of the reo
843. * queue stats from DDR
  844. *
  845. * Return: none
  846. */
  847. void dp_send_cache_flush_for_rx_tid(
  848. struct dp_soc *soc, struct dp_peer *peer);
  849. /**
  850. * dp_get_rx_reo_queue_info() - Handler to get rx tid info
  851. * @soc : cdp_soc_t handle
  852. * @vdev_id: vdev id
  853. *
  854. * Handler to get rx tid info from DDR after h/w cache is
  855. * invalidated first using the cache flush cmd.
  856. *
  857. * Return: none
  858. */
  859. void dp_get_rx_reo_queue_info(
  860. struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
  861. /**
  862. * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
  863. * @soc : dp_soc handle
864. * @cb_ctxt: callback context
865. * @reo_status: REO command status
  866. *
  867. * This is the callback function registered after sending the reo cmd
  868. * to flush the h/w cache and invalidate it. In the callback the reo
  869. * queue desc info is dumped from DDR.
  870. *
  871. * Return: none
  872. */
  873. void dp_dump_rx_reo_queue_info(
  874. struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
  875. #else /* DUMP_REO_QUEUE_INFO_IN_DDR */
/* DUMP_REO_QUEUE_INFO_IN_DDR disabled: REO queue info dump is a no-op */
static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
  880. #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
  881. static inline int dp_peer_find_mac_addr_cmp(
  882. union dp_align_mac_addr *mac_addr1,
  883. union dp_align_mac_addr *mac_addr2)
  884. {
  885. /*
  886. * Intentionally use & rather than &&.
  887. * because the operands are binary rather than generic boolean,
  888. * the functionality is equivalent.
  889. * Using && has the advantage of short-circuited evaluation,
  890. * but using & has the advantage of no conditional branching,
  891. * which is a more significant benefit.
  892. */
  893. return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
  894. & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
  895. }
  896. /**
  897. * dp_peer_delete() - delete DP peer
  898. *
  899. * @soc: Datatpath soc
  900. * @peer: Datapath peer
  901. * @arg: argument to iter function
  902. *
  903. * Return: void
  904. */
  905. void dp_peer_delete(struct dp_soc *soc,
  906. struct dp_peer *peer,
  907. void *arg);
  908. #ifdef WLAN_FEATURE_11BE_MLO
  909. /* set peer type */
  910. #define DP_PEER_SET_TYPE(_peer, _type_val) \
  911. ((_peer)->peer_type = (_type_val))
  912. /* is MLO connection link peer */
  913. #define IS_MLO_DP_LINK_PEER(_peer) \
  914. ((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
  915. /* is MLO connection mld peer */
  916. #define IS_MLO_DP_MLD_PEER(_peer) \
  917. ((_peer)->peer_type == CDP_MLD_PEER_TYPE)
  918. #ifdef WLAN_MLO_MULTI_CHIP
  919. uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);
  920. struct dp_peer *
  921. dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
  922. uint8_t *peer_mac_addr,
  923. int mac_addr_is_aligned,
  924. uint8_t vdev_id,
  925. uint8_t chip_id,
  926. enum dp_mod_id mod_id);
  927. #else
/* WLAN_MLO_MULTI_CHIP disabled: single chip, id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}
  932. static inline struct dp_peer *
  933. dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
  934. uint8_t *peer_mac_addr,
  935. int mac_addr_is_aligned,
  936. uint8_t vdev_id,
  937. uint8_t chip_id,
  938. enum dp_mod_id mod_id)
  939. {
  940. return dp_peer_find_hash_find(soc, peer_mac_addr,
  941. mac_addr_is_aligned,
  942. vdev_id, mod_id);
  943. }
  944. #endif
  945. /**
  946. * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
  947. increase mld peer ref_cnt
  948. * @link_peer: link peer pointer
  949. * @mld_peer: mld peer pointer
  950. *
  951. * Return: none
  952. */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
			       struct dp_peer *mld_peer)
{
	/*
	 * Take a reference on the MLD peer so it stays alive for as long
	 * as this link peer points at it; the matching release is in
	 * dp_link_peer_del_mld_peer().
	 */
	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = mld_peer;
}
  961. /**
  962. * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
  963. decrease mld peer ref_cnt
  964. * @link_peer: link peer pointer
  965. *
  966. * Return: None
  967. */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
	/* drop the MLD peer reference taken in dp_link_peer_add_mld_peer() */
	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = NULL;
}
  974. /**
  975. * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
  976. * @mld_peer: mld peer pointer
  977. *
  978. * Return: None
  979. */
  980. static inline
  981. void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
  982. {
  983. int i;
  984. qdf_spinlock_create(&mld_peer->link_peers_info_lock);
  985. mld_peer->num_links = 0;
  986. for (i = 0; i < DP_MAX_MLO_LINKS; i++)
  987. mld_peer->link_peers[i].is_valid = false;
  988. }
  989. /**
  990. * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
  991. * @mld_peer: mld peer pointer
  992. *
  993. * Return: None
  994. */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	/* counterpart of dp_mld_peer_init_link_peers_info() */
	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}
  1000. /**
  1001. * dp_mld_peer_add_link_peer() - add link peer info to mld peer
  1002. * @mld_peer: mld dp peer pointer
  1003. * @link_peer: link dp peer pointer
  1004. *
  1005. * Return: None
  1006. */
  1007. static inline
  1008. void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
  1009. struct dp_peer *link_peer)
  1010. {
  1011. int i;
  1012. struct dp_peer_link_info *link_peer_info;
  1013. qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
  1014. for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
  1015. link_peer_info = &mld_peer->link_peers[i];
  1016. if (!link_peer_info->is_valid) {
  1017. qdf_mem_copy(link_peer_info->mac_addr.raw,
  1018. link_peer->mac_addr.raw,
  1019. QDF_MAC_ADDR_SIZE);
  1020. link_peer_info->is_valid = true;
  1021. link_peer_info->vdev_id = link_peer->vdev->vdev_id;
  1022. link_peer_info->chip_id =
  1023. dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
  1024. mld_peer->num_links++;
  1025. break;
  1026. }
  1027. }
  1028. qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
  1029. if (i == DP_MAX_MLO_LINKS)
  1030. dp_err("fail to add link peer" QDF_MAC_ADDR_FMT "to mld peer",
  1031. QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
  1032. }
  1033. /**
  1034. * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
  1035. * @mld_peer: MLD dp peer pointer
  1036. * @link_peer: link dp peer pointer
  1037. *
  1038. * Return: number of links left after deletion
  1039. */
  1040. static inline
  1041. uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
  1042. struct dp_peer *link_peer)
  1043. {
  1044. int i;
  1045. struct dp_peer_link_info *link_peer_info;
  1046. uint8_t num_links;
  1047. qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
  1048. for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
  1049. link_peer_info = &mld_peer->link_peers[i];
  1050. if (link_peer_info->is_valid &&
  1051. !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
  1052. &link_peer_info->mac_addr)) {
  1053. link_peer_info->is_valid = false;
  1054. mld_peer->num_links--;
  1055. break;
  1056. }
  1057. }
  1058. num_links = mld_peer->num_links;
  1059. qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
  1060. if (i == DP_MAX_MLO_LINKS)
  1061. dp_err("fail to del link peer" QDF_MAC_ADDR_FMT "to mld peer",
  1062. QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
  1063. return num_links;
  1064. }
  1065. /**
  1066. * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
  1067. increase link peers ref_cnt
  1068. * @soc: dp_soc handle
  1069. * @mld_peer: dp mld peer pointer
1070. * @mld_link_peers: structure that holds the link peers pointer array and number
  1071. * @mod_id: id of module requesting reference
  1072. *
  1073. * Return: None
  1074. */
  1075. static inline
  1076. void dp_get_link_peers_ref_from_mld_peer(
  1077. struct dp_soc *soc,
  1078. struct dp_peer *mld_peer,
  1079. struct dp_mld_link_peers *mld_link_peers,
  1080. enum dp_mod_id mod_id)
  1081. {
  1082. struct dp_peer *peer;
  1083. uint8_t i = 0, j = 0;
  1084. struct dp_peer_link_info *link_peer_info;
  1085. qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
  1086. qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
  1087. for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
  1088. link_peer_info = &mld_peer->link_peers[i];
  1089. if (link_peer_info->is_valid) {
  1090. peer = dp_link_peer_hash_find_by_chip_id(
  1091. soc,
  1092. link_peer_info->mac_addr.raw,
  1093. true,
  1094. link_peer_info->vdev_id,
  1095. link_peer_info->chip_id,
  1096. mod_id);
  1097. if (peer)
  1098. mld_link_peers->link_peers[j++] = peer;
  1099. }
  1100. }
  1101. qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
  1102. mld_link_peers->num_links = j;
  1103. }
  1104. /**
  1105. * dp_release_link_peers_ref() - release all link peers reference
1106. * @mld_link_peers: structure that holds the link peers pointer array and number
  1107. * @mod_id: id of module requesting reference
  1108. *
  1109. * Return: None.
  1110. */
  1111. static inline
  1112. void dp_release_link_peers_ref(
  1113. struct dp_mld_link_peers *mld_link_peers,
  1114. enum dp_mod_id mod_id)
  1115. {
  1116. struct dp_peer *peer;
  1117. uint8_t i;
  1118. for (i = 0; i < mld_link_peers->num_links; i++) {
  1119. peer = mld_link_peers->link_peers[i];
  1120. if (peer)
  1121. dp_peer_unref_delete(peer, mod_id);
  1122. mld_link_peers->link_peers[i] = NULL;
  1123. }
  1124. mld_link_peers->num_links = 0;
  1125. }
  1126. /**
  1127. * dp_peer_get_tgt_peer_hash_find() - get MLD dp_peer handle
  1128. for processing
  1129. * @soc: soc handle
  1130. * @peer_mac_addr: peer mac address
1131. * @mac_addr_is_aligned: is mac addr aligned
  1132. * @vdev_id: vdev_id
  1133. * @mod_id: id of module requesting reference
  1134. *
  1135. * for MLO connection, get corresponding MLD peer,
  1136. * otherwise get link peer for non-MLO case.
  1137. *
  1138. * return: peer in success
  1139. * NULL in failure
  1140. */
  1141. static inline
  1142. struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
  1143. uint8_t *peer_mac,
  1144. int mac_addr_is_aligned,
  1145. uint8_t vdev_id,
  1146. enum dp_mod_id mod_id)
  1147. {
  1148. struct dp_peer *ta_peer = NULL;
  1149. struct dp_peer *peer = dp_peer_find_hash_find(soc,
  1150. peer_mac, 0, vdev_id,
  1151. mod_id);
  1152. if (peer) {
  1153. /* mlo connection link peer, get mld peer with reference */
  1154. if (IS_MLO_DP_LINK_PEER(peer)) {
  1155. /* increase mld peer ref_cnt */
  1156. if (QDF_STATUS_SUCCESS ==
  1157. dp_peer_get_ref(soc, peer->mld_peer, mod_id))
  1158. ta_peer = peer->mld_peer;
  1159. else
  1160. ta_peer = NULL;
  1161. /* relese peer reference that added by hash find */
  1162. dp_peer_unref_delete(peer, mod_id);
  1163. } else {
  1164. /* mlo MLD peer or non-mlo link peer */
  1165. ta_peer = peer;
  1166. }
  1167. }
  1168. return ta_peer;
  1169. }
  1170. /**
  1171. * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
  1172. * @soc : core DP soc context
  1173. * @peer_id : peer id from peer object can be retrieved
  1174. * @mod_id : ID ot module requesting reference
  1175. *
  1176. * for MLO connection, get corresponding MLD peer,
  1177. * otherwise get link peer for non-MLO case.
  1178. *
  1179. * return: peer in success
  1180. * NULL in failure
  1181. */
  1182. static inline
  1183. struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
  1184. uint16_t peer_id,
  1185. enum dp_mod_id mod_id)
  1186. {
  1187. struct dp_peer *ta_peer = NULL;
  1188. struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1189. if (peer) {
  1190. /* mlo connection link peer, get mld peer with reference */
  1191. if (IS_MLO_DP_LINK_PEER(peer)) {
  1192. /* increase mld peer ref_cnt */
  1193. if (QDF_STATUS_SUCCESS ==
  1194. dp_peer_get_ref(soc, peer->mld_peer, mod_id))
  1195. ta_peer = peer->mld_peer;
  1196. else
  1197. ta_peer = NULL;
  1198. /* relese peer reference that added by hash find */
  1199. dp_peer_unref_delete(peer, mod_id);
  1200. } else {
  1201. /* mlo MLD peer or non-mlo link peer */
  1202. ta_peer = peer;
  1203. }
  1204. }
  1205. return ta_peer;
  1206. }
  1207. /**
  1208. * dp_peer_mlo_delete() - peer MLO related delete operation
  1209. * @soc: Soc handle
  1210. * @peer: DP peer handle
  1211. * Return: None
  1212. */
  1213. static inline
  1214. void dp_peer_mlo_delete(struct dp_soc *soc,
  1215. struct dp_peer *peer)
  1216. {
  1217. /* MLO connection link peer */
  1218. if (IS_MLO_DP_LINK_PEER(peer)) {
  1219. /* if last link peer deletion, delete MLD peer */
  1220. if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
  1221. dp_peer_delete(soc, peer->mld_peer, NULL);
  1222. }
  1223. }
  1224. /**
  1225. * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
  1226. * @soc: Soc handle
  1227. * @vdev_id: Vdev ID
  1228. * @peer_setup_info: peer setup information for MLO
  1229. */
  1230. QDF_STATUS dp_peer_mlo_setup(
  1231. struct dp_soc *soc,
  1232. struct dp_peer *peer,
  1233. uint8_t vdev_id,
  1234. struct cdp_peer_setup_info *setup_info);
  1235. #else
  1236. #define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
  1237. #define IS_MLO_DP_LINK_PEER(_peer) false
  1238. #define IS_MLO_DP_MLD_PEER(_peer) false
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	/* non-MLO build: the target peer is simply the hash-lookup result */
	return dp_peer_find_hash_find(soc, peer_mac,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
}
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	/* non-MLO build: the target peer is just the id-lookup result */
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}
static inline
QDF_STATUS dp_peer_mlo_setup(
	struct dp_soc *soc,
	struct dp_peer *peer,
	uint8_t vdev_id,
	struct cdp_peer_setup_info *setup_info)
{
	/* non-MLO build: no MLD peer to create, always succeeds */
	return QDF_STATUS_SUCCESS;
}
/* non-MLO build: MLD bookkeeping helpers compile out to no-ops */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_soc *soc,
			struct dp_peer *peer)
{
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
}
/* non-MLO build: single chip, id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	/* non-MLO build: chip_id is unused, fall back to the plain lookup */
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
  1304. #endif /* WLAN_FEATURE_11BE_MLO */
  1305. #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
  1306. /**
  1307. * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
  1308. * @soc: Soc handle
  1309. * @peer: DP peer handle for ML peer
  1310. * @peer_id: peer_id
  1311. * Return: None
  1312. */
  1313. void dp_mlo_partner_chips_map(struct dp_soc *soc,
  1314. struct dp_peer *peer,
  1315. uint16_t peer_id);
  1316. /**
  1317. * dp_mlo_partner_chips_unmap() - Unmap MLO peers to partner SOCs
  1318. * @soc: Soc handle
  1319. * @peer_id: peer_id
  1320. * Return: None
  1321. */
  1322. void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
  1323. uint16_t peer_id);
  1324. #else
/* non-MLO / single-chip build: partner chip peer (un)mapping is a no-op */
static inline void dp_mlo_partner_chips_map(struct dp_soc *soc,
					    struct dp_peer *peer,
					    uint16_t peer_id)
{
}

static inline void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
					      uint16_t peer_id)
{
}
  1334. #endif
  1335. static inline
  1336. QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
  1337. {
  1338. uint8_t i;
  1339. if (IS_MLO_DP_MLD_PEER(peer)) {
  1340. dp_peer_info("skip for mld peer");
  1341. return QDF_STATUS_SUCCESS;
  1342. }
  1343. if (peer->rx_tid) {
  1344. QDF_BUG(0);
  1345. dp_peer_err("peer rx_tid mem already exist");
  1346. return QDF_STATUS_E_FAILURE;
  1347. }
  1348. peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
  1349. sizeof(struct dp_rx_tid));
  1350. if (!peer->rx_tid) {
  1351. dp_err("fail to alloc tid for peer" QDF_MAC_ADDR_FMT,
  1352. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  1353. return QDF_STATUS_E_NOMEM;
  1354. }
  1355. qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
  1356. for (i = 0; i < DP_MAX_TIDS; i++)
  1357. qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
  1358. return QDF_STATUS_SUCCESS;
  1359. }
  1360. static inline
  1361. void dp_peer_rx_tids_destroy(struct dp_peer *peer)
  1362. {
  1363. uint8_t i;
  1364. if (!IS_MLO_DP_LINK_PEER(peer)) {
  1365. for (i = 0; i < DP_MAX_TIDS; i++)
  1366. qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);
  1367. qdf_mem_free(peer->rx_tid);
  1368. }
  1369. peer->rx_tid = NULL;
  1370. }
  1371. #endif /* _DP_PEER_H_ */