/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_PEER_H_
#define _DP_PEER_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_types.h"

#define DP_INVALID_PEER_ID 0xffff

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
                               void *arg);

void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
/**
 * dp_peer_get_ref() - Acquire a reference on a peer object
 *
 * @soc : core DP soc context
 * @peer : DP peer
 * @mod_id : id of module requesting the reference
 *
 * Return: QDF_STATUS_SUCCESS if reference held successfully
 *         else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
                           struct dp_peer *peer,
                           enum dp_mod_id mod_id)
{
        if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
                return QDF_STATUS_E_INVAL;

        if (mod_id > DP_MOD_ID_RX)
                qdf_atomic_inc(&peer->mod_refs[mod_id]);

        return QDF_STATUS_SUCCESS;
}
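
/*
 * Usage sketch (illustrative only, not part of the driver): every
 * successful dp_peer_get_ref() must be balanced by a matching
 * dp_peer_unref_delete() with the same mod_id. The helper name
 * dp_example_touch_peer() and the choice of DP_MOD_ID_CDP are
 * assumptions made for the example.
 *
 *      static void dp_example_touch_peer(struct dp_soc *soc,
 *                                        struct dp_peer *peer)
 *      {
 *              if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CDP) !=
 *                  QDF_STATUS_SUCCESS)
 *                      return;
 *
 *              ... peer is safe to use until the reference is dropped ...
 *
 *              dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *      }
 */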
/**
 * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *
 * @soc : core DP soc context
 * @peer_id : peer id from which peer object can be retrieved
 * @mod_id : id of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline struct dp_peer *
__dp_peer_get_ref_by_id(struct dp_soc *soc,
                        uint16_t peer_id,
                        enum dp_mod_id mod_id)
{
        struct dp_peer *peer;

        qdf_spin_lock_bh(&soc->peer_map_lock);
        peer = (peer_id >= soc->max_peers) ? NULL :
                                soc->peer_id_to_obj_map[peer_id];

        if (!peer ||
            (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
                qdf_spin_unlock_bh(&soc->peer_map_lock);
                return NULL;
        }

        qdf_spin_unlock_bh(&soc->peer_map_lock);

        return peer;
}
/**
 * dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *                           if peer state is active
 *
 * @soc : core DP soc context
 * @peer_id : peer id from which peer object can be retrieved
 * @mod_id : ID of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline
struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
                                      uint16_t peer_id,
                                      enum dp_mod_id mod_id)
{
        struct dp_peer *peer;

        qdf_spin_lock_bh(&soc->peer_map_lock);
        peer = (peer_id >= soc->max_peers) ? NULL :
                                soc->peer_id_to_obj_map[peer_id];

        if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
            (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
                qdf_spin_unlock_bh(&soc->peer_map_lock);
                return NULL;
        }

        qdf_spin_unlock_bh(&soc->peer_map_lock);

        return peer;
}
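
/*
 * Usage sketch (illustrative only): look up a peer by id, use it, then
 * drop the reference. DP_MOD_ID_CDP is just an example module id; a NULL
 * return means the id was unmapped, out of range, or the peer is at or
 * past logical delete.
 *
 *      struct dp_peer *peer;
 *
 *      peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
 *      if (!peer)
 *              return;
 *
 *      ... use peer ...
 *
 *      dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */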
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
        qdf_spin_lock_bh(&peer->peer_info_lock);
        peer->state = OL_TXRX_PEER_STATE_DISC;
        qdf_spin_unlock_bh(&peer->peer_info_lock);

        dp_rx_flush_rx_cached(peer, true);
}
/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev : DP vdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
                     enum dp_mod_id mod_id)
{
        struct dp_peer *peer;
        struct dp_peer *tmp_peer;
        struct dp_soc *soc = NULL;

        if (!vdev || !vdev->pdev || !vdev->pdev->soc)
                return;

        soc = vdev->pdev->soc;

        qdf_spin_lock_bh(&vdev->peer_list_lock);
        TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
                           peer_list_elem,
                           tmp_peer) {
                if (dp_peer_get_ref(soc, peer, mod_id) ==
                                        QDF_STATUS_SUCCESS) {
                        (*func)(soc, peer, arg);
                        dp_peer_unref_delete(peer, mod_id);
                }
        }
        qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
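
/*
 * Usage sketch (illustrative only): a dp_peer_iter_func callback and a
 * matching iterator call. The callback name and the counter argument are
 * assumptions made for the example. Note that func runs with
 * vdev->peer_list_lock held (a BH spinlock), so it must not sleep or
 * re-take peer list locks.
 *
 *      static void dp_example_count_peer(struct dp_soc *soc,
 *                                        struct dp_peer *peer, void *arg)
 *      {
 *              (*(uint32_t *)arg)++;
 *      }
 *
 *      uint32_t count = 0;
 *
 *      dp_vdev_iterate_peer(vdev, dp_example_count_peer, &count,
 *                           DP_MOD_ID_CDP);
 */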
/**
 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
 *
 * @pdev : DP pdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
                     enum dp_mod_id mod_id)
{
        struct dp_vdev *vdev;

        if (!pdev)
                return;

        qdf_spin_lock_bh(&pdev->vdev_list_lock);
        DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
                dp_vdev_iterate_peer(vdev, func, arg, mod_id);
        qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}
/**
 * dp_soc_iterate_peer() - API to iterate through all peers of soc
 *
 * @soc : DP soc context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
                    enum dp_mod_id mod_id)
{
        struct dp_pdev *pdev;
        int i;

        if (!soc)
                return;

        for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
                pdev = soc->pdev_list[i];
                dp_pdev_iterate_peer(pdev, func, arg, mod_id);
        }
}
/**
 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev peer list
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it is suggested only when the lock
 * cannot be held while calling func.
 *
 * @vdev : DP vdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
                               dp_peer_iter_func *func,
                               void *arg,
                               enum dp_mod_id mod_id)
{
        struct dp_peer *peer;
        struct dp_peer *tmp_peer;
        struct dp_soc *soc = NULL;
        struct dp_peer **peer_array = NULL;
        int i = 0;
        uint32_t num_peers = 0;

        if (!vdev || !vdev->pdev || !vdev->pdev->soc)
                return;

        num_peers = vdev->num_peers;

        soc = vdev->pdev->soc;

        peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
        if (!peer_array)
                return;

        qdf_spin_lock_bh(&vdev->peer_list_lock);
        TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
                           peer_list_elem,
                           tmp_peer) {
                if (i >= num_peers)
                        break;

                if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
                        peer_array[i] = peer;
                        i = (i + 1);
                }
        }
        qdf_spin_unlock_bh(&vdev->peer_list_lock);

        for (i = 0; i < num_peers; i++) {
                peer = peer_array[i];

                if (!peer)
                        continue;

                (*func)(soc, peer, arg);
                dp_peer_unref_delete(peer, mod_id);
        }

        qdf_mem_free(peer_array);
}
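
/*
 * A note on choosing between the two vdev iterators (an assumption about
 * intended use, not stated in the driver): dp_vdev_iterate_peer() invokes
 * func under vdev->peer_list_lock, so it suits short, non-blocking
 * callbacks; this lock-safe variant invokes func with no list lock held,
 * at the cost of a temporary allocation, e.g.:
 *
 *      dp_vdev_iterate_peer_lock_safe(vdev, dp_example_teardown_peer,
 *                                     NULL, DP_MOD_ID_CDP);
 *
 * where dp_example_teardown_peer() is a hypothetical callback that may
 * itself need the peer list lock.
 */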
/**
 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it is suggested only when the lock
 * cannot be held while calling func.
 *
 * @pdev : DP pdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
                               dp_peer_iter_func *func,
                               void *arg,
                               enum dp_mod_id mod_id)
{
        struct dp_peer *peer;
        struct dp_peer *tmp_peer;
        struct dp_soc *soc = NULL;
        struct dp_vdev *vdev = NULL;
        struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
        int i = 0;
        int j = 0;
        uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};

        if (!pdev || !pdev->soc)
                return;

        soc = pdev->soc;

        qdf_spin_lock_bh(&pdev->vdev_list_lock);
        DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
                num_peers[i] = vdev->num_peers;
                peer_array[i] = qdf_mem_malloc(num_peers[i] *
                                               sizeof(struct dp_peer *));
                if (!peer_array[i])
                        break;

                j = 0;  /* restart the peer index for this vdev's array */
                qdf_spin_lock_bh(&vdev->peer_list_lock);
                TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
                                   peer_list_elem,
                                   tmp_peer) {
                        if (j >= num_peers[i])
                                break;

                        if (dp_peer_get_ref(soc, peer, mod_id) ==
                                                QDF_STATUS_SUCCESS) {
                                peer_array[i][j] = peer;
                                j = (j + 1);
                        }
                }
                qdf_spin_unlock_bh(&vdev->peer_list_lock);
                i = (i + 1);
        }
        qdf_spin_unlock_bh(&pdev->vdev_list_lock);

        for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
                if (!peer_array[i])
                        break;

                for (j = 0; j < num_peers[i]; j++) {
                        peer = peer_array[i][j];

                        if (!peer)
                                continue;

                        (*func)(soc, peer, arg);
                        dp_peer_unref_delete(peer, mod_id);
                }

                qdf_mem_free(peer_array[i]);
        }
}
/**
 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it is suggested only when the lock
 * cannot be held while calling func.
 *
 * @soc : DP soc context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
                              dp_peer_iter_func *func,
                              void *arg,
                              enum dp_mod_id mod_id)
{
        struct dp_pdev *pdev;
        int i;

        if (!soc)
                return;

        for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
                pdev = soc->pdev_list[i];
                dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
        }
}
/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc : core DP soc context
 * @peer : DP peer
 * @state : new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
                     struct dp_peer *peer,
                     enum dp_peer_state state)
{
        uint8_t peer_state = peer->peer_state;

        switch (state) {
        case DP_PEER_STATE_INIT:
                /* a peer may be (re)initialized only if it is neither
                 * active nor pending logical delete; the original "||"
                 * here was a tautology that could never fire
                 */
                QDF_ASSERT((peer_state != DP_PEER_STATE_ACTIVE) &&
                           (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
                break;

        case DP_PEER_STATE_ACTIVE:
                QDF_ASSERT(peer_state == DP_PEER_STATE_INIT);
                break;

        case DP_PEER_STATE_LOGICAL_DELETE:
                QDF_ASSERT((peer_state == DP_PEER_STATE_ACTIVE) ||
                           (peer_state == DP_PEER_STATE_INIT));
                break;

        case DP_PEER_STATE_INACTIVE:
                QDF_ASSERT(peer_state == DP_PEER_STATE_LOGICAL_DELETE);
                break;

        case DP_PEER_STATE_FREED:
                if (peer->sta_self_peer)
                        QDF_ASSERT(peer_state == DP_PEER_STATE_INIT);
                else
                        QDF_ASSERT((peer_state ==
                                    DP_PEER_STATE_INACTIVE) ||
                                   (peer_state ==
                                    DP_PEER_STATE_LOGICAL_DELETE));
                break;

        default:
                QDF_ASSERT(0);
                break;
        }
        qdf_info("Updating peer state from %u to %u mac %pM\n",
                 peer_state, state, peer->mac_addr.raw);
        peer->peer_state = state;
}
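
/*
 * Transition summary, derived from the asserts above (a reading aid for
 * the state machine, not an additional definition):
 *
 *      INIT -> ACTIVE -> LOGICAL_DELETE -> INACTIVE -> FREED
 *      INIT -> LOGICAL_DELETE
 *      INIT -> FREED                   (sta self peer only)
 *      LOGICAL_DELETE -> FREED         (non self peer)
 */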
void dp_print_ast_stats(struct dp_soc *soc);

QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
                                  uint16_t hw_peer_id, uint8_t vdev_id,
                                  uint8_t *peer_mac_addr, uint16_t ast_hash,
                                  uint8_t is_wds);
void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
                              uint8_t vdev_id, uint8_t *peer_mac_addr,
                              uint8_t is_wds, uint32_t free_wds_count);
void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
                           enum cdp_sec_type sec_type, int is_unicast,
                           u_int32_t *michael_key, u_int32_t *rx_pn);

QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
                                   uint8_t tid, uint16_t win_sz);

uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
                                    uint16_t peer_id, uint8_t *peer_mac);

QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
                           uint8_t *mac_addr,
                           enum cdp_txrx_ast_entry_type type,
                           uint32_t flags);

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);

void dp_peer_ast_unmap_handler(struct dp_soc *soc,
                               struct dp_ast_entry *ast_entry);

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
                       struct dp_ast_entry *ast_entry, uint32_t flags);

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
                                                     uint8_t *ast_mac_addr,
                                                     uint8_t pdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
                                                     uint8_t *ast_mac_addr,
                                                     uint8_t vdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
                                               uint8_t *ast_mac_addr);

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
                                struct dp_ast_entry *ast_entry);

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
                                 struct dp_ast_entry *ast_entry);

void dp_peer_ast_set_type(struct dp_soc *soc,
                          struct dp_ast_entry *ast_entry,
                          enum cdp_txrx_ast_entry_type type);

void dp_peer_ast_send_wds_del(struct dp_soc *soc,
                              struct dp_ast_entry *ast_entry,
                              struct dp_peer *peer);

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
                           struct cdp_soc *dp_soc,
                           void *cookie,
                           enum cdp_ast_free_status status);

void dp_peer_ast_hash_remove(struct dp_soc *soc,
                             struct dp_ast_entry *ase);

void dp_peer_free_ast_entry(struct dp_soc *soc,
                            struct dp_ast_entry *ast_entry);

void dp_peer_unlink_ast_entry(struct dp_soc *soc,
                              struct dp_ast_entry *ast_entry,
                              struct dp_peer *peer);

#define DP_AST_ASSERT(_condition) \
        do { \
                if (!(_condition)) { \
                        dp_print_ast_stats(soc); \
                        QDF_BUG(_condition); \
                } \
        } while (0)
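
/*
 * Usage sketch (illustrative): DP_AST_ASSERT() expands to a call to
 * dp_print_ast_stats(soc), so it assumes a variable named 'soc'
 * (struct dp_soc *) is in scope at the call site, e.g.:
 *
 *      struct dp_soc *soc = vdev->pdev->soc;
 *
 *      DP_AST_ASSERT(ast_entry);
 */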
/**
 * dp_peer_update_inactive_time - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
                             uint32_t *tag_buf);
#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id: set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
                         uint8_t vdev_id, uint8_t *peer_mac,
                         uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey: set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
                                uint16_t vlan_id, uint16_t group_key)
{
        return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init: initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove: remove peer from special peer list
 * @peer: peer handle
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
                         uint8_t vdev_id, uint8_t *peer_mac,
                         uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
                                uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif
#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer - dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl - generic soc handle
 * @is_wds - flag to indicate if peer is wds
 * @peer_id - peer_id from htt peer map message
 * @peer_mac_addr - mac address of the peer
 * @ast_info - ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
                bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
                struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);

void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
                bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
                struct dp_ast_flow_override_info *ast_info);
#endif
/**
 * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
 * @soc: DP SOC handle
 * @pdev_id: id of DP pdev handle
 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
 * @is_tx_pkt_cap_enable: enable/disable/delete/print
 *                        Tx packet capture in monitor mode
 * @peer_mac: MAC address for which the above need to be enabled/disabled
 *
 * Return: QDF_STATUS_SUCCESS if Rx & Tx capture is updated for the peer,
 *         error status otherwise
 */
QDF_STATUS
dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
                                  uint8_t pdev_id,
                                  bool is_rx_pkt_cap_enable,
                                  uint8_t is_tx_pkt_cap_enable,
                                  uint8_t *peer_mac);
/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
                         void *cb_ctxt,
                         union hal_reo_status *reo_status);
#ifndef WLAN_TX_PKT_CAPTURE_ENH
/**
 * dp_peer_tid_queue_init() - Initialize ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
{
}

/**
 * dp_peer_tid_peer_id_update() - update peer_id to tid structure
 * @peer: Datapath peer
 * @peer_id: peer_id
 *
 */
static inline
void dp_peer_tid_peer_id_update(struct dp_peer *peer, uint16_t peer_id)
{
}

/**
 * dp_peer_tid_queue_cleanup() - remove ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
{
}

/**
 * dp_peer_update_80211_hdr() - dp peer update 80211 hdr
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
static inline void
dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
{
}
#endif
#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
                                       struct dp_peer *peer);
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
                                   struct dp_peer *peer);
#else
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
                                                     struct dp_peer *peer)
{
        return QDF_STATUS_SUCCESS;
}

static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
                                                 struct dp_peer *peer)
{
}
#endif

struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
                                           struct dp_vdev *vdev,
                                           enum dp_mod_id mod_id);
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
                                                struct dp_vdev *vdev,
                                                enum dp_mod_id mod_id);
#endif /* _DP_PEER_H_ */