/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_PEER_H_
#define _DP_PEER_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_types.h"

#define DP_INVALID_PEER_ID 0xffff

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

#define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
#define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)

typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);

/**
 * dp_peer_unref_delete() - release a reference held on a peer
 *
 * @peer: DP peer
 * @id: id of the module releasing the reference
 *
 * Return: None
 */
void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);

/**
 * dp_peer_get_ref() - Acquire a reference on a peer object
 *
 * @soc: core DP soc context
 * @peer: DP peer
 * @mod_id: id of module requesting the reference
 *
 * Return: QDF_STATUS_SUCCESS if the reference was held successfully,
 *	   else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
			   struct dp_peer *peer,
			   enum dp_mod_id mod_id)
{
	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
		return QDF_STATUS_E_INVAL;

	if (mod_id > DP_MOD_ID_RX)
		qdf_atomic_inc(&peer->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}

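/*
 * Note: dp_peer_get_ref() takes no lock itself; it only fails once the
 * reference count has already dropped to zero. Callers are expected to
 * either hold a reference on the peer already or to call it under
 * soc->peer_map_lock, as the lookup helpers below do.
 */
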
/**
 * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @mod_id: id of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline struct dp_peer *
__dp_peer_get_ref_by_id(struct dp_soc *soc,
			uint16_t peer_id,
			enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peers) ? NULL :
				soc->peer_id_to_obj_map[peer_id];
	if (!peer ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}

/**
 * dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *			     if peer state is active
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @mod_id: id of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline
struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
				      uint16_t peer_id,
				      enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peers) ? NULL :
				soc->peer_id_to_obj_map[peer_id];
	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}

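/*
 * Typical usage of the lookup/reference APIs (illustrative sketch only;
 * DP_MOD_ID_CDP is just an example value of enum dp_mod_id):
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
 *	if (!peer)
 *		return;		(peer unmapped or in logical delete)
 *
 *	... use peer; the held reference keeps it alive ...
 *
 *	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */
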
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

/**
 * dp_clear_peer_internal() - mark the peer as disconnected and flush
 *			      any rx frames cached for it
 * @soc: core DP soc context
 * @peer: DP peer
 *
 * Return: None
 */
static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);
}

/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev: DP vdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}

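/*
 * Illustrative sketch of an iterator callback (hypothetical names, not
 * part of this header). The iterator holds a reference on the peer for
 * the duration of the callback, so the callback may use the peer freely
 * but must not release or cache the pointer:
 *
 *	static void dp_peer_count_cb(struct dp_soc *soc,
 *				     struct dp_peer *peer, void *arg)
 *	{
 *		uint32_t *count = (uint32_t *)arg;
 *
 *		(*count)++;
 *	}
 *
 *	uint32_t count = 0;
 *
 *	dp_vdev_iterate_peer(vdev, dp_peer_count_cb, &count, DP_MOD_ID_CDP);
 */
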
/**
 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
 *
 * @pdev: DP pdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev;

	if (!pdev)
		return;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

/**
 * dp_soc_iterate_peer() - API to iterate through all peers of soc
 *
 * @soc: DP soc context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
		    enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
	}
}

/**
 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev peer list
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it is suggested only for use when
 * the lock cannot be held while calling func.
 *
 * @vdev: DP vdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_peer **peer_array = NULL;
	int i = 0;
	uint32_t num_peers = 0;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	num_peers = vdev->num_peers;

	soc = vdev->pdev->soc;

	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
	if (!peer_array)
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (i >= num_peers)
			break;

		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
			peer_array[i] = peer;
			i = (i + 1);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

	for (i = 0; i < num_peers; i++) {
		peer = peer_array[i];

		if (!peer)
			continue;

		(*func)(soc, peer, arg);
		dp_peer_unref_delete(peer, mod_id);
	}

	qdf_mem_free(peer_array);
}

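/*
 * Design note on the _lock_safe iterators: they trade a transient
 * allocation for running @func with the peer list lock released, so they
 * are intended for callbacks that cannot execute under that lock (for
 * example, callbacks that themselves acquire the peer list lock).
 */
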
/**
 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it is suggested only for use when
 * the lock cannot be held while calling func.
 *
 * @pdev: DP pdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
	int i = 0;
	int j = 0;
	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};

	if (!pdev || !pdev->soc)
		return;

	soc = pdev->soc;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		num_peers[i] = vdev->num_peers;
		peer_array[i] = qdf_mem_malloc(num_peers[i] *
					       sizeof(struct dp_peer *));
		if (!peer_array[i])
			break;

		/* each per-vdev array is indexed from 0 */
		j = 0;
		qdf_spin_lock_bh(&vdev->peer_list_lock);
		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
				   peer_list_elem,
				   tmp_peer) {
			if (j >= num_peers[i])
				break;

			if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
				peer_array[i][j] = peer;
				j = (j + 1);
			}
		}
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		i = (i + 1);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
		if (!peer_array[i])
			break;

		for (j = 0; j < num_peers[i]; j++) {
			peer = peer_array[i][j];

			if (!peer)
				continue;

			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}

		qdf_mem_free(peer_array[i]);
	}
}

/**
 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it is suggested only for use when
 * the lock cannot be held while calling func.
 *
 * @soc: DP soc context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
			      dp_peer_iter_func *func,
			      void *arg,
			      enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
	}
}

#ifdef DP_PEER_STATE_DEBUG
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)

#else
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif

/**
 * dp_peer_state_cmp() - compare dp peer state
 *
 * @peer: DP peer
 * @state: state to compare against
 *
 * Return: true if state matches with peer state
 *	   false if it does not match
 */
static inline bool
dp_peer_state_cmp(struct dp_peer *peer,
		  enum dp_peer_state state)
{
	bool is_status_equal = false;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	is_status_equal = (peer->peer_state == state);
	qdf_spin_unlock_bh(&peer->peer_state_lock);

	return is_status_equal;
}

/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc: core DP soc context
 * @peer: DP peer
 * @state: new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
		     struct dp_peer *peer,
		     enum dp_peer_state state)
{
	uint8_t peer_state;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	peer_state = peer->peer_state;

	switch (state) {
	case DP_PEER_STATE_INIT:
		/* INIT is valid only if the peer is neither ACTIVE nor
		 * already in LOGICAL_DELETE
		 */
		DP_PEER_STATE_ASSERT
			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_ACTIVE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_LOGICAL_DELETE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_ACTIVE) ||
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_INACTIVE:
		DP_PEER_STATE_ASSERT
			(peer, state,
			 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_FREED:
		if (peer->sta_self_peer)
			DP_PEER_STATE_ASSERT
			(peer, state, (peer_state == DP_PEER_STATE_INIT));
		else
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INACTIVE) ||
				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	default:
		qdf_spin_unlock_bh(&peer->peer_state_lock);
		dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return;
	}
	peer->peer_state = state;
	qdf_spin_unlock_bh(&peer->peer_state_lock);
	dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
		peer_state, state,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}

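/*
 * Summary of the legal transitions enforced above (derived from the
 * assertions in dp_peer_update_state()):
 *
 *	INIT -> ACTIVE -> LOGICAL_DELETE -> INACTIVE -> FREED
 *	INIT -> LOGICAL_DELETE -> FREED
 *	INIT -> FREED		(sta self peer only)
 */
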
void dp_print_ast_stats(struct dp_soc *soc);
QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
				  uint16_t hw_peer_id, uint8_t vdev_id,
				  uint8_t *peer_mac_addr, uint16_t ast_hash,
				  uint8_t is_wds);
void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			      uint8_t vdev_id, uint8_t *peer_mac_addr,
			      uint8_t is_wds, uint32_t free_wds_count);
void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
			   enum cdp_sec_type sec_type, int is_unicast,
			   u_int32_t *michael_key, u_int32_t *rx_pn);
QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
				   uint8_t tid, uint16_t win_sz);
uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
				    uint16_t peer_id, uint8_t *peer_mac);
QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
			   uint8_t *mac_addr,
			   enum cdp_txrx_ast_entry_type type,
			   uint32_t flags);
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
void dp_peer_ast_unmap_handler(struct dp_soc *soc,
			       struct dp_ast_entry *ast_entry);
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags);
struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id);
struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id);
struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr);
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry);
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry);
void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type);
void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);
void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status);
void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase);
void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry);
void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc);\
			QDF_BUG(_condition); \
		} \
	} while (0)

/**
 * dp_peer_update_inactive_time - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf);

#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id: set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey: set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init: initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove: remove peer from special peer list
 * @peer: peer handle
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif

#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer - dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl - generic soc handle
 * @is_wds - flag to indicate if peer is wds
 * @peer_id - peer_id from htt peer map message
 * @peer_mac_addr - mac address of the peer
 * @ast_info - ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info);
#endif

/**
 * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
 * @soc: DP SOC handle
 * @pdev_id: id of DP pdev handle
 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
 * @is_tx_pkt_cap_enable: enable/disable/delete/print
 *			  Tx packet capture in monitor mode
 * @peer_mac: MAC address for which the above need to be enabled/disabled
 *
 * Return: QDF_STATUS_SUCCESS if Rx & Tx capture is updated for the peer,
 *	   error status otherwise
 */
QDF_STATUS
dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
				  uint8_t pdev_id,
				  bool is_rx_pkt_cap_enable,
				  uint8_t is_tx_pkt_cap_enable,
				  uint8_t *peer_mac);

/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);

#ifndef WLAN_TX_PKT_CAPTURE_ENH
/**
 * dp_peer_tid_queue_init() - Initialize ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
{
}

/**
 * dp_peer_tid_peer_id_update() - update peer_id to tid structure
 * @peer: Datapath peer
 * @peer_id: peer_id
 *
 */
static inline
void dp_peer_tid_peer_id_update(struct dp_peer *peer, uint16_t peer_id)
{
}

/**
 * dp_peer_tid_queue_cleanup() - remove ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
{
}

/**
 * dp_peer_update_80211_hdr() - dp peer update 80211 hdr
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
static inline void
dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
{
}
#endif

#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
				       struct dp_peer *peer);
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
				   struct dp_peer *peer);
#else
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
						     struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
						 struct dp_peer *peer)
{
}
#endif

struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   enum dp_mod_id mod_id);
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
						struct dp_vdev *vdev,
						enum dp_mod_id mod_id);

#ifdef FEATURE_AST
/*
 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
 * @soc - datapath soc handle
 * @peer - datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received (e.g., in case
	 * of auth disallow due to ACL); in such cases the self ast is not
	 * added to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}
#else
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif

#endif /* _DP_PEER_H_ */