/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_PEER_H_
#define _DP_PEER_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_types.h"

#define DP_INVALID_PEER_ID 0xffff

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);

void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
/**
 * dp_peer_get_ref() - Acquire a reference on the given peer object,
 * if it is still alive
 *
 * @soc		: core DP soc context
 * @peer	: DP peer
 * @mod_id	: id of module requesting the reference
 *
 * Return: QDF_STATUS_SUCCESS if the reference was held successfully,
 *	   else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
			   struct dp_peer *peer,
			   enum dp_mod_id mod_id)
{
	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
		return QDF_STATUS_E_INVAL;

	if (mod_id > DP_MOD_ID_RX)
		qdf_atomic_inc(&peer->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}
/**
 * __dp_peer_get_ref_by_id() - Returns the peer object for the given
 * peer id, irrespective of the peer's state
 *
 * @soc		: core DP soc context
 * @peer_id	: peer id from which the peer object can be retrieved
 * @mod_id	: id of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline struct dp_peer *
__dp_peer_get_ref_by_id(struct dp_soc *soc,
			uint16_t peer_id,
			enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peers) ? NULL :
		soc->peer_id_to_obj_map[peer_id];

	if (!peer ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}
/**
 * dp_peer_get_ref_by_id() - Returns the peer object for the given
 * peer id, only if the peer state is active
 *
 * @soc		: core DP soc context
 * @peer_id	: peer id from which the peer object can be retrieved
 * @mod_id	: id of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline
struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
				      uint16_t peer_id,
				      enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peers) ? NULL :
		soc->peer_id_to_obj_map[peer_id];

	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}
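
/*
 * Example usage of the reference APIs above (an illustrative sketch,
 * not part of this header's contract; DP_MOD_ID_CDP is assumed here to
 * be a valid enum dp_mod_id value from dp_types.h). Every successful
 * get must be paired with dp_peer_unref_delete() under the same mod_id:
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
 *	if (peer) {
 *		... use peer; it cannot be freed while the ref is held ...
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *	}
 */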
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);
}
/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev	: DP vdev context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (dp_peer_get_ref(soc, peer, mod_id) ==
		    QDF_STATUS_SUCCESS) {
			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
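
/*
 * Example iterator callback (an illustrative sketch only): count the
 * peers attached to a vdev. The iterator takes and drops the per-peer
 * reference around each call, so the callback must not cache @peer
 * beyond its own scope; the callback also runs under
 * vdev->peer_list_lock here, so it must not sleep or re-take that lock.
 *
 *	static void dp_peer_count_cb(struct dp_soc *soc,
 *				     struct dp_peer *peer, void *arg)
 *	{
 *		uint32_t *count = (uint32_t *)arg;
 *
 *		(*count)++;
 *	}
 *
 *	uint32_t count = 0;
 *
 *	dp_vdev_iterate_peer(vdev, dp_peer_count_cb, &count, DP_MOD_ID_CDP);
 */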
/**
 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
 *
 * @pdev	: DP pdev context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev;

	if (!pdev)
		return;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}
/**
 * dp_soc_iterate_peer() - API to iterate through all peers of soc
 *
 * @soc		: DP soc context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
		    enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
	}
}
/**
 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev peer list
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it is suggested to use it only
 * when the lock cannot be held across the iterate callback.
 *
 * @vdev	: DP vdev context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_peer **peer_array = NULL;
	int i = 0;
	uint32_t num_peers = 0;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	num_peers = vdev->num_peers;

	soc = vdev->pdev->soc;
	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
	if (!peer_array)
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		/* num_peers was sampled before taking the lock; do not
		 * run past the snapshot array if peers were added since.
		 */
		if (i >= num_peers)
			break;

		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
			peer_array[i] = peer;
			i = (i + 1);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

	for (i = 0; i < num_peers; i++) {
		peer = peer_array[i];
		if (!peer)
			continue;

		(*func)(soc, peer, arg);
		dp_peer_unref_delete(peer, mod_id);
	}

	qdf_mem_free(peer_array);
}
/**
 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it is suggested to use it only
 * when the lock cannot be held across the iterate callback.
 *
 * @pdev	: DP pdev context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
	int i = 0;
	int j = 0;
	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};

	if (!pdev || !pdev->soc)
		return;

	soc = pdev->soc;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		/* each vdev gets its own snapshot array; restart the
		 * fill index, otherwise it carries over from the
		 * previous vdev and later vdevs are skipped
		 */
		j = 0;
		num_peers[i] = vdev->num_peers;
		peer_array[i] = qdf_mem_malloc(num_peers[i] *
					       sizeof(struct dp_peer *));
		if (!peer_array[i])
			break;

		qdf_spin_lock_bh(&vdev->peer_list_lock);
		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
				   peer_list_elem,
				   tmp_peer) {
			if (j >= num_peers[i])
				break;

			if (dp_peer_get_ref(soc, peer, mod_id) ==
			    QDF_STATUS_SUCCESS) {
				peer_array[i][j] = peer;
				j = (j + 1);
			}
		}
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		i = (i + 1);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
		if (!peer_array[i])
			break;

		for (j = 0; j < num_peers[i]; j++) {
			peer = peer_array[i][j];
			if (!peer)
				continue;

			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}

		qdf_mem_free(peer_array[i]);
	}
}
/**
 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it is suggested to use it only
 * when the lock cannot be held across the iterate callback.
 *
 * @soc		: DP soc context
 * @func	: function to be called for each peer
 * @arg		: argument to be passed to func
 * @mod_id	: module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
			      dp_peer_iter_func *func,
			      void *arg,
			      enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
	}
}
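
/*
 * Note on choosing an iterator: dp_vdev/pdev/soc_iterate_peer() invoke
 * the callback while holding vdev->peer_list_lock, whereas the
 * *_lock_safe() variants above snapshot referenced peers first and
 * invoke the callback with no list lock held. Prefer the *_lock_safe()
 * variants when the callback itself needs to take peer_list_lock or
 * otherwise cannot run under that lock, at the cost of a temporary
 * allocation.
 */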
#ifdef DP_PEER_STATE_DEBUG
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)
#else
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif
/**
 * dp_peer_state_cmp() - compare dp peer state
 *
 * @peer	: DP peer
 * @state	: state to compare against
 *
 * Return: true if state matches the peer state,
 *	   false if it does not match
 */
static inline bool
dp_peer_state_cmp(struct dp_peer *peer,
		  enum dp_peer_state state)
{
	bool is_status_equal = false;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	is_status_equal = (peer->peer_state == state);
	qdf_spin_unlock_bh(&peer->peer_state_lock);

	return is_status_equal;
}
/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc		: core DP soc context
 * @peer	: DP peer
 * @state	: new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
		     struct dp_peer *peer,
		     enum dp_peer_state state)
{
	uint8_t peer_state;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	peer_state = peer->peer_state;

	switch (state) {
	case DP_PEER_STATE_INIT:
		/*
		 * INIT is valid only when the peer is neither active
		 * nor logically deleted; note the &&, an || here would
		 * make the assertion vacuously true.
		 */
		DP_PEER_STATE_ASSERT
			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_ACTIVE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_LOGICAL_DELETE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_ACTIVE) ||
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_INACTIVE:
		DP_PEER_STATE_ASSERT
			(peer, state,
			 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_FREED:
		if (peer->sta_self_peer)
			DP_PEER_STATE_ASSERT
			(peer, state, (peer_state == DP_PEER_STATE_INIT));
		else
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INACTIVE) ||
				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	default:
		dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		qdf_spin_unlock_bh(&peer->peer_state_lock);
		return;
	}

	dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
		peer_state, state,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw));

	peer->peer_state = state;
	qdf_spin_unlock_bh(&peer->peer_state_lock);
}
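
/*
 * Summary of the transitions permitted by the assertions above:
 *
 *	INIT -> ACTIVE -> LOGICAL_DELETE -> INACTIVE -> FREED
 *	INIT -> LOGICAL_DELETE -> FREED
 *	INIT -> FREED			(STA self peer only)
 */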
void dp_print_ast_stats(struct dp_soc *soc);
QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
				  uint16_t hw_peer_id, uint8_t vdev_id,
				  uint8_t *peer_mac_addr, uint16_t ast_hash,
				  uint8_t is_wds);
void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			      uint8_t vdev_id, uint8_t *peer_mac_addr,
			      uint8_t is_wds, uint32_t free_wds_count);
void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
			   enum cdp_sec_type sec_type, int is_unicast,
			   u_int32_t *michael_key, u_int32_t *rx_pn);
QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
				   uint8_t tid, uint16_t win_sz);
uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
				    uint16_t peer_id, uint8_t *peer_mac);
QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
			   uint8_t *mac_addr,
			   enum cdp_txrx_ast_entry_type type,
			   uint32_t flags);
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
void dp_peer_ast_unmap_handler(struct dp_soc *soc,
			       struct dp_ast_entry *ast_entry);
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags);
struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id);
struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id);
struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr);
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry);
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry);
void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type);
void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);
void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status);
void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase);
void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry);
void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc); \
			QDF_BUG(_condition); \
		} \
	} while (0)
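
/*
 * Note: DP_AST_ASSERT() expands to a reference to a variable named
 * 'soc', so callers must have a struct dp_soc *soc in scope at the
 * point of use.
 */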
/**
 * dp_peer_update_inactive_time - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf);
#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id: set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey: set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init: initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove: remove peer from special peer list
 * @peer: peer handle
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif
#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer - dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl - generic soc handle
 * @is_wds - flag to indicate if peer is wds
 * @peer_id - peer_id from htt peer map message
 * @peer_mac_addr - mac address of the peer
 * @ast_info - ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info);
#endif
/**
 * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
 * @soc: DP SOC handle
 * @pdev_id: id of DP pdev handle
 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
 * @is_tx_pkt_cap_enable: enable/disable/delete/print
 *			  Tx packet capture in monitor mode
 * @peer_mac: MAC address for which the above need to be enabled/disabled
 *
 * Return: QDF_STATUS_SUCCESS if Rx & Tx capture is enabled for the peer,
 *	   error status otherwise
 */
QDF_STATUS
dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
				  uint8_t pdev_id,
				  bool is_rx_pkt_cap_enable,
				  uint8_t is_tx_pkt_cap_enable,
				  uint8_t *peer_mac);
/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);
#ifndef WLAN_TX_PKT_CAPTURE_ENH
/**
 * dp_peer_tid_queue_init() - Initialize ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
{
}

/**
 * dp_peer_tid_peer_id_update() - update peer_id to tid structure
 * @peer: Datapath peer
 * @peer_id: peer_id
 *
 */
static inline
void dp_peer_tid_peer_id_update(struct dp_peer *peer, uint16_t peer_id)
{
}

/**
 * dp_peer_tid_queue_cleanup() - remove ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
{
}

/**
 * dp_peer_update_80211_hdr() - dp peer update 80211 hdr
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
static inline void
dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
{
}
#endif
#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
				       struct dp_peer *peer);
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
				   struct dp_peer *peer);
#else
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
						     struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
						 struct dp_peer *peer)
{
}
#endif

struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   enum dp_mod_id mod_id);
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
						struct dp_vdev *vdev,
						enum dp_mod_id mod_id);
#ifdef FEATURE_AST
/*
 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
 * @soc - datapath soc handle
 * @peer - datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);

	peer->self_ast_entry = NULL;
}
#else
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif

#endif /* _DP_PEER_H_ */