  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #ifndef _DP_PEER_H_
  19. #define _DP_PEER_H_
  20. #include <qdf_types.h>
  21. #include <qdf_lock.h>
  22. #include "dp_types.h"
  23. #define DP_INVALID_PEER_ID 0xffff
  24. #define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
  25. #define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */
  26. #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
  27. #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
  28. #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
  29. #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
  30. #define dp_peer_info(params...) \
  31. __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
  32. #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
  33. typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
  34. void *arg);
  35. void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
/**
 * dp_peer_get_ref() - Take a module reference on an existing peer
 *
 * @soc : core DP soc context
 * @peer : DP peer
 * @mod_id : id of module requesting the reference
 *
 * The reference is granted only while the peer's ref_cnt is non-zero;
 * a peer whose count has already dropped to zero is being freed and
 * must not be resurrected.
 *
 * Return: QDF_STATUS_SUCCESS if reference held successfully
 * else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
			   struct dp_peer *peer,
			   enum dp_mod_id mod_id)
{
	/* Refuse the ref if the peer already reached zero refs */
	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
		return QDF_STATUS_E_INVAL;

	/* Per-module ref accounting is skipped for ids up to
	 * DP_MOD_ID_RX — presumably to keep the rx fast path free of
	 * the extra atomic op; TODO confirm against dp_mod_id enum.
	 */
	if (mod_id > DP_MOD_ID_RX)
		qdf_atomic_inc(&peer->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}
  57. /**
  58. * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
  59. *
  60. * @soc : core DP soc context
  61. * @peer_id : peer id from peer object can be retrieved
  62. * @mod_id : module id
  63. *
  64. * Return: struct dp_peer*: Pointer to DP peer object
  65. */
  66. static inline struct dp_peer *
  67. __dp_peer_get_ref_by_id(struct dp_soc *soc,
  68. uint16_t peer_id,
  69. enum dp_mod_id mod_id)
  70. {
  71. struct dp_peer *peer;
  72. qdf_spin_lock_bh(&soc->peer_map_lock);
  73. peer = (peer_id >= soc->max_peers) ? NULL :
  74. soc->peer_id_to_obj_map[peer_id];
  75. if (!peer ||
  76. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  77. qdf_spin_unlock_bh(&soc->peer_map_lock);
  78. return NULL;
  79. }
  80. qdf_spin_unlock_bh(&soc->peer_map_lock);
  81. return peer;
  82. }
  83. /**
  84. * dp_peer_get_ref_by_id() - Returns peer object given the peer id
  85. * if peer state is active
  86. *
  87. * @soc : core DP soc context
  88. * @peer_id : peer id from peer object can be retrieved
  89. * @mod_id : ID ot module requesting reference
  90. *
  91. * Return: struct dp_peer*: Pointer to DP peer object
  92. */
  93. static inline
  94. struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
  95. uint16_t peer_id,
  96. enum dp_mod_id mod_id)
  97. {
  98. struct dp_peer *peer;
  99. qdf_spin_lock_bh(&soc->peer_map_lock);
  100. peer = (peer_id >= soc->max_peers) ? NULL :
  101. soc->peer_id_to_obj_map[peer_id];
  102. if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
  103. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  104. qdf_spin_unlock_bh(&soc->peer_map_lock);
  105. return NULL;
  106. }
  107. qdf_spin_unlock_bh(&soc->peer_map_lock);
  108. return peer;
  109. }
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
/* Rx packet caching not compiled in: flushing is a no-op */
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif
/**
 * dp_clear_peer_internal() - Mark the peer disconnected and flush its
 * cached rx frames
 * @soc: core DP soc context
 * @peer: DP peer
 *
 * Return: None
 */
static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	/* Flip the state under peer_info_lock so concurrent readers see
	 * a consistent value, then drop (not deliver) any frames cached
	 * for this peer.
	 */
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);
}
/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev : DP vdev context
 * @func : function to be called for each peer
 * @arg : argument need to be passed to func
 * @mod_id : module_id
 *
 * NOTE(review): @func (and the unref) run with vdev->peer_list_lock
 * held, so @func must not sleep or re-acquire the peer list lock.
 * Use dp_vdev_iterate_peer_lock_safe() when that cannot be guaranteed.
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	/* SAFE variant so the current element may be unlinked by the
	 * callback/unref without breaking the traversal.
	 */
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		/* Skip peers whose ref count already dropped to zero */
		if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
/**
 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
 *
 * @pdev : DP pdev context
 * @func : function to be called for each peer
 * @arg : argument need to be passed to func
 * @mod_id : module_id
 *
 * NOTE(review): @func runs with pdev->vdev_list_lock (and each vdev's
 * peer_list_lock) held; it must not sleep.
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev;

	if (!pdev)
		return;

	/* Walk every vdev on this pdev and delegate per-peer iteration */
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}
  186. /**
  187. * dp_soc_iterate_peer() - API to iterate through all peers of soc
  188. *
  189. * @soc : DP soc context
  190. * @func : function to be called for each peer
  191. * @arg : argument need to be passed to func
  192. * @mod_id : module_id
  193. *
  194. * Return: void
  195. */
  196. static inline void
  197. dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
  198. enum dp_mod_id mod_id)
  199. {
  200. struct dp_pdev *pdev;
  201. int i;
  202. if (!soc)
  203. return;
  204. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  205. pdev = soc->pdev_list[i];
  206. dp_pdev_iterate_peer(pdev, func, arg, mod_id);
  207. }
  208. }
  209. /**
  210. * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
  211. *
  212. * This API will cache the peers in local allocated memory and calls
  213. * iterate function outside the lock.
  214. *
  215. * As this API is allocating new memory it is suggested to use this
  216. * only when lock cannot be held
  217. *
  218. * @vdev : DP vdev context
  219. * @func : function to be called for each peer
  220. * @arg : argument need to be passed to func
  221. * @mod_id : module_id
  222. *
  223. * Return: void
  224. */
  225. static inline void
  226. dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
  227. dp_peer_iter_func *func,
  228. void *arg,
  229. enum dp_mod_id mod_id)
  230. {
  231. struct dp_peer *peer;
  232. struct dp_peer *tmp_peer;
  233. struct dp_soc *soc = NULL;
  234. struct dp_peer **peer_array = NULL;
  235. int i = 0;
  236. uint32_t num_peers = 0;
  237. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  238. return;
  239. num_peers = vdev->num_peers;
  240. soc = vdev->pdev->soc;
  241. peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
  242. if (!peer_array)
  243. return;
  244. qdf_spin_lock_bh(&vdev->peer_list_lock);
  245. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  246. peer_list_elem,
  247. tmp_peer) {
  248. if (i >= num_peers)
  249. break;
  250. if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
  251. peer_array[i] = peer;
  252. i = (i + 1);
  253. }
  254. }
  255. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  256. for (i = 0; i < num_peers; i++) {
  257. peer = peer_array[i];
  258. if (!peer)
  259. continue;
  260. (*func)(soc, peer, arg);
  261. dp_peer_unref_delete(peer, mod_id);
  262. }
  263. qdf_mem_free(peer_array);
  264. }
  265. /**
  266. * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
  267. *
  268. * This API will cache the peers in local allocated memory and calls
  269. * iterate function outside the lock.
  270. *
  271. * As this API is allocating new memory it is suggested to use this
  272. * only when lock cannot be held
  273. *
  274. * @pdev : DP pdev context
  275. * @func : function to be called for each peer
  276. * @arg : argument need to be passed to func
  277. * @mod_id : module_id
  278. *
  279. * Return: void
  280. */
  281. static inline void
  282. dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
  283. dp_peer_iter_func *func,
  284. void *arg,
  285. enum dp_mod_id mod_id)
  286. {
  287. struct dp_peer *peer;
  288. struct dp_peer *tmp_peer;
  289. struct dp_soc *soc = NULL;
  290. struct dp_vdev *vdev = NULL;
  291. struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
  292. int i = 0;
  293. int j = 0;
  294. uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
  295. if (!pdev || !pdev->soc)
  296. return;
  297. soc = pdev->soc;
  298. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  299. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  300. num_peers[i] = vdev->num_peers;
  301. peer_array[i] = qdf_mem_malloc(num_peers[i] *
  302. sizeof(struct dp_peer *));
  303. if (!peer_array[i])
  304. break;
  305. qdf_spin_lock_bh(&vdev->peer_list_lock);
  306. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  307. peer_list_elem,
  308. tmp_peer) {
  309. if (j >= num_peers[i])
  310. break;
  311. if (dp_peer_get_ref(soc, peer, mod_id) ==
  312. QDF_STATUS_SUCCESS) {
  313. peer_array[i][j] = peer;
  314. j = (j + 1);
  315. }
  316. }
  317. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  318. i = (i + 1);
  319. }
  320. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  321. for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
  322. if (!peer_array[i])
  323. break;
  324. for (j = 0; j < num_peers[i]; j++) {
  325. peer = peer_array[i][j];
  326. if (!peer)
  327. continue;
  328. (*func)(soc, peer, arg);
  329. dp_peer_unref_delete(peer, mod_id);
  330. }
  331. qdf_mem_free(peer_array[i]);
  332. }
  333. }
  334. /**
  335. * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
  336. *
  337. * This API will cache the peers in local allocated memory and calls
  338. * iterate function outside the lock.
  339. *
  340. * As this API is allocating new memory it is suggested to use this
  341. * only when lock cannot be held
  342. *
  343. * @soc : DP soc context
  344. * @func : function to be called for each peer
  345. * @arg : argument need to be passed to func
  346. * @mod_id : module_id
  347. *
  348. * Return: void
  349. */
  350. static inline void
  351. dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
  352. dp_peer_iter_func *func,
  353. void *arg,
  354. enum dp_mod_id mod_id)
  355. {
  356. struct dp_pdev *pdev;
  357. int i;
  358. if (!soc)
  359. return;
  360. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  361. pdev = soc->pdev_list[i];
  362. dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
  363. }
  364. }
/*
 * DP_PEER_STATE_ASSERT() - validate a peer state transition
 * @_peer: DP peer
 * @_new_state: state being transitioned to
 * @_condition: expression that must hold for the shift to be legal
 *
 * Debug builds (DP_PEER_STATE_DEBUG) log the invalid transition and
 * assert; production builds only log it.
 */
#ifdef DP_PEER_STATE_DEBUG
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)
#else
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif
  387. /**
  388. * dp_peer_state_cmp() - compare dp peer state
  389. *
  390. * @peer : DP peer
  391. * @state : state
  392. *
  393. * Return: true if state matches with peer state
  394. * false if it does not match
  395. */
  396. static inline bool
  397. dp_peer_state_cmp(struct dp_peer *peer,
  398. enum dp_peer_state state)
  399. {
  400. bool is_status_equal = false;
  401. qdf_spin_lock_bh(&peer->peer_state_lock);
  402. is_status_equal = (peer->peer_state == state);
  403. qdf_spin_unlock_bh(&peer->peer_state_lock);
  404. return is_status_equal;
  405. }
  406. /**
  407. * dp_peer_update_state() - update dp peer state
  408. *
  409. * @soc : core DP soc context
  410. * @peer : DP peer
  411. * @state : new state
  412. *
  413. * Return: None
  414. */
  415. static inline void
  416. dp_peer_update_state(struct dp_soc *soc,
  417. struct dp_peer *peer,
  418. enum dp_peer_state state)
  419. {
  420. uint8_t peer_state;
  421. qdf_spin_lock_bh(&peer->peer_state_lock);
  422. peer_state = peer->peer_state;
  423. switch (state) {
  424. case DP_PEER_STATE_INIT:
  425. DP_PEER_STATE_ASSERT
  426. (peer, state, (peer_state != DP_PEER_STATE_ACTIVE) ||
  427. (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
  428. break;
  429. case DP_PEER_STATE_ACTIVE:
  430. DP_PEER_STATE_ASSERT(peer, state,
  431. (peer_state == DP_PEER_STATE_INIT));
  432. break;
  433. case DP_PEER_STATE_LOGICAL_DELETE:
  434. DP_PEER_STATE_ASSERT(peer, state,
  435. (peer_state == DP_PEER_STATE_ACTIVE) ||
  436. (peer_state == DP_PEER_STATE_INIT));
  437. break;
  438. case DP_PEER_STATE_INACTIVE:
  439. DP_PEER_STATE_ASSERT
  440. (peer, state,
  441. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  442. break;
  443. case DP_PEER_STATE_FREED:
  444. if (peer->sta_self_peer)
  445. DP_PEER_STATE_ASSERT
  446. (peer, state, (peer_state == DP_PEER_STATE_INIT));
  447. else
  448. DP_PEER_STATE_ASSERT
  449. (peer, state,
  450. (peer_state == DP_PEER_STATE_INACTIVE) ||
  451. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  452. break;
  453. default:
  454. qdf_spin_unlock_bh(&peer->peer_state_lock);
  455. dp_alert("Invalid peer state %u for peer "QDF_MAC_ADDR_FMT,
  456. state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  457. return;
  458. }
  459. peer->peer_state = state;
  460. qdf_spin_unlock_bh(&peer->peer_state_lock);
  461. dp_info("Updating peer state from %u to %u mac "QDF_MAC_ADDR_FMT"\n",
  462. peer_state, state,
  463. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  464. }
  465. void dp_print_ast_stats(struct dp_soc *soc);
  466. QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  467. uint16_t hw_peer_id, uint8_t vdev_id,
  468. uint8_t *peer_mac_addr, uint16_t ast_hash,
  469. uint8_t is_wds);
  470. void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
  471. uint8_t vdev_id, uint8_t *peer_mac_addr,
  472. uint8_t is_wds, uint32_t free_wds_count);
  473. void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
  474. enum cdp_sec_type sec_type, int is_unicast,
  475. u_int32_t *michael_key, u_int32_t *rx_pn);
  476. QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
  477. uint8_t tid, uint16_t win_sz);
  478. uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
  479. uint16_t peer_id, uint8_t *peer_mac);
  480. QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
  481. uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
  482. uint32_t flags);
  483. void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
  484. void dp_peer_ast_unmap_handler(struct dp_soc *soc,
  485. struct dp_ast_entry *ast_entry);
  486. int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
  487. struct dp_ast_entry *ast_entry, uint32_t flags);
  488. struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
  489. uint8_t *ast_mac_addr,
  490. uint8_t pdev_id);
  491. struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
  492. uint8_t *ast_mac_addr,
  493. uint8_t vdev_id);
  494. struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
  495. uint8_t *ast_mac_addr);
  496. uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
  497. struct dp_ast_entry *ast_entry);
  498. uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
  499. struct dp_ast_entry *ast_entry);
  500. void dp_peer_ast_set_type(struct dp_soc *soc,
  501. struct dp_ast_entry *ast_entry,
  502. enum cdp_txrx_ast_entry_type type);
  503. void dp_peer_ast_send_wds_del(struct dp_soc *soc,
  504. struct dp_ast_entry *ast_entry,
  505. struct dp_peer *peer);
  506. void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  507. struct cdp_soc *dp_soc,
  508. void *cookie,
  509. enum cdp_ast_free_status status);
  510. void dp_peer_ast_hash_remove(struct dp_soc *soc,
  511. struct dp_ast_entry *ase);
  512. void dp_peer_free_ast_entry(struct dp_soc *soc,
  513. struct dp_ast_entry *ast_entry);
  514. void dp_peer_unlink_ast_entry(struct dp_soc *soc,
  515. struct dp_ast_entry *ast_entry,
  516. struct dp_peer *peer);
  517. /**
  518. * dp_peer_mec_detach_entry() - Detach the MEC entry
  519. * @soc: SoC handle
  520. * @mecentry: MEC entry of the node
  521. * @ptr: pointer to free list
  522. *
  523. * The MEC entry is detached from MEC table and added to free_list
  524. * to free the object outside lock
  525. *
  526. * Return: None
  527. */
  528. void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
  529. void *ptr);
  530. /**
  531. * dp_peer_mec_free_list() - free the MEC entry from free_list
  532. * @soc: SoC handle
  533. * @ptr: pointer to free list
  534. *
  535. * Return: None
  536. */
  537. void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
  538. /**
  539. * dp_peer_mec_add_entry()
  540. * @soc: SoC handle
  541. * @vdev: vdev to which mec node belongs
  542. * @mac_addr: MAC address of mec node
  543. *
  544. * This function allocates and adds MEC entry to MEC table.
  545. * It assumes caller has taken the mec lock to protect the access to these
  546. * tables
  547. *
  548. * Return: QDF_STATUS
  549. */
  550. QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
  551. struct dp_vdev *vdev,
  552. uint8_t *mac_addr);
/**
 * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
 * within pdev
 * @soc: SoC handle
 * @pdev_id: pdev ID of the MEC hash table to search
 * @mec_mac_addr: MAC address of the MEC node
 *
 * It assumes caller has taken the mec_lock to protect the access to
 * MEC hash table
 *
 * Return: MEC entry
 */
  563. struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
  564. uint8_t pdev_id,
  565. uint8_t *mec_mac_addr);
  566. #define DP_AST_ASSERT(_condition) \
  567. do { \
  568. if (!(_condition)) { \
  569. dp_print_ast_stats(soc);\
  570. QDF_BUG(_condition); \
  571. } \
  572. } while (0)
/**
 * dp_peer_update_inactive_time - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 */
  579. void
  580. dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
  581. uint32_t *tag_buf);
#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id: set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * No-op stub: multipass support is not compiled in.
 *
 * return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey: set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * Stub: reports success so callers proceed when multipass is disabled.
 *
 * return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init: initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * No-op stub: multipass support is not compiled in.
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove: remove peer from special peer list
 * @peer: peer handle
 *
 * No-op stub: multipass support is not compiled in.
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
/* Prototypes for the real implementations when multipass is enabled */
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif
#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer - dp peer handle
 *
 * No-op stub: peer multiqueue support is not compiled in.
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl - generic soc handle
 * @is_wds - flag to indicate if peer is wds
 * @peer_id - peer_id from htt peer map message
 * @peer_mac_addr - mac address of the peer
 * @ast_info - ast flow override information from peer map
 *
 * No-op stub: peer multiqueue support is not compiled in.
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);

void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info);
#endif
/**
 * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
 * @soc: DP SOC handle
 * @pdev_id: id of DP pdev handle
 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
 * @is_tx_pkt_cap_enable: enable/disable/delete/print
 * Tx packet capture in monitor mode
 * @peer_mac: MAC address for which the above need to be enabled/disabled
 *
 * Return: Success if Rx & Tx capture is enabled for peer, false otherwise
 */
  687. QDF_STATUS
  688. dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
  689. uint8_t pdev_id,
  690. bool is_rx_pkt_cap_enable,
  691. uint8_t is_tx_pkt_cap_enable,
  692. uint8_t *peer_mac);
  693. /*
  694. * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
  695. * after deleting the entries (ie., setting valid=0)
  696. *
  697. * @soc: DP SOC handle
  698. * @cb_ctxt: Callback context
  699. * @reo_status: REO command status
  700. */
  701. void dp_rx_tid_delete_cb(struct dp_soc *soc,
  702. void *cb_ctxt,
  703. union hal_reo_status *reo_status);
#ifndef WLAN_TX_PKT_CAPTURE_ENH
/**
 * dp_peer_tid_queue_init() - Initialize ppdu stats queue per TID
 * @peer: Datapath peer
 *
 * No-op stub: TX packet capture enhancement is not compiled in.
 */
static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
{
}

/**
 * dp_peer_tid_peer_id_update() - update peer_id to tid structure
 * @peer: Datapath peer
 * @peer_id: peer_id
 *
 * No-op stub: TX packet capture enhancement is not compiled in.
 */
static inline
void dp_peer_tid_peer_id_update(struct dp_peer *peer, uint16_t peer_id)
{
}

/**
 * dp_peer_tid_queue_cleanup() - remove ppdu stats queue per TID
 * @peer: Datapath peer
 *
 * No-op stub: TX packet capture enhancement is not compiled in.
 */
static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
{
}

/**
 * dp_peer_update_80211_hdr() - dp peer update 80211 hdr
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 * No-op stub: TX packet capture enhancement is not compiled in.
 */
static inline void
dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
{
}
#endif
#ifdef QCA_PEER_EXT_STATS
/**
 * dp_peer_ext_stats_ctx_alloc() - Allocate extended stats context for a peer
 * @soc: DP SoC context
 * @peer: DP peer
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
				       struct dp_peer *peer);

/**
 * dp_peer_ext_stats_ctx_dealloc() - Free extended stats context of a peer
 * @soc: DP SoC context
 * @peer: DP peer
 *
 * Return: None
 */
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
				   struct dp_peer *peer);
#else
/* Extended peer stats disabled: allocation trivially succeeds */
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
						     struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

/* Extended peer stats disabled: nothing to free */
static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
						 struct dp_peer *peer)
{
}
#endif
  758. struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
  759. struct dp_vdev *vdev,
  760. enum dp_mod_id mod_id);
  761. struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
  762. struct dp_vdev *vdev,
  763. enum dp_mod_id mod_id);
#ifdef FEATURE_AST
/*
 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
 * @soc - datapath soc handle
 * @peer - datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received(for ex in case
	 * of auth disallow due to ACL) in such cases self ast is not added
	 * to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		/* Clear the pointer to guard against a double delete */
		peer->self_ast_entry = NULL;
	}

	/* Two-variable iteration macro — presumably a foreach-safe
	 * walk, since dp_peer_del_ast() removes the current entry;
	 * TODO confirm against DP_PEER_ITERATE_ASE_LIST definition.
	 */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}
#else
/* AST feature disabled: nothing to delete */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif
#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);

/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);

/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
/* MEC feature disabled: lock management and flushing are no-ops */
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif
  828. #endif /* _DP_PEER_H_ */