dp_peer.h

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_PEER_H_
#define _DP_PEER_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_types.h"

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
#include "hal_reo.h"
#endif

#define DP_INVALID_PEER_ID 0xffff

#define DP_PEER_MAX_MEC_IDX 1024	/* maximum index for MEC table */
#define DP_PEER_MAX_MEC_ENTRY 4096	/* maximum MEC entries in MEC table */

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

#define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
#define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)

#ifdef REO_QDESC_HISTORY
enum reo_qdesc_event_type {
	REO_QDESC_UPDATE_CB = 0,
	REO_QDESC_FREE,
};

struct reo_qdesc_event {
	qdf_dma_addr_t qdesc_addr;
	uint64_t ts;
	enum reo_qdesc_event_type type;
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
};
#endif

struct ast_del_ctxt {
	bool age;
	int del_count;
};

typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);

void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);

/**
 * dp_peer_get_ref() - Acquire a reference on a peer object
 *
 * @soc : core DP soc context
 * @peer : DP peer
 * @mod_id : id of module requesting the reference
 *
 * Return: QDF_STATUS_SUCCESS if reference held successfully
 *         else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
			   struct dp_peer *peer,
			   enum dp_mod_id mod_id)
{
	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
		return QDF_STATUS_E_INVAL;

	if (mod_id > DP_MOD_ID_RX)
		qdf_atomic_inc(&peer->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}

/**
 * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *
 * @soc : core DP soc context
 * @peer_id : peer id from which the peer object can be retrieved
 * @mod_id : module id
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline struct dp_peer *
__dp_peer_get_ref_by_id(struct dp_soc *soc,
			uint16_t peer_id,
			enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peers) ? NULL :
				soc->peer_id_to_obj_map[peer_id];
	if (!peer ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}

/**
 * dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *			     if peer state is active
 *
 * @soc : core DP soc context
 * @peer_id : peer id from which the peer object can be retrieved
 * @mod_id : ID of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline
struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
				      uint16_t peer_id,
				      enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peers) ? NULL :
				soc->peer_id_to_obj_map[peer_id];
	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}
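
/*
 * Usage sketch (illustrative, not part of this header): every successful
 * dp_peer_get_ref_by_id() must be balanced by dp_peer_unref_delete() with
 * the same module id. DP_MOD_ID_CDP is assumed here as an example member
 * of enum dp_mod_id.
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
 *	if (!peer)
 *		return QDF_STATUS_E_INVAL;
 *	(access peer fields here; the reference keeps the object alive)
 *	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */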

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);
}

/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev : DP vdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
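
/*
 * Illustrative sketch (hypothetical callback, not part of this header):
 * a dp_peer_iter_func that counts the peers of a vdev. The iterator holds
 * a reference across each invocation, so the callback may dereference the
 * peer but must not cache the pointer beyond the call. DP_MOD_ID_CDP is
 * assumed as an example member of enum dp_mod_id.
 *
 *	static void dp_peer_count_cb(struct dp_soc *soc,
 *				     struct dp_peer *peer, void *arg)
 *	{
 *		uint32_t *count = (uint32_t *)arg;
 *
 *		(*count)++;
 *	}
 *
 *	uint32_t count = 0;
 *
 *	dp_vdev_iterate_peer(vdev, dp_peer_count_cb, &count, DP_MOD_ID_CDP);
 */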

/**
 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
 *
 * @pdev : DP pdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev;

	if (!pdev)
		return;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

/**
 * dp_soc_iterate_peer() - API to iterate through all peers of soc
 *
 * @soc : DP soc context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
		    enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
	}
}

/**
 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev peer list
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterator function outside the lock.
 *
 * As this API allocates new memory, it is suggested only for cases where
 * the lock cannot be held across the callback.
 *
 * @vdev : DP vdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_peer **peer_array = NULL;
	int i = 0;
	uint32_t num_peers = 0;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	num_peers = vdev->num_peers;
	soc = vdev->pdev->soc;

	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
	if (!peer_array)
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (i >= num_peers)
			break;

		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
			peer_array[i] = peer;
			i = (i + 1);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

	for (i = 0; i < num_peers; i++) {
		peer = peer_array[i];

		if (!peer)
			continue;

		(*func)(soc, peer, arg);
		dp_peer_unref_delete(peer, mod_id);
	}

	qdf_mem_free(peer_array);
}
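
/*
 * Design note: with the _lock_safe variant the callback runs with
 * vdev->peer_list_lock released, so it may itself take that lock or call
 * APIs that do. A hypothetical callback like the sketch below would
 * deadlock under dp_vdev_iterate_peer() but is safe here:
 *
 *	static void dp_peer_list_audit_cb(struct dp_soc *soc,
 *					  struct dp_peer *peer, void *arg)
 *	{
 *		qdf_spin_lock_bh(&peer->vdev->peer_list_lock);
 *		(inspect the vdev peer list here)
 *		qdf_spin_unlock_bh(&peer->vdev->peer_list_lock);
 *	}
 */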

/**
 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterator function outside the lock.
 *
 * As this API allocates new memory, it is suggested only for cases where
 * the lock cannot be held across the callback.
 *
 * @pdev : DP pdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
	int i = 0;
	int j = 0;
	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};

	if (!pdev || !pdev->soc)
		return;

	soc = pdev->soc;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		num_peers[i] = vdev->num_peers;
		peer_array[i] = qdf_mem_malloc(num_peers[i] *
					       sizeof(struct dp_peer *));
		if (!peer_array[i])
			break;

		qdf_spin_lock_bh(&vdev->peer_list_lock);
		j = 0;
		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
				   peer_list_elem,
				   tmp_peer) {
			if (j >= num_peers[i])
				break;

			if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
				peer_array[i][j] = peer;
				j = (j + 1);
			}
		}
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		i = (i + 1);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
		if (!peer_array[i])
			break;

		for (j = 0; j < num_peers[i]; j++) {
			peer = peer_array[i][j];

			if (!peer)
				continue;

			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}

		qdf_mem_free(peer_array[i]);
	}
}

/**
 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterator function outside the lock.
 *
 * As this API allocates new memory, it is suggested only for cases where
 * the lock cannot be held across the callback.
 *
 * @soc : DP soc context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
			      dp_peer_iter_func *func,
			      void *arg,
			      enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
	}
}

#ifdef DP_PEER_STATE_DEBUG
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)
#else
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif

/**
 * dp_peer_state_cmp() - compare dp peer state
 *
 * @peer : DP peer
 * @state : state to compare against
 *
 * Return: true if the given state matches the peer's current state,
 *	   false otherwise
 */
static inline bool
dp_peer_state_cmp(struct dp_peer *peer,
		  enum dp_peer_state state)
{
	bool is_status_equal = false;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	is_status_equal = (peer->peer_state == state);
	qdf_spin_unlock_bh(&peer->peer_state_lock);

	return is_status_equal;
}

/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc : core DP soc context
 * @peer : DP peer
 * @state : new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
		     struct dp_peer *peer,
		     enum dp_peer_state state)
{
	uint8_t peer_state;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	peer_state = peer->peer_state;

	switch (state) {
	case DP_PEER_STATE_INIT:
		DP_PEER_STATE_ASSERT
			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_ACTIVE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_LOGICAL_DELETE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_ACTIVE) ||
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_INACTIVE:
		DP_PEER_STATE_ASSERT
			(peer, state,
			 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_FREED:
		if (peer->sta_self_peer)
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INIT));
		else
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INACTIVE) ||
				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	default:
		qdf_spin_unlock_bh(&peer->peer_state_lock);
		dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return;
	}

	peer->peer_state = state;
	qdf_spin_unlock_bh(&peer->peer_state_lock);
	dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
		peer_state, state,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}
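
/*
 * Summary of the transitions asserted above (normal peer lifetime):
 *
 *	INIT -> ACTIVE -> LOGICAL_DELETE -> INACTIVE -> FREED
 *	INIT -> LOGICAL_DELETE		(peer torn down before activation)
 *	INIT -> FREED			(sta_self_peer only)
 *	LOGICAL_DELETE -> FREED
 *
 * A transition back to INIT is permitted from any state except ACTIVE and
 * LOGICAL_DELETE (for example when an inactive peer object is reused).
 */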

void dp_print_ast_stats(struct dp_soc *soc);

QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
				  uint16_t hw_peer_id, uint8_t vdev_id,
				  uint8_t *peer_mac_addr, uint16_t ast_hash,
				  uint8_t is_wds);

void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			      uint8_t vdev_id, uint8_t *peer_mac_addr,
			      uint8_t is_wds, uint32_t free_wds_count);

void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
			   enum cdp_sec_type sec_type, int is_unicast,
			   u_int32_t *michael_key, u_int32_t *rx_pn);

QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
				   uint8_t tid, uint16_t win_sz);

uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
				    uint16_t peer_id, uint8_t *peer_mac);

QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
			   uint8_t *mac_addr,
			   enum cdp_txrx_ast_entry_type type,
			   uint32_t flags);

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);

void dp_peer_ast_unmap_handler(struct dp_soc *soc,
			       struct dp_ast_entry *ast_entry);

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags);

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr);

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry);

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry);

void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type);

void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status);

void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase);

void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry);

void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

/**
 * dp_peer_mec_detach_entry() - Detach the MEC entry
 * @soc: SoC handle
 * @mecentry: MEC entry of the node
 * @ptr: pointer to free list
 *
 * The MEC entry is detached from MEC table and added to free_list
 * to free the object outside lock
 *
 * Return: None
 */
void dp_peer_mec_detach_entry(struct dp_soc *soc,
			      struct dp_mec_entry *mecentry,
			      void *ptr);

/**
 * dp_peer_mec_free_list() - free the MEC entry from free_list
 * @soc: SoC handle
 * @ptr: pointer to free list
 *
 * Return: None
 */
void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);

/**
 * dp_peer_mec_add_entry() - Allocate and add a MEC entry to the MEC table
 * @soc: SoC handle
 * @vdev: vdev to which mec node belongs
 * @mac_addr: MAC address of mec node
 *
 * This function allocates and adds a MEC entry to the MEC table.
 * It assumes the caller has taken the mec lock to protect access to these
 * tables.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 uint8_t *mac_addr);
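
/*
 * Usage sketch (illustrative): the caller serializes MEC table access with
 * the lock created by dp_peer_mec_spinlock_create(); the field name
 * soc->mec_lock is an assumption of this sketch, not part of this header.
 *
 *	QDF_STATUS status;
 *
 *	qdf_spin_lock_bh(&soc->mec_lock);
 *	status = dp_peer_mec_add_entry(soc, vdev, mac_addr);
 *	qdf_spin_unlock_bh(&soc->mec_lock);
 */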

/**
 * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
 *				       within pdev
 * @soc: SoC handle
 * @pdev_id: pdev id
 * @mec_mac_addr: MAC address of the MEC node
 *
 * It assumes caller has taken the mec_lock to protect the access to
 * MEC hash table
 *
 * Return: MEC entry
 */
struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t pdev_id,
						     uint8_t *mec_mac_addr);

#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc); \
			QDF_BUG(_condition); \
		} \
	} while (0)

/**
 * dp_peer_update_inactive_time() - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buffer containing the tlv message
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf);

#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id: set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey: set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init: initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove: remove peer from special peer list
 * @peer: peer handle
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif

#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer - dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl - generic soc handle
 * @is_wds - flag to indicate if peer is wds
 * @peer_id - peer_id from htt peer map message
 * @peer_mac_addr - mac address of the peer
 * @ast_info - ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);

void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info);
#endif

/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);

#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
				       struct dp_peer *peer);
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
				   struct dp_peer *peer);
#else
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
						     struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
						 struct dp_peer *peer)
{
}
#endif

struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   enum dp_mod_id mod_id);
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
						struct dp_vdev *vdev,
						enum dp_mod_id mod_id);

#ifdef FEATURE_AST
/*
 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
 * @soc - datapath soc handle
 * @peer - datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received (for example,
	 * in case of auth disallow due to ACL); in such cases the self
	 * ast is not added to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}
#else
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif

#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);

/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);

/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc : dp_soc handle
 * @peer: peer
 *
 * This function is used to send a cache flush cmd to REO and
 * to register the callback that handles dumping of the REO
 * queue stats from DDR.
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(
	struct dp_soc *soc, struct dp_peer *peer);

/**
 * dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl : cdp_soc_t handle
 * @vdev_id: vdev id
 *
 * Handler to get rx tid info from DDR after the h/w cache is
 * first invalidated using the cache flush cmd.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);

/**
 * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
 * @soc : dp_soc handle
 * @cb_ctxt - callback context
 * @reo_status: REO command status
 *
 * This is the callback function registered after sending the reo cmd
 * to flush the h/w cache and invalidate it. In the callback the reo
 * queue desc info is dumped from DDR.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(
	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
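
/*
 * Flow sketch, summarizing the comments above (call structure only, not a
 * definitive trace):
 *
 *	dp_get_rx_reo_queue_info(soc_hdl, vdev_id)
 *	    -> dp_send_cache_flush_for_rx_tid(soc, peer)   for each rx tid
 *	        -> REO flush-cache command, with
 *	           dp_dump_rx_reo_queue_info() registered as callback
 *	            -> dp_dump_rx_reo_queue_info(soc, cb_ctxt, reo_status)
 *	               reads the now-coherent queue desc from DDR and dumps it
 */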

#else /* DUMP_REO_QUEUE_INFO_IN_DDR */
static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
#endif /* _DP_PEER_H_ */