dp_peer.h

/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_PEER_H_
#define _DP_PEER_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_types.h"

#define DP_INVALID_PEER_ID 0xffff
#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

/**
 * dp_peer_get_ref() - Acquire a reference on the given peer object
 *
 * @soc    : core DP soc context
 * @peer   : DP peer
 * @mod_id : id of module requesting the reference
 *
 * Return: QDF_STATUS_SUCCESS if reference held successfully
 *         else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
                           struct dp_peer *peer,
                           enum dp_peer_mod_id mod_id)
{
        if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
                return QDF_STATUS_E_INVAL;

        if (mod_id > DP_MOD_ID_RX)
                qdf_atomic_inc(&peer->mod_refs[mod_id]);

        return QDF_STATUS_SUCCESS;
}

/**
 * __dp_peer_find_by_id() - Returns peer object given the peer id
 *
 * @soc     : core DP soc context
 * @peer_id : peer id from which the peer object can be retrieved
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline struct dp_peer *
__dp_peer_find_by_id(struct dp_soc *soc,
                     uint16_t peer_id)
{
        struct dp_peer *peer;

        /* TODO: Hold lock */
        peer = (peer_id >= soc->max_peers) ? NULL :
                soc->peer_id_to_obj_map[peer_id];

        return peer;
}

/**
 * dp_peer_get_ref_by_id() - Returns peer object given the peer id,
 *                           if delete_in_progress is not set for the peer
 *
 * @soc     : core DP soc context
 * @peer_id : peer id from which the peer object can be retrieved
 * @mod_id  : ID of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline
struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
                                      uint16_t peer_id,
                                      enum dp_peer_mod_id mod_id)
{
        struct dp_peer *peer;

        qdf_spin_lock_bh(&soc->peer_map_lock);
        peer = __dp_peer_find_by_id(soc, peer_id);
        if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
            (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
                qdf_spin_unlock_bh(&soc->peer_map_lock);
                return NULL;
        }
        qdf_spin_unlock_bh(&soc->peer_map_lock);

        return peer;
}
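
/*
 * Usage sketch (illustrative only, not part of this header's API): a caller
 * that looks up a peer by id must drop the reference it acquired once done.
 * This assumes dp_peer_unref_delete(peer, mod_id), declared elsewhere in the
 * DP headers, is the matching release helper.
 *
 *      struct dp_peer *peer;
 *
 *      peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX);
 *      if (!peer)
 *              return;         // peer unmapped or being deleted
 *
 *      // ... use peer under the acquired reference ...
 *
 *      dp_peer_unref_delete(peer, DP_MOD_ID_RX);
 */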

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
        qdf_spin_lock_bh(&peer->peer_info_lock);
        peer->state = OL_TXRX_PEER_STATE_DISC;
        qdf_spin_unlock_bh(&peer->peer_info_lock);

        dp_rx_flush_rx_cached(peer, true);
}

/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc   : core DP soc context
 * @peer  : DP peer
 * @state : new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
                     struct dp_peer *peer,
                     enum dp_peer_state state)
{
        uint8_t peer_state = peer->peer_state;

        switch (state) {
        case DP_PEER_STATE_INIT:
                QDF_ASSERT((peer_state != DP_PEER_STATE_ACTIVE) &&
                           (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
                break;

        case DP_PEER_STATE_ACTIVE:
                QDF_ASSERT(peer_state == DP_PEER_STATE_INIT);
                break;

        case DP_PEER_STATE_LOGICAL_DELETE:
                QDF_ASSERT((peer_state == DP_PEER_STATE_ACTIVE) ||
                           (peer_state == DP_PEER_STATE_INIT));
                break;

        case DP_PEER_STATE_INACTIVE:
                QDF_ASSERT(peer_state == DP_PEER_STATE_LOGICAL_DELETE);
                break;

        case DP_PEER_STATE_FREED:
                if (peer->sta_self_peer)
                        QDF_ASSERT(peer_state == DP_PEER_STATE_INIT);
                else
                        QDF_ASSERT((peer_state == DP_PEER_STATE_INACTIVE) ||
                                   (peer_state ==
                                    DP_PEER_STATE_LOGICAL_DELETE));
                break;

        default:
                QDF_ASSERT(0);
                break;
        }

        qdf_info("Updating peer state from %u to %u mac %pM\n",
                 peer_state, state, peer->mac_addr.raw);
        peer->peer_state = state;
}
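
/*
 * Peer state lifecycle enforced by the asserts above
 * (new state <- allowed previous states):
 *
 *      INIT           <- any state except ACTIVE or LOGICAL_DELETE
 *      ACTIVE         <- INIT
 *      LOGICAL_DELETE <- INIT, ACTIVE
 *      INACTIVE       <- LOGICAL_DELETE
 *      FREED          <- INIT (sta self peer), else INACTIVE or LOGICAL_DELETE
 */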

void dp_print_ast_stats(struct dp_soc *soc);

QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
                                  uint16_t hw_peer_id, uint8_t vdev_id,
                                  uint8_t *peer_mac_addr, uint16_t ast_hash,
                                  uint8_t is_wds);
void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
                              uint8_t vdev_id, uint8_t *peer_mac_addr,
                              uint8_t is_wds, uint32_t free_wds_count);
void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
                           enum cdp_sec_type sec_type, int is_unicast,
                           u_int32_t *michael_key, u_int32_t *rx_pn);
QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
                                   uint8_t tid, uint16_t win_sz);

uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
                                    uint16_t peer_id, uint8_t *peer_mac);

QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
                           uint8_t *mac_addr,
                           enum cdp_txrx_ast_entry_type type,
                           uint32_t flags);

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);

void dp_peer_ast_unmap_handler(struct dp_soc *soc,
                               struct dp_ast_entry *ast_entry);

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
                       struct dp_ast_entry *ast_entry, uint32_t flags);

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
                                                     uint8_t *ast_mac_addr,
                                                     uint8_t pdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
                                                     uint8_t *ast_mac_addr,
                                                     uint8_t vdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
                                               uint8_t *ast_mac_addr);

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
                                struct dp_ast_entry *ast_entry);

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
                                 struct dp_ast_entry *ast_entry);

void dp_peer_ast_set_type(struct dp_soc *soc,
                          struct dp_ast_entry *ast_entry,
                          enum cdp_txrx_ast_entry_type type);

void dp_peer_ast_send_wds_del(struct dp_soc *soc,
                              struct dp_ast_entry *ast_entry,
                              struct dp_peer *peer);

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
                           struct cdp_soc *dp_soc,
                           void *cookie,
                           enum cdp_ast_free_status status);

void dp_peer_ast_hash_remove(struct dp_soc *soc,
                             struct dp_ast_entry *ase);

void dp_peer_free_ast_entry(struct dp_soc *soc,
                            struct dp_ast_entry *ast_entry);

void dp_peer_unlink_ast_entry(struct dp_soc *soc,
                              struct dp_ast_entry *ast_entry,
                              struct dp_peer *peer);

#define DP_AST_ASSERT(_condition) \
        do { \
                if (!(_condition)) { \
                        dp_print_ast_stats(soc);\
                        QDF_BUG(_condition); \
                } \
        } while (0)
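
/*
 * Note on DP_AST_ASSERT: the macro body references a variable named 'soc'
 * directly, so it can only be used where a local or parameter named 'soc'
 * (struct dp_soc *) is in scope. Illustrative use, with a hypothetical
 * helper as the caller:
 *
 *      static void dp_peer_ast_example(struct dp_soc *soc,
 *                                      struct dp_ast_entry *ase)
 *      {
 *              DP_AST_ASSERT(ase != NULL);
 *      }
 */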

/**
 * dp_peer_update_inactive_time - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
                             uint32_t *tag_buf);

#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id: set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
                         uint8_t vdev_id, uint8_t *peer_mac,
                         uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey: set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
                                uint16_t vlan_id, uint16_t group_key)
{
        return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init: initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove: remove peer from special peer list
 * @peer: peer handle
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
                         uint8_t vdev_id, uint8_t *peer_mac,
                         uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
                                uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif

#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer - dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl - generic soc handle
 * @is_wds - flag to indicate if peer is wds
 * @peer_id - peer_id from htt peer map message
 * @peer_mac_addr - mac address of the peer
 * @ast_info - ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
                bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
                struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);

void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
                bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
                struct dp_ast_flow_override_info *ast_info);
#endif

/**
 * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
 * @soc: DP SOC handle
 * @pdev_id: id of DP pdev handle
 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
 * @is_tx_pkt_cap_enable: enable/disable/delete/print
 *                        Tx packet capture in monitor mode
 * @peer_mac: MAC address for which the above need to be enabled/disabled
 *
 * Return: QDF_STATUS_SUCCESS if Rx & Tx capture params are set for the peer,
 *         failure status otherwise
 */
QDF_STATUS
dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
                                  uint8_t pdev_id,
                                  bool is_rx_pkt_cap_enable,
                                  uint8_t is_tx_pkt_cap_enable,
                                  uint8_t *peer_mac);

/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 *                         after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
                         void *cb_ctxt,
                         union hal_reo_status *reo_status);

#ifndef WLAN_TX_PKT_CAPTURE_ENH
/**
 * dp_peer_tid_queue_init() - Initialize ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
{
}

/**
 * dp_peer_tid_peer_id_update() - update peer_id to tid structure
 * @peer: Datapath peer
 * @peer_id: peer_id
 *
 */
static inline
void dp_peer_tid_peer_id_update(struct dp_peer *peer, uint16_t peer_id)
{
}

/**
 * dp_peer_tid_queue_cleanup() - remove ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
{
}

/**
 * dp_peer_update_80211_hdr() - dp peer update 80211 hdr
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
static inline void
dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
{
}
#endif

#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
                                       struct dp_peer *peer);
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
                                   struct dp_peer *peer);
#else
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
                                                     struct dp_peer *peer)
{
        return QDF_STATUS_SUCCESS;
}

static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
                                                 struct dp_peer *peer)
{
}
#endif

struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
                                           struct dp_vdev *vdev,
                                           enum dp_peer_mod_id mod_id);

struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
                                                struct dp_vdev *vdev,
                                                enum dp_peer_mod_id mod_id);

#endif /* _DP_PEER_H_ */