/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_PEER_H_
#define _DP_PEER_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_types.h"
#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
#include "hal_reo.h"
#endif

#define DP_INVALID_PEER_ID 0xffff

#define DP_PEER_MAX_MEC_IDX 1024	/* maximum index for MEC table */
#define DP_PEER_MAX_MEC_ENTRY 4096	/* maximum MEC entries in MEC table */

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

#define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
#define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)

#ifdef REO_QDESC_HISTORY
enum reo_qdesc_event_type {
	REO_QDESC_UPDATE_CB = 0,
	REO_QDESC_FREE,
};

struct reo_qdesc_event {
	qdf_dma_addr_t qdesc_addr;
	uint64_t ts;
	enum reo_qdesc_event_type type;
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
};
#endif

typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);

void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);

/**
 * dp_peer_get_ref() - Take a reference on the given peer object
 *
 * @soc : core DP soc context
 * @peer : DP peer
 * @mod_id : id of module requesting the reference
 *
 * Return: QDF_STATUS_SUCCESS if the reference was held successfully,
 *	   else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
			   struct dp_peer *peer,
			   enum dp_mod_id mod_id)
{
	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
		return QDF_STATUS_E_INVAL;

	if (mod_id > DP_MOD_ID_RX)
		qdf_atomic_inc(&peer->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}
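
/*
 * Usage sketch (illustrative only, not part of the API): a reference taken
 * with dp_peer_get_ref() must be released with dp_peer_unref_delete() using
 * the same module id. DP_MOD_ID_CDP below is just an example module id.
 *
 *	if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CDP) == QDF_STATUS_SUCCESS) {
 *		// ... peer fields can be accessed safely here ...
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *	}
 */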

/**
 * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *
 * @soc : core DP soc context
 * @peer_id : peer id from which the peer object can be retrieved
 * @mod_id : module id
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline struct dp_peer *
__dp_peer_get_ref_by_id(struct dp_soc *soc,
			uint16_t peer_id,
			enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peers) ? NULL :
		soc->peer_id_to_obj_map[peer_id];
	if (!peer ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}
	qdf_spin_unlock_bh(&soc->peer_map_lock);

	return peer;
}

/**
 * dp_peer_get_ref_by_id() - Returns peer object given the peer id,
 *			     if peer state is active
 *
 * @soc : core DP soc context
 * @peer_id : peer id from which the peer object can be retrieved
 * @mod_id : ID of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline
struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
				      uint16_t peer_id,
				      enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peers) ? NULL :
		soc->peer_id_to_obj_map[peer_id];
	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}
	qdf_spin_unlock_bh(&soc->peer_map_lock);

	return peer;
}
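
/*
 * Usage sketch (illustrative only): looking up an active peer by id and
 * releasing the reference when done. peer_id and DP_MOD_ID_CDP are
 * example values.
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
 *	if (!peer)
 *		return;	// peer not found or already in logical delete
 *	// ... use peer ...
 *	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */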

#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);
}

/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev : DP vdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
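
/*
 * Usage sketch (illustrative only): counting the peers on a vdev. The
 * callback must match the dp_peer_iter_func signature; dp_peer_count_cb
 * is a hypothetical helper written for this example.
 *
 *	static void dp_peer_count_cb(struct dp_soc *soc, struct dp_peer *peer,
 *				     void *arg)
 *	{
 *		uint32_t *count = (uint32_t *)arg;
 *
 *		(*count)++;
 *	}
 *
 *	uint32_t count = 0;
 *
 *	dp_vdev_iterate_peer(vdev, dp_peer_count_cb, &count, DP_MOD_ID_CDP);
 */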

/**
 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
 *
 * @pdev : DP pdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev;

	if (!pdev)
		return;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

/**
 * dp_soc_iterate_peer() - API to iterate through all peers of soc
 *
 * @soc : DP soc context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
		    enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
	}
}

/**
 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev peer list
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterator function outside the peer list lock.
 *
 * As this API allocates new memory, use it only when the callback cannot
 * run with the lock held.
 *
 * @vdev : DP vdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_peer **peer_array = NULL;
	int i = 0;
	uint32_t num_peers = 0;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	num_peers = vdev->num_peers;

	soc = vdev->pdev->soc;

	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
	if (!peer_array)
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (i >= num_peers)
			break;

		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
			peer_array[i] = peer;
			i = (i + 1);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

	for (i = 0; i < num_peers; i++) {
		peer = peer_array[i];

		if (!peer)
			continue;

		(*func)(soc, peer, arg);
		dp_peer_unref_delete(peer, mod_id);
	}

	qdf_mem_free(peer_array);
}
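
/*
 * Usage sketch (illustrative only): the lock-safe variant is for callbacks
 * that cannot run under peer_list_lock, e.g. ones that sleep or take the
 * same lock again. Note the peer count is snapshotted up front, so peers
 * added while iterating may be skipped. dp_peer_teardown_cb is a
 * hypothetical callback.
 *
 *	dp_vdev_iterate_peer_lock_safe(vdev, dp_peer_teardown_cb, NULL,
 *				       DP_MOD_ID_CDP);
 */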

/**
 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterator function outside the peer list lock.
 *
 * As this API allocates new memory, use it only when the callback cannot
 * run with the lock held.
 *
 * @pdev : DP pdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
	int i = 0;
	int j = 0;
	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};

	if (!pdev || !pdev->soc)
		return;

	soc = pdev->soc;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		num_peers[i] = vdev->num_peers;
		peer_array[i] = qdf_mem_malloc(num_peers[i] *
					       sizeof(struct dp_peer *));
		if (!peer_array[i])
			break;

		/* restart the per-vdev peer index for each vdev */
		j = 0;
		qdf_spin_lock_bh(&vdev->peer_list_lock);
		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
				   peer_list_elem,
				   tmp_peer) {
			if (j >= num_peers[i])
				break;

			if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
				peer_array[i][j] = peer;
				j = (j + 1);
			}
		}
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		i = (i + 1);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
		if (!peer_array[i])
			break;

		for (j = 0; j < num_peers[i]; j++) {
			peer = peer_array[i][j];

			if (!peer)
				continue;

			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}

		qdf_mem_free(peer_array[i]);
	}
}

/**
 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterator function outside the peer list lock.
 *
 * As this API allocates new memory, use it only when the callback cannot
 * run with the lock held.
 *
 * @soc : DP soc context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
			      dp_peer_iter_func *func,
			      void *arg,
			      enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
	}
}

#ifdef DP_PEER_STATE_DEBUG
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)

#else
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif

/**
 * dp_peer_state_cmp() - compare dp peer state
 *
 * @peer : DP peer
 * @state : state to compare against
 *
 * Return: true if the given state matches the peer state,
 *	   false otherwise
 */
static inline bool
dp_peer_state_cmp(struct dp_peer *peer,
		  enum dp_peer_state state)
{
	bool is_status_equal = false;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	is_status_equal = (peer->peer_state == state);
	qdf_spin_unlock_bh(&peer->peer_state_lock);

	return is_status_equal;
}

/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc : core DP soc context
 * @peer : DP peer
 * @state : new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
		     struct dp_peer *peer,
		     enum dp_peer_state state)
{
	uint8_t peer_state;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	peer_state = peer->peer_state;

	switch (state) {
	case DP_PEER_STATE_INIT:
		DP_PEER_STATE_ASSERT
			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_ACTIVE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_LOGICAL_DELETE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_ACTIVE) ||
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_INACTIVE:
		DP_PEER_STATE_ASSERT
			(peer, state,
			 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_FREED:
		if (peer->sta_self_peer)
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INIT));
		else
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INACTIVE) ||
				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	default:
		qdf_spin_unlock_bh(&peer->peer_state_lock);
		dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return;
	}
	peer->peer_state = state;
	qdf_spin_unlock_bh(&peer->peer_state_lock);
	dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
		peer_state, state,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}
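
/*
 * Illustrative note: the transitions enforced above form the state machine
 * INIT -> ACTIVE -> LOGICAL_DELETE -> INACTIVE -> FREED, with
 * INIT -> LOGICAL_DELETE, LOGICAL_DELETE -> FREED and, for an STA self
 * peer, INIT -> FREED also permitted. A typical caller activates a newly
 * mapped peer like this (sketch only):
 *
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
 *	if (dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE))
 *		; // peer is now usable by the datapath
 */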

void dp_print_ast_stats(struct dp_soc *soc);
QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
				  uint16_t hw_peer_id, uint8_t vdev_id,
				  uint8_t *peer_mac_addr, uint16_t ast_hash,
				  uint8_t is_wds);
void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			      uint8_t vdev_id, uint8_t *peer_mac_addr,
			      uint8_t is_wds, uint32_t free_wds_count);
void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
			   enum cdp_sec_type sec_type, int is_unicast,
			   u_int32_t *michael_key, u_int32_t *rx_pn);
QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
				   uint8_t tid, uint16_t win_sz);
uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
				    uint16_t peer_id, uint8_t *peer_mac);
QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
			   uint8_t *mac_addr,
			   enum cdp_txrx_ast_entry_type type,
			   uint32_t flags);
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
void dp_peer_ast_unmap_handler(struct dp_soc *soc,
			       struct dp_ast_entry *ast_entry);
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags);
struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id);
struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id);
struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr);
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry);
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry);
void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type);
void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);
void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status);
void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase);
void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry);
void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

/**
 * dp_peer_mec_detach_entry() - Detach the MEC entry
 * @soc: SoC handle
 * @mecentry: MEC entry of the node
 * @ptr: pointer to free list
 *
 * The MEC entry is detached from the MEC table and added to free_list
 * so that the object can be freed outside the lock
 *
 * Return: None
 */
void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
			      void *ptr);

/**
 * dp_peer_mec_free_list() - free the MEC entries from free_list
 * @soc: SoC handle
 * @ptr: pointer to free list
 *
 * Return: None
 */
void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
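
/*
 * Usage sketch (illustrative only, assuming the MEC table is guarded by a
 * soc-level lock named soc->mec_lock): detach entries under the lock, then
 * free them outside it via the free list.
 *
 *	TAILQ_HEAD(, dp_mec_entry) free_list;
 *
 *	TAILQ_INIT(&free_list);
 *	qdf_spin_lock_bh(&soc->mec_lock);
 *	// ... for each entry to be removed:
 *	dp_peer_mec_detach_entry(soc, mecentry, &free_list);
 *	qdf_spin_unlock_bh(&soc->mec_lock);
 *	dp_peer_mec_free_list(soc, &free_list);
 */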

/**
 * dp_peer_mec_add_entry() - Allocate and add MEC entry to MEC table
 * @soc: SoC handle
 * @vdev: vdev to which mec node belongs
 * @mac_addr: MAC address of mec node
 *
 * This function allocates and adds MEC entry to MEC table.
 * It assumes caller has taken the mec lock to protect the access to these
 * tables
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 uint8_t *mac_addr);

/**
 * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
 *				       within pdev
 * @soc: SoC handle
 * @pdev_id: pdev id
 * @mec_mac_addr: MAC address of mec node
 *
 * It assumes caller has taken the mec_lock to protect the access to
 * MEC hash table
 *
 * Return: MEC entry
 */
struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t pdev_id,
						     uint8_t *mec_mac_addr);
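
/*
 * Usage sketch (illustrative only, with the same mec lock assumption as
 * above): a looked-up entry is valid only while the lock is held.
 *
 *	struct dp_mec_entry *mecentry;
 *
 *	qdf_spin_lock_bh(&soc->mec_lock);
 *	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev_id, mac_addr);
 *	if (mecentry)
 *		; // entry may be freed once the lock is dropped
 *	qdf_spin_unlock_bh(&soc->mec_lock);
 */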

#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc);\
			QDF_BUG(_condition); \
		} \
	} while (0)

/**
 * dp_peer_update_inactive_time - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf);

#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id: set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey: set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init: initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove: remove peer from special peer list
 * @peer: peer handle
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif

#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer - dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl - generic soc handle
 * @is_wds - flag to indicate if peer is wds
 * @peer_id - peer_id from htt peer map message
 * @peer_mac_addr - mac address of the peer
 * @ast_info - ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info);
#endif

/**
 * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
 * @soc: DP SOC handle
 * @pdev_id: id of DP pdev handle
 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
 * @is_tx_pkt_cap_enable: enable/disable/delete/print
 *			  Tx packet capture in monitor mode
 * @peer_mac: MAC address for which the above need to be enabled/disabled
 *
 * Return: QDF_STATUS_SUCCESS if Rx & Tx capture is enabled for the peer,
 *	   error status otherwise
 */
QDF_STATUS
dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
				  uint8_t pdev_id,
				  bool is_rx_pkt_cap_enable,
				  uint8_t is_tx_pkt_cap_enable,
				  uint8_t *peer_mac);

/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);

#ifndef WLAN_TX_PKT_CAPTURE_ENH
/**
 * dp_peer_tid_queue_init() - Initialize ppdu stats queue per TID
 * @peer: Datapath peer
 */
static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
{
}

/**
 * dp_peer_tid_peer_id_update() - update peer_id to tid structure
 * @peer: Datapath peer
 * @peer_id: peer_id
 */
static inline
void dp_peer_tid_peer_id_update(struct dp_peer *peer, uint16_t peer_id)
{
}

/**
 * dp_peer_tid_queue_cleanup() - remove ppdu stats queue per TID
 * @peer: Datapath peer
 */
static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
{
}

/**
 * dp_peer_update_80211_hdr() - dp peer update 80211 hdr
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 */
static inline void
dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
{
}
#endif

#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
				       struct dp_peer *peer);
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
				   struct dp_peer *peer);
#else
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
						     struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
						 struct dp_peer *peer)
{
}
#endif

struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   enum dp_mod_id mod_id);
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
						struct dp_vdev *vdev,
						enum dp_mod_id mod_id);

#ifdef FEATURE_AST
/*
 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
 * @soc - datapath soc handle
 * @peer - datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received (for example,
	 * in case of auth disallow due to ACL); in such cases the self
	 * ast is not added to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}
#else
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif

#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);

/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);

/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc : dp_soc handle
 * @peer: peer
 *
 * This function is used to send a cache flush cmd to REO and
 * to register the callback that handles dumping of the REO
 * queue stats from DDR
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(
	struct dp_soc *soc, struct dp_peer *peer);

/**
 * dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl : cdp_soc_t handle
 * @vdev_id: vdev id
 *
 * Handler to get rx tid info from DDR after the h/w cache is
 * invalidated first using the cache flush cmd.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);

/**
 * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
 * @soc : dp_soc handle
 * @cb_ctxt : callback context
 * @reo_status: REO command status
 *
 * This is the callback function registered after sending the reo cmd
 * to flush the h/w cache and invalidate it. In the callback the reo
 * queue desc info is dumped from DDR.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(
	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
#else /* DUMP_REO_QUEUE_INFO_IN_DDR */
static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
#endif /* _DP_PEER_H_ */