dp_peer.h

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_PEER_H_
#define _DP_PEER_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_types.h"

#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
#include "hal_reo.h"
#endif

#define DP_INVALID_PEER_ID 0xffff

#define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
#define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

#define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_info(params...) \
        __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
#define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)

#ifdef REO_QDESC_HISTORY
enum reo_qdesc_event_type {
        REO_QDESC_UPDATE_CB = 0,
        REO_QDESC_FREE,
};

struct reo_qdesc_event {
        qdf_dma_addr_t qdesc_addr;
        uint64_t ts;
        enum reo_qdesc_event_type type;
        uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
};
#endif

struct ast_del_ctxt {
        bool age;
        int del_count;
};

typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
                               void *arg);

/**
 * dp_peer_unref_delete() - release the reference held on a peer
 * @peer : DP peer
 * @id : ID of the module that held the reference
 *
 * Return: void
 */
void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
/**
 * dp_peer_get_ref() - Acquire a reference on a peer object
 *
 * @soc : core DP soc context
 * @peer : DP peer
 * @mod_id : id of module requesting the reference
 *
 * Return: QDF_STATUS_SUCCESS if reference held successfully
 *         else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
                           struct dp_peer *peer,
                           enum dp_mod_id mod_id)
{
        if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
                return QDF_STATUS_E_INVAL;

        if (mod_id > DP_MOD_ID_RX)
                qdf_atomic_inc(&peer->mod_refs[mod_id]);

        return QDF_STATUS_SUCCESS;
}
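
/*
 * Usage sketch (illustrative only, not part of this header): every
 * successful dp_peer_get_ref() must be balanced by a dp_peer_unref_delete()
 * with the same mod_id, since per-module reference counts are also tracked
 * in peer->mod_refs for most module ids. A caller that already holds a
 * valid peer pointer would typically do:
 *
 *      if (dp_peer_get_ref(soc, peer, DP_MOD_ID_TX) ==
 *          QDF_STATUS_SUCCESS) {
 *              // ... safely access peer fields ...
 *              dp_peer_unref_delete(peer, DP_MOD_ID_TX);
 *      }
 *
 * DP_MOD_ID_TX is used here purely as an example module id.
 */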
/**
 * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *
 * @soc : core DP soc context
 * @peer_id : peer id from which the peer object can be retrieved
 * @mod_id : module id
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline struct dp_peer *
__dp_peer_get_ref_by_id(struct dp_soc *soc,
                        uint16_t peer_id,
                        enum dp_mod_id mod_id)
{
        struct dp_peer *peer;

        qdf_spin_lock_bh(&soc->peer_map_lock);
        peer = (peer_id >= soc->max_peers) ? NULL :
                soc->peer_id_to_obj_map[peer_id];

        if (!peer ||
            (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
                qdf_spin_unlock_bh(&soc->peer_map_lock);
                return NULL;
        }

        qdf_spin_unlock_bh(&soc->peer_map_lock);
        return peer;
}

/**
 * dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *                           if peer state is active
 *
 * @soc : core DP soc context
 * @peer_id : peer id from which the peer object can be retrieved
 * @mod_id : ID of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline
struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
                                      uint16_t peer_id,
                                      enum dp_mod_id mod_id)
{
        struct dp_peer *peer;

        qdf_spin_lock_bh(&soc->peer_map_lock);
        peer = (peer_id >= soc->max_peers) ? NULL :
                soc->peer_id_to_obj_map[peer_id];

        if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
            (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
                qdf_spin_unlock_bh(&soc->peer_map_lock);
                return NULL;
        }

        qdf_spin_unlock_bh(&soc->peer_map_lock);
        return peer;
}
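
/*
 * Usage sketch (illustrative only): id-based lookups return NULL either
 * when the id is unmapped or when the peer is already in (or past)
 * logical delete, so callers must check the return value and release the
 * reference on the same mod_id when done:
 *
 *      struct dp_peer *peer;
 *
 *      peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
 *      if (!peer)
 *              return;
 *      // ... use peer ...
 *      dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
 *
 * DP_MOD_ID_HTT is just an example module id.
 */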
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
        qdf_spin_lock_bh(&peer->peer_info_lock);
        peer->state = OL_TXRX_PEER_STATE_DISC;
        qdf_spin_unlock_bh(&peer->peer_info_lock);

        dp_rx_flush_rx_cached(peer, true);
}

/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev : DP vdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
                     enum dp_mod_id mod_id)
{
        struct dp_peer *peer;
        struct dp_peer *tmp_peer;
        struct dp_soc *soc = NULL;

        if (!vdev || !vdev->pdev || !vdev->pdev->soc)
                return;

        soc = vdev->pdev->soc;

        qdf_spin_lock_bh(&vdev->peer_list_lock);
        TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
                           peer_list_elem,
                           tmp_peer) {
                if (dp_peer_get_ref(soc, peer, mod_id) ==
                    QDF_STATUS_SUCCESS) {
                        (*func)(soc, peer, arg);
                        dp_peer_unref_delete(peer, mod_id);
                }
        }
        qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
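
/*
 * Illustrative example (not part of this header): the caller supplies a
 * dp_peer_iter_func callback; the iterator takes a reference around each
 * invocation and runs the callback with vdev->peer_list_lock held, so the
 * callback must not sleep or reacquire that lock. A hypothetical peer
 * counter might look like:
 *
 *      static void count_peer_cb(struct dp_soc *soc, struct dp_peer *peer,
 *                                void *arg)
 *      {
 *              uint32_t *count = (uint32_t *)arg;
 *
 *              (*count)++;
 *      }
 *
 *      uint32_t count = 0;
 *
 *      dp_vdev_iterate_peer(vdev, count_peer_cb, &count, DP_MOD_ID_MISC);
 *
 * count_peer_cb is a hypothetical helper and DP_MOD_ID_MISC an example
 * module id.
 */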
/**
 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
 *
 * @pdev : DP pdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
                     enum dp_mod_id mod_id)
{
        struct dp_vdev *vdev;

        if (!pdev)
                return;

        qdf_spin_lock_bh(&pdev->vdev_list_lock);
        DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
                dp_vdev_iterate_peer(vdev, func, arg, mod_id);
        qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

/**
 * dp_soc_iterate_peer() - API to iterate through all peers of soc
 *
 * @soc : DP soc context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
                    enum dp_mod_id mod_id)
{
        struct dp_pdev *pdev;
        int i;

        if (!soc)
                return;

        for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
                pdev = soc->pdev_list[i];
                dp_pdev_iterate_peer(pdev, func, arg, mod_id);
        }
}
/**
 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev peer list
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * Since this API allocates new memory, it should be used only when the
 * callback cannot run under the peer list lock.
 *
 * @vdev : DP vdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
                               dp_peer_iter_func *func,
                               void *arg,
                               enum dp_mod_id mod_id)
{
        struct dp_peer *peer;
        struct dp_peer *tmp_peer;
        struct dp_soc *soc = NULL;
        struct dp_peer **peer_array = NULL;
        int i = 0;
        uint32_t num_peers = 0;

        if (!vdev || !vdev->pdev || !vdev->pdev->soc)
                return;

        num_peers = vdev->num_peers;
        soc = vdev->pdev->soc;

        peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
        if (!peer_array)
                return;

        qdf_spin_lock_bh(&vdev->peer_list_lock);
        TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
                           peer_list_elem,
                           tmp_peer) {
                if (i >= num_peers)
                        break;

                if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
                        peer_array[i] = peer;
                        i++;
                }
        }
        qdf_spin_unlock_bh(&vdev->peer_list_lock);

        for (i = 0; i < num_peers; i++) {
                peer = peer_array[i];
                if (!peer)
                        continue;

                (*func)(soc, peer, arg);
                dp_peer_unref_delete(peer, mod_id);
        }

        qdf_mem_free(peer_array);
}
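
/*
 * Note on choosing the variant: dp_vdev_iterate_peer() invokes the
 * callback with vdev->peer_list_lock held, so a callback that may sleep
 * or take that same lock must use this _lock_safe variant instead. The
 * call shape is identical, e.g.
 * dp_vdev_iterate_peer_lock_safe(vdev, func, arg, mod_id), at the cost
 * of a temporary peer_array allocation sized by vdev->num_peers.
 */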
/**
 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * Since this API allocates new memory, it should be used only when the
 * callback cannot run under the peer list lock.
 *
 * @pdev : DP pdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
                               dp_peer_iter_func *func,
                               void *arg,
                               enum dp_mod_id mod_id)
{
        struct dp_peer *peer;
        struct dp_peer *tmp_peer;
        struct dp_soc *soc = NULL;
        struct dp_vdev *vdev = NULL;
        struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
        int i = 0;
        int j = 0;
        uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};

        if (!pdev || !pdev->soc)
                return;

        soc = pdev->soc;

        qdf_spin_lock_bh(&pdev->vdev_list_lock);
        DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
                num_peers[i] = vdev->num_peers;
                peer_array[i] = qdf_mem_malloc(num_peers[i] *
                                               sizeof(struct dp_peer *));
                if (!peer_array[i])
                        break;

                /* reset the fill index for this vdev's peer array */
                j = 0;
                qdf_spin_lock_bh(&vdev->peer_list_lock);
                TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
                                   peer_list_elem,
                                   tmp_peer) {
                        if (j >= num_peers[i])
                                break;

                        if (dp_peer_get_ref(soc, peer, mod_id) ==
                            QDF_STATUS_SUCCESS) {
                                peer_array[i][j] = peer;
                                j++;
                        }
                }
                qdf_spin_unlock_bh(&vdev->peer_list_lock);
                i++;
        }
        qdf_spin_unlock_bh(&pdev->vdev_list_lock);

        for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
                if (!peer_array[i])
                        break;

                for (j = 0; j < num_peers[i]; j++) {
                        peer = peer_array[i][j];
                        if (!peer)
                                continue;

                        (*func)(soc, peer, arg);
                        dp_peer_unref_delete(peer, mod_id);
                }

                qdf_mem_free(peer_array[i]);
        }
}
/**
 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * Since this API allocates new memory, it should be used only when the
 * callback cannot run under the peer list lock.
 *
 * @soc : DP soc context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
                              dp_peer_iter_func *func,
                              void *arg,
                              enum dp_mod_id mod_id)
{
        struct dp_pdev *pdev;
        int i;

        if (!soc)
                return;

        for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
                pdev = soc->pdev_list[i];
                dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
        }
}
#ifdef DP_PEER_STATE_DEBUG
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
        do { \
                if (!(_condition)) { \
                        dp_alert("Invalid state shift from %u to %u peer " \
                                 QDF_MAC_ADDR_FMT, \
                                 (_peer)->peer_state, (_new_state), \
                                 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
                        QDF_ASSERT(0); \
                } \
        } while (0)
#else
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
        do { \
                if (!(_condition)) { \
                        dp_alert("Invalid state shift from %u to %u peer " \
                                 QDF_MAC_ADDR_FMT, \
                                 (_peer)->peer_state, (_new_state), \
                                 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
                } \
        } while (0)
#endif
/**
 * dp_peer_state_cmp() - compare dp peer state
 *
 * @peer : DP peer
 * @state : state
 *
 * Return: true if state matches the peer state,
 *         false if it does not match
 */
static inline bool
dp_peer_state_cmp(struct dp_peer *peer,
                  enum dp_peer_state state)
{
        bool is_status_equal = false;

        qdf_spin_lock_bh(&peer->peer_state_lock);
        is_status_equal = (peer->peer_state == state);
        qdf_spin_unlock_bh(&peer->peer_state_lock);

        return is_status_equal;
}
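
/*
 * Illustrative check (not part of this header): dp_peer_state_cmp() takes
 * peer_state_lock internally, so callers can test the state without any
 * extra locking, e.g.:
 *
 *      if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE))
 *              return QDF_STATUS_E_INVAL;
 */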
/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc : core DP soc context
 * @peer : DP peer
 * @state : new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
                     struct dp_peer *peer,
                     enum dp_peer_state state)
{
        uint8_t peer_state;

        qdf_spin_lock_bh(&peer->peer_state_lock);
        peer_state = peer->peer_state;

        switch (state) {
        case DP_PEER_STATE_INIT:
                DP_PEER_STATE_ASSERT
                        (peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
                         (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
                break;

        case DP_PEER_STATE_ACTIVE:
                DP_PEER_STATE_ASSERT(peer, state,
                                     (peer_state == DP_PEER_STATE_INIT));
                break;

        case DP_PEER_STATE_LOGICAL_DELETE:
                DP_PEER_STATE_ASSERT(peer, state,
                                     (peer_state == DP_PEER_STATE_ACTIVE) ||
                                     (peer_state == DP_PEER_STATE_INIT));
                break;

        case DP_PEER_STATE_INACTIVE:
                DP_PEER_STATE_ASSERT
                        (peer, state,
                         (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
                break;

        case DP_PEER_STATE_FREED:
                if (peer->sta_self_peer)
                        DP_PEER_STATE_ASSERT
                                (peer, state,
                                 (peer_state == DP_PEER_STATE_INIT));
                else
                        DP_PEER_STATE_ASSERT
                                (peer, state,
                                 (peer_state == DP_PEER_STATE_INACTIVE) ||
                                 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
                break;

        default:
                qdf_spin_unlock_bh(&peer->peer_state_lock);
                dp_alert("Invalid peer state %u for peer "QDF_MAC_ADDR_FMT,
                         state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
                return;
        }

        peer->peer_state = state;
        qdf_spin_unlock_bh(&peer->peer_state_lock);
        dp_info("Updating peer state from %u to %u mac "QDF_MAC_ADDR_FMT"\n",
                peer_state, state,
                QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}
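
/*
 * Summary of the transitions enforced above (a descriptive note, derived
 * from the switch cases in dp_peer_update_state()):
 *
 *      ACTIVE          <- INIT
 *      LOGICAL_DELETE  <- INIT or ACTIVE
 *      INACTIVE        <- LOGICAL_DELETE
 *      FREED           <- INACTIVE or LOGICAL_DELETE
 *                         (or INIT for an STA self peer)
 *
 * so the normal lifecycle is INIT -> ACTIVE -> LOGICAL_DELETE ->
 * INACTIVE -> FREED.
 */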
void dp_print_ast_stats(struct dp_soc *soc);

QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
                                  uint16_t hw_peer_id, uint8_t vdev_id,
                                  uint8_t *peer_mac_addr, uint16_t ast_hash,
                                  uint8_t is_wds);
void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
                              uint8_t vdev_id, uint8_t *peer_mac_addr,
                              uint8_t is_wds, uint32_t free_wds_count);
void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
                           enum cdp_sec_type sec_type, int is_unicast,
                           u_int32_t *michael_key, u_int32_t *rx_pn);
QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
                                   uint8_t tid, uint16_t win_sz);

uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
                                    uint16_t peer_id, uint8_t *peer_mac);

QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
                           uint8_t *mac_addr,
                           enum cdp_txrx_ast_entry_type type,
                           uint32_t flags);
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
void dp_peer_ast_unmap_handler(struct dp_soc *soc,
                               struct dp_ast_entry *ast_entry);
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
                       struct dp_ast_entry *ast_entry, uint32_t flags);

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
                                                     uint8_t *ast_mac_addr,
                                                     uint8_t pdev_id);
struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
                                                     uint8_t *ast_mac_addr,
                                                     uint8_t vdev_id);
struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
                                               uint8_t *ast_mac_addr);

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
                                struct dp_ast_entry *ast_entry);
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
                                 struct dp_ast_entry *ast_entry);
void dp_peer_ast_set_type(struct dp_soc *soc,
                          struct dp_ast_entry *ast_entry,
                          enum cdp_txrx_ast_entry_type type);
void dp_peer_ast_send_wds_del(struct dp_soc *soc,
                              struct dp_ast_entry *ast_entry,
                              struct dp_peer *peer);

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
                           struct cdp_soc *dp_soc,
                           void *cookie,
                           enum cdp_ast_free_status status);

void dp_peer_ast_hash_remove(struct dp_soc *soc,
                             struct dp_ast_entry *ase);
void dp_peer_free_ast_entry(struct dp_soc *soc,
                            struct dp_ast_entry *ast_entry);
void dp_peer_unlink_ast_entry(struct dp_soc *soc,
                              struct dp_ast_entry *ast_entry,
                              struct dp_peer *peer);
/**
 * dp_peer_mec_detach_entry() - Detach the MEC entry
 * @soc: SoC handle
 * @mecentry: MEC entry of the node
 * @ptr: pointer to free list
 *
 * The MEC entry is detached from the MEC table and added to free_list
 * so the object can be freed outside the lock
 *
 * Return: None
 */
void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
                              void *ptr);

/**
 * dp_peer_mec_free_list() - free the MEC entries from free_list
 * @soc: SoC handle
 * @ptr: pointer to free list
 *
 * Return: None
 */
void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);

/**
 * dp_peer_mec_add_entry() - Allocate and add a MEC entry to the MEC table
 * @soc: SoC handle
 * @vdev: vdev to which mec node belongs
 * @mac_addr: MAC address of mec node
 *
 * This function allocates and adds a MEC entry to the MEC table.
 * It assumes the caller has taken the mec lock to protect the access to
 * these tables
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
                                 struct dp_vdev *vdev,
                                 uint8_t *mac_addr);
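
/*
 * Usage sketch (illustrative only): dp_peer_mec_add_entry() does not take
 * the MEC lock itself, so the caller is expected to wrap it in the
 * spinlock created by dp_peer_mec_spinlock_create(). Assuming that lock
 * is soc->mec_lock (an assumption about the dp_soc layout, not something
 * this header declares):
 *
 *      qdf_spin_lock_bh(&soc->mec_lock);
 *      status = dp_peer_mec_add_entry(soc, vdev, mac_addr);
 *      qdf_spin_unlock_bh(&soc->mec_lock);
 */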
/**
 * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
 *                                     within pdev
 * @soc: SoC handle
 * @pdev_id: pdev id
 * @mec_mac_addr: MAC address of the mec node
 *
 * It assumes the caller has taken the mec_lock to protect the access to
 * the MEC hash table
 *
 * Return: MEC entry
 */
struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
                                                     uint8_t pdev_id,
                                                     uint8_t *mec_mac_addr);

#define DP_AST_ASSERT(_condition) \
        do { \
                if (!(_condition)) { \
                        dp_print_ast_stats(soc);\
                        QDF_BUG(_condition); \
                } \
        } while (0)
/**
 * dp_peer_update_inactive_time() - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: tag buffer
 *
 * Return: None
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
                             uint32_t *tag_buf);
#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id() - set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * Return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
                         uint8_t vdev_id, uint8_t *peer_mac,
                         uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey() - set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * Return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
                                uint16_t vlan_id, uint16_t group_key)
{
        return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init() - initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove() - remove peer from special peer list
 * @peer: peer handle
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
                         uint8_t vdev_id, uint8_t *peer_mac,
                         uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
                                uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif
#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer - dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl - generic soc handle
 * @is_wds - flag to indicate if peer is wds
 * @peer_id - peer_id from htt peer map message
 * @peer_mac_addr - mac address of the peer
 * @ast_info - ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
                bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
                struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);

void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
                bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
                struct dp_ast_flow_override_info *ast_info);
#endif
/**
 * dp_peer_update_pkt_capture_params() - Set Rx & Tx capture flags for a peer
 * @soc: DP SOC handle
 * @pdev_id: id of DP pdev handle
 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
 * @is_tx_pkt_cap_enable: enable/disable/delete/print
 *                        Tx packet capture in monitor mode
 * @peer_mac: MAC address for which the above need to be enabled/disabled
 *
 * Return: QDF_STATUS_SUCCESS if Rx & Tx capture is enabled for the peer,
 *         error status otherwise
 */
QDF_STATUS
dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
                                  uint8_t pdev_id,
                                  bool is_rx_pkt_cap_enable,
                                  uint8_t is_tx_pkt_cap_enable,
                                  uint8_t *peer_mac);

/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
                         void *cb_ctxt,
                         union hal_reo_status *reo_status);
#ifndef WLAN_TX_PKT_CAPTURE_ENH
/**
 * dp_peer_tid_queue_init() - Initialize ppdu stats queue per TID
 * @peer: Datapath peer
 *
 * Return: None
 */
static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
{
}

/**
 * dp_peer_tid_peer_id_update() - update peer_id to tid structure
 * @peer: Datapath peer
 * @peer_id: peer_id
 *
 * Return: None
 */
static inline
void dp_peer_tid_peer_id_update(struct dp_peer *peer, uint16_t peer_id)
{
}

/**
 * dp_peer_tid_queue_cleanup() - remove ppdu stats queue per TID
 * @peer: Datapath peer
 *
 * Return: None
 */
static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
{
}

/**
 * dp_peer_update_80211_hdr() - dp peer update 80211 hdr
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 * Return: None
 */
static inline void
dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
{
}
#endif
#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
                                       struct dp_peer *peer);
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
                                   struct dp_peer *peer);
#else
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
                                                     struct dp_peer *peer)
{
        return QDF_STATUS_SUCCESS;
}

static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
                                                 struct dp_peer *peer)
{
}
#endif

struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
                                           struct dp_vdev *vdev,
                                           enum dp_mod_id mod_id);
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
                                                struct dp_vdev *vdev,
                                                enum dp_mod_id mod_id);

#ifdef FEATURE_AST
/*
 * dp_peer_delete_ast_entries() - Delete all AST entries for a peer
 * @soc - datapath soc handle
 * @peer - datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
                                              struct dp_peer *peer)
{
        struct dp_ast_entry *ast_entry, *temp_ast_entry;

        dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
        /*
         * Delete peer self ast entry. This is done to handle scenarios
         * where peer is freed before peer map is received (for example, in
         * case of auth disallow due to ACL); in such cases the self ast is
         * not added to peer->ast_list.
         */
        if (peer->self_ast_entry) {
                dp_peer_del_ast(soc, peer->self_ast_entry);
                peer->self_ast_entry = NULL;
        }

        DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
                dp_peer_del_ast(soc, ast_entry);
}
#else
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
                                              struct dp_peer *peer)
{
}
#endif
#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);

/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);

/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif
#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc : dp_soc handle
 * @peer: peer
 *
 * This function is used to send the cache flush cmd to reo and
 * to register the callback that handles dumping of the reo
 * queue stats from DDR
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(
        struct dp_soc *soc, struct dp_peer *peer);

/**
 * dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl : cdp_soc_t handle
 * @vdev_id: vdev id
 *
 * Handler to get rx tid info from DDR after the h/w cache is
 * invalidated first using the cache flush cmd.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(
        struct cdp_soc_t *soc_hdl, uint8_t vdev_id);

/**
 * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
 * @soc : dp_soc handle
 * @cb_ctxt - callback context
 * @reo_status: REO command status
 *
 * This is the callback function registered after sending the reo cmd
 * to flush the h/w cache and invalidate it. In the callback the reo
 * queue desc info is dumped from DDR.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(
        struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
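
/*
 * Flow summary (a descriptive note inferred from the descriptions above,
 * not an additional API): dp_get_rx_reo_queue_info() triggers
 * dp_send_cache_flush_for_rx_tid() for the peers of the given vdev, which
 * issues the REO cache flush/invalidate command and registers
 * dp_dump_rx_reo_queue_info() as the completion callback; the callback
 * then reads the reo queue descriptor contents from DDR.
 */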
#else /* DUMP_REO_QUEUE_INFO_IN_DDR */
static inline void dp_get_rx_reo_queue_info(
        struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
#endif /* _DP_PEER_H_ */