/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_PEER_H_
#define _DP_PEER_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_types.h"
#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
#include "hal_reo.h"
#endif

#define DP_INVALID_PEER_ID 0xffff

#define DP_PEER_MAX_MEC_IDX 1024	/* maximum index for MEC table */
#define DP_PEER_MAX_MEC_ENTRY 4096	/* maximum MEC entries in MEC table */

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

#define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
#define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
#ifdef REO_QDESC_HISTORY
enum reo_qdesc_event_type {
	REO_QDESC_UPDATE_CB = 0,
	REO_QDESC_FREE,
};

struct reo_qdesc_event {
	qdf_dma_addr_t qdesc_addr;
	uint64_t ts;
	enum reo_qdesc_event_type type;
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
};
#endif

struct ast_del_ctxt {
	bool age;
	int del_count;
};
typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);

void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);

struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
				       uint8_t *peer_mac_addr,
				       int mac_addr_is_aligned,
				       uint8_t vdev_id,
				       enum dp_mod_id id);
/**
 * dp_peer_get_ref() - Take a reference on the given peer object
 *
 * @soc : core DP soc context
 * @peer : DP peer
 * @mod_id : id of module requesting the reference
 *
 * Return: QDF_STATUS_SUCCESS if reference held successfully
 *	   else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
			   struct dp_peer *peer,
			   enum dp_mod_id mod_id)
{
	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
		return QDF_STATUS_E_INVAL;

	/* per-module ref accounting is skipped for ids up to DP_MOD_ID_RX */
	if (mod_id > DP_MOD_ID_RX)
		qdf_atomic_inc(&peer->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}
/**
 * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *
 * @soc : core DP soc context
 * @peer_id : peer id from which the peer object can be retrieved
 * @mod_id : module id
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline struct dp_peer *
__dp_peer_get_ref_by_id(struct dp_soc *soc,
			uint16_t peer_id,
			enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peers) ? NULL :
				soc->peer_id_to_obj_map[peer_id];
	if (!peer ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}
/**
 * dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *			     if peer state is active
 *
 * @soc : core DP soc context
 * @peer_id : peer id from which the peer object can be retrieved
 * @mod_id : ID of module requesting reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline
struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
				      uint16_t peer_id,
				      enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peers) ? NULL :
				soc->peer_id_to_obj_map[peer_id];
	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}
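
/*
 * Usage sketch (illustrative only; dp_example_peer_access is hypothetical
 * and not part of this header): every successful dp_peer_get_ref_by_id()
 * must be balanced by dp_peer_unref_delete() with the same dp_mod_id.
 */
#if 0
static void dp_example_peer_access(struct dp_soc *soc, uint16_t peer_id)
{
	struct dp_peer *peer;

	/* takes a reference only if the peer exists and is still active */
	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
	if (!peer)
		return;

	/* ... peer may be dereferenced safely here ... */

	/* release the reference taken above, with the matching module id */
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
}
#endif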
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);
}
/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev : DP vdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
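
/*
 * Example iterator callback (illustrative only; the dp_example_* names are
 * hypothetical). The iterator hands each peer to the callback with a
 * reference already held, so the callback must not sleep and must not take
 * vdev->peer_list_lock again.
 */
#if 0
static void dp_example_count_peer(struct dp_soc *soc, struct dp_peer *peer,
				  void *arg)
{
	uint32_t *count = (uint32_t *)arg;

	(*count)++;
}

static uint32_t dp_example_vdev_peer_count(struct dp_vdev *vdev)
{
	uint32_t num = 0;

	dp_vdev_iterate_peer(vdev, dp_example_count_peer, &num,
			     DP_MOD_ID_CDP);
	return num;
}
#endif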
/**
 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
 *
 * @pdev : DP pdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev;

	if (!pdev)
		return;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

/**
 * dp_soc_iterate_peer() - API to iterate through all peers of soc
 *
 * @soc : DP soc context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
		    enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
	}
}
/**
 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev peer list
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it should be used only when the
 * callback cannot run under the peer list lock.
 *
 * @vdev : DP vdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_peer **peer_array = NULL;
	int i = 0;
	uint32_t num_peers = 0;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	num_peers = vdev->num_peers;

	soc = vdev->pdev->soc;
	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
	if (!peer_array)
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (i >= num_peers)
			break;

		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
			peer_array[i] = peer;
			i = (i + 1);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

	for (i = 0; i < num_peers; i++) {
		peer = peer_array[i];

		if (!peer)
			continue;

		(*func)(soc, peer, arg);
		dp_peer_unref_delete(peer, mod_id);
	}

	qdf_mem_free(peer_array);
}
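
/*
 * Note: prefer dp_vdev_iterate_peer() when the callback is short and
 * non-blocking; use the _lock_safe variants when the callback may block
 * or may itself take peer_list_lock, at the cost of a transient peer
 * pointer array allocation.
 */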
/**
 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it should be used only when the
 * callback cannot run under the peer list lock.
 *
 * @pdev : DP pdev context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
	int i = 0;
	int j = 0;
	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};

	if (!pdev || !pdev->soc)
		return;

	soc = pdev->soc;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		num_peers[i] = vdev->num_peers;
		peer_array[i] = qdf_mem_malloc(num_peers[i] *
					       sizeof(struct dp_peer *));
		if (!peer_array[i])
			break;

		/* reset the fill index for this vdev's peer array */
		j = 0;
		qdf_spin_lock_bh(&vdev->peer_list_lock);
		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
				   peer_list_elem,
				   tmp_peer) {
			if (j >= num_peers[i])
				break;

			if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
				peer_array[i][j] = peer;
				j = (j + 1);
			}
		}
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		i = (i + 1);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
		if (!peer_array[i])
			break;

		for (j = 0; j < num_peers[i]; j++) {
			peer = peer_array[i][j];

			if (!peer)
				continue;

			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}

		qdf_mem_free(peer_array[i]);
	}
}
/**
 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * As this API allocates new memory, it should be used only when the
 * callback cannot run under the peer list lock.
 *
 * @soc : DP soc context
 * @func : function to be called for each peer
 * @arg : argument to be passed to func
 * @mod_id : module_id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
			      dp_peer_iter_func *func,
			      void *arg,
			      enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
	}
}
#ifdef DP_PEER_STATE_DEBUG
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)

#else
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif
/**
 * dp_peer_state_cmp() - compare dp peer state
 *
 * @peer : DP peer
 * @state : state
 *
 * Return: true if state matches with peer state
 *	   false if it does not match
 */
static inline bool
dp_peer_state_cmp(struct dp_peer *peer,
		  enum dp_peer_state state)
{
	bool is_status_equal = false;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	is_status_equal = (peer->peer_state == state);
	qdf_spin_unlock_bh(&peer->peer_state_lock);

	return is_status_equal;
}
/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc : core DP soc context
 * @peer : DP peer
 * @state : new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
		     struct dp_peer *peer,
		     enum dp_peer_state state)
{
	uint8_t peer_state;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	peer_state = peer->peer_state;

	switch (state) {
	case DP_PEER_STATE_INIT:
		DP_PEER_STATE_ASSERT
			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_ACTIVE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_LOGICAL_DELETE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_ACTIVE) ||
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_INACTIVE:
		DP_PEER_STATE_ASSERT
			(peer, state,
			 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_FREED:
		if (peer->sta_self_peer)
			DP_PEER_STATE_ASSERT
			(peer, state, (peer_state == DP_PEER_STATE_INIT));
		else
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INACTIVE) ||
				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	default:
		qdf_spin_unlock_bh(&peer->peer_state_lock);
		dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return;
	}
	peer->peer_state = state;
	qdf_spin_unlock_bh(&peer->peer_state_lock);
	dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
		peer_state, state,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}
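
/*
 * Transitions accepted by the checks above:
 *
 *	INIT           <- any state except ACTIVE/LOGICAL_DELETE
 *	ACTIVE         <- INIT
 *	LOGICAL_DELETE <- INIT, ACTIVE
 *	INACTIVE       <- LOGICAL_DELETE
 *	FREED          <- INACTIVE, LOGICAL_DELETE
 *	                  (or INIT for a STA self peer)
 */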
void dp_print_ast_stats(struct dp_soc *soc);

QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
				  uint16_t hw_peer_id, uint8_t vdev_id,
				  uint8_t *peer_mac_addr, uint16_t ast_hash,
				  uint8_t is_wds);

void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			      uint8_t vdev_id, uint8_t *peer_mac_addr,
			      uint8_t is_wds, uint32_t free_wds_count);
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
 * @soc - generic soc handle
 * @peer_id - ML peer_id from firmware
 * @peer_mac_addr - mac address of the peer
 * @mlo_flow_info: MLO AST flow info
 *
 * Associate the ML peer_id that firmware provided with the peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS
dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t *peer_mac_addr,
			   struct dp_mlo_flow_override_info *mlo_flow_info);

/**
 * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
 * @soc - generic soc handle
 * @peer_id - peer_id from firmware
 *
 * Return: none
 */
void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
#endif
void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
			   enum cdp_sec_type sec_type, int is_unicast,
			   u_int32_t *michael_key, u_int32_t *rx_pn);

QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
				   uint8_t tid, uint16_t win_sz);

uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
				    uint16_t peer_id, uint8_t *peer_mac);

QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
			   uint8_t *mac_addr,
			   enum cdp_txrx_ast_entry_type type,
			   uint32_t flags);

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);

void dp_peer_ast_unmap_handler(struct dp_soc *soc,
			       struct dp_ast_entry *ast_entry);

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags);

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr);

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry);

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry);

void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type);

void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status);

void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase);

void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry);

void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);
/**
 * dp_peer_mec_detach_entry() - Detach the MEC entry
 * @soc: SoC handle
 * @mecentry: MEC entry of the node
 * @ptr: pointer to free list
 *
 * The MEC entry is detached from MEC table and added to free_list
 * to free the object outside lock
 *
 * Return: None
 */
void dp_peer_mec_detach_entry(struct dp_soc *soc,
			      struct dp_mec_entry *mecentry,
			      void *ptr);

/**
 * dp_peer_mec_free_list() - free the MEC entry from free_list
 * @soc: SoC handle
 * @ptr: pointer to free list
 *
 * Return: None
 */
void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);

/**
 * dp_peer_mec_add_entry() - allocate and add a MEC entry to the MEC table
 * @soc: SoC handle
 * @vdev: vdev to which mec node belongs
 * @mac_addr: MAC address of mec node
 *
 * This function allocates and adds MEC entry to MEC table.
 * It assumes caller has taken the mec lock to protect the access to these
 * tables
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 uint8_t *mac_addr);

/**
 * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
 *				       within pdev
 * @soc: SoC handle
 * @pdev_id: pdev id
 * @mec_mac_addr: MAC address of mec node
 *
 * It assumes caller has taken the mec_lock to protect the access to
 * MEC hash table
 *
 * Return: MEC entry
 */
struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t pdev_id,
						     uint8_t *mec_mac_addr);
#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc); \
			QDF_BUG(_condition); \
		} \
	} while (0)
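
/*
 * Note: DP_AST_ASSERT expands to a reference to a local variable named
 * 'soc' (for dp_print_ast_stats()), so it can only be used in functions
 * where a struct dp_soc *soc is in scope.
 */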
/**
 * dp_peer_update_inactive_time() - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf);
#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id: set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey: set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init: initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove: remove peer from special peer list
 * @peer: peer handle
 *
 * return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif
#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer - dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl - generic soc handle
 * @is_wds - flag to indicate if peer is wds
 * @peer_id - peer_id from htt peer map message
 * @peer_mac_addr - mac address of the peer
 * @ast_info - ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);

void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info);
#endif
/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);
#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
				       struct dp_peer *peer);
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
				   struct dp_peer *peer);
#else
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
						     struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
						 struct dp_peer *peer)
{
}
#endif

struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   enum dp_mod_id mod_id);
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
						struct dp_vdev *vdev,
						enum dp_mod_id mod_id);

void dp_peer_ast_table_detach(struct dp_soc *soc);
void dp_peer_find_map_detach(struct dp_soc *soc);
void dp_soc_wds_detach(struct dp_soc *soc);
QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
void dp_soc_wds_attach(struct dp_soc *soc);
void dp_peer_mec_hash_detach(struct dp_soc *soc);
void dp_peer_ast_hash_detach(struct dp_soc *soc);
#ifdef FEATURE_AST
/*
 * dp_peer_delete_ast_entries() - Delete all AST entries for a peer
 * @soc - datapath soc handle
 * @peer - datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
	/*
	 * Delete the peer self ast entry. This is done to handle scenarios
	 * where the peer is freed before the peer map is received (for
	 * example, auth disallow due to ACL); in such cases the self ast
	 * is not added to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}
#else
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif
#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);

/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);

/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif
#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc : dp_soc handle
 * @peer: peer
 *
 * This function is used to send cache flush cmd to reo and
 * to register the callback to handle the dumping of the reo
 * queue stats from DDR
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(
	struct dp_soc *soc, struct dp_peer *peer);

/**
 * dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl : cdp_soc_t handle
 * @vdev_id: vdev id
 *
 * Handler to get rx tid info from DDR after h/w cache is
 * invalidated first using the cache flush cmd.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);

/**
 * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
 * @soc : dp_soc handle
 * @cb_ctxt - callback context
 * @reo_status: REO command status
 *
 * This is the callback function registered after sending the reo cmd
 * to flush the h/w cache and invalidate it. In the callback the reo
 * queue desc info is dumped from DDR.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(
	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
#else /* DUMP_REO_QUEUE_INFO_IN_DDR */

static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
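
/**
 * dp_peer_find_mac_addr_cmp() - compare two word-aligned MAC addresses
 * @mac_addr1: first MAC address
 * @mac_addr2: second MAC address
 *
 * Return: 0 if the two addresses are equal, non-zero otherwise
 */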
static inline int dp_peer_find_mac_addr_cmp(
	union dp_align_mac_addr *mac_addr1,
	union dp_align_mac_addr *mac_addr2)
{
	/*
	 * Intentionally use & rather than &&.
	 * because the operands are binary rather than generic boolean,
	 * the functionality is equivalent.
	 * Using && has the advantage of short-circuited evaluation,
	 * but using & has the advantage of no conditional branching,
	 * which is a more significant benefit.
	 */
	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
}
/**
 * dp_peer_delete() - delete DP peer
 *
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
void dp_peer_delete(struct dp_soc *soc,
		    struct dp_peer *peer,
		    void *arg);
#ifdef WLAN_FEATURE_11BE_MLO

/* set peer type */
#define DP_PEER_SET_TYPE(_peer, _type_val) \
	((_peer)->peer_type = (_type_val))

/* is MLO connection link peer */
#define IS_MLO_DP_LINK_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)

/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_PEER(_peer) \
	((_peer)->peer_type == CDP_MLD_PEER_TYPE)
/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
 *				 increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
			       struct dp_peer *mld_peer)
{
	/* increase mld_peer ref_cnt */
	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = mld_peer;
}

/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
 *				 decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = NULL;
}
/**
 * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
	int i;

	qdf_spinlock_create(&mld_peer->link_peers_info_lock);
	mld_peer->num_links = 0;
	for (i = 0; i < DP_MAX_MLO_LINKS; i++)
		mld_peer->link_peers[i].is_valid = false;
}

/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}
/**
 * dp_mld_peer_add_link_peer() - add link peer info to mld peer
 * @mld_peer: mld dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
			       struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (!link_peer_info->is_valid) {
			qdf_mem_copy(link_peer_info->mac_addr.raw,
				     link_peer->mac_addr.raw,
				     QDF_MAC_ADDR_SIZE);
			link_peer_info->is_valid = true;
			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
			mld_peer->num_links++;
			break;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	if (i == DP_MAX_MLO_LINKS)
		dp_err("fail to add link peer " QDF_MAC_ADDR_FMT " to mld peer",
		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
}
/**
 * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
 * @mld_peer: MLD dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: number of links left after deletion
 */
static inline
uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
				  struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	uint8_t num_links;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid &&
		    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
					       &link_peer_info->mac_addr)) {
			link_peer_info->is_valid = false;
			mld_peer->num_links--;
			break;
		}
	}
	num_links = mld_peer->num_links;
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	if (i == DP_MAX_MLO_LINKS)
		dp_err("fail to del link peer " QDF_MAC_ADDR_FMT " from mld peer",
		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));

	return num_links;
}
/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
 *					   increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that holds the link peers pointer array and count
 * @mod_id: id of module requesting reference
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
		struct dp_soc *soc,
		struct dp_peer *mld_peer,
		struct dp_mld_link_peers *mld_link_peers,
		enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i = 0, j = 0;
	struct dp_peer_link_info *link_peer_info;

	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid) {
			peer = dp_peer_find_hash_find(
						soc,
						link_peer_info->mac_addr.raw,
						true,
						link_peer_info->vdev_id,
						mod_id);
			if (peer)
				mld_link_peers->link_peers[j++] = peer;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	mld_link_peers->num_links = j;
}
/**
 * dp_release_link_peers_ref() - release all link peers reference
 * @mld_link_peers: structure that holds the link peers pointer array and count
 * @mod_id: id of module requesting reference
 *
 * Return: None.
 */
static inline
void dp_release_link_peers_ref(
		struct dp_mld_link_peers *mld_link_peers,
		enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i;

	for (i = 0; i < mld_link_peers->num_links; i++) {
		peer = mld_link_peers->link_peers[i];
		if (peer)
			dp_peer_unref_delete(peer, mod_id);
		mld_link_peers->link_peers[i] = NULL;
	}

	mld_link_peers->num_links = 0;
}
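
/*
 * Usage sketch (illustrative only; dp_example_walk_link_peers is
 * hypothetical): dp_get_link_peers_ref_from_mld_peer() and
 * dp_release_link_peers_ref() must always be used as a pair so the
 * per-link references are dropped.
 */
#if 0
static void dp_example_walk_link_peers(struct dp_soc *soc,
				       struct dp_peer *mld_peer)
{
	struct dp_mld_link_peers link_peers_info;
	uint8_t i;

	/* takes a reference on each valid link peer of the MLD peer */
	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer,
					    &link_peers_info, DP_MOD_ID_CDP);

	for (i = 0; i < link_peers_info.num_links; i++) {
		struct dp_peer *link_peer = link_peers_info.link_peers[i];

		(void)link_peer; /* per-link processing goes here */
	}

	/* drops every reference taken above */
	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
}
#endif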
/**
 * dp_peer_get_tgt_peer_hash_find() - get MLD dp_peer handle
 *				      for processing
 * @soc: soc handle
 * @peer_mac: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * For an MLO connection, get the corresponding MLD peer;
 * otherwise get the link peer for the non-MLO case.
 *
 * Return: peer on success,
 *	   NULL on failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find(soc,
						      peer_mac, 0,
						      vdev_id, mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;

			/* release the peer reference taken by hash find */
			dp_peer_unref_delete(peer, mod_id);
		} else {
			/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	}

	return ta_peer;
}
/**
 * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
 * @soc : core DP soc context
 * @peer_id : peer id from which the peer object can be retrieved
 * @mod_id : ID of module requesting reference
 *
 * For an MLO connection, get the corresponding MLD peer;
 * otherwise get the link peer for the non-MLO case.
 *
 * Return: peer on success,
 *	   NULL on failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;

			/* release the peer reference taken above */
			dp_peer_unref_delete(peer, mod_id);
		} else {
			/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	}

	return ta_peer;
}
/**
 * dp_peer_mlo_delete() - peer MLO related delete operation
 * @soc: Soc handle
 * @peer: DP peer handle
 *
 * Return: None
 */
static inline
void dp_peer_mlo_delete(struct dp_soc *soc,
			struct dp_peer *peer)
{
	/* MLO connection link peer */
	if (IS_MLO_DP_LINK_PEER(peer)) {
		/* if the last link peer was deleted, delete the MLD peer */
		if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
			dp_peer_delete(soc, peer->mld_peer, NULL);
	}
}
/**
 * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
 * @soc: Soc handle
 * @peer: DP peer handle
 * @vdev_id: Vdev ID
 * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mlo_setup(
		struct dp_soc *soc,
		struct dp_peer *peer,
		uint8_t vdev_id,
		struct cdp_peer_setup_info *setup_info);
#else
#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false

static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
QDF_STATUS dp_peer_mlo_setup(
		struct dp_soc *soc,
		struct dp_peer *peer,
		uint8_t vdev_id,
		struct cdp_peer_setup_info *setup_info)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_soc *soc,
			struct dp_peer *peer)
{
}
#endif /* WLAN_FEATURE_11BE_MLO */
/**
 * dp_peer_rx_tids_create() - allocate and initialize the peer rx_tid array
 * @peer: DP peer
 *
 * Allocation is skipped for an MLD peer.
 *
 * Return: QDF_STATUS
 */
static inline
QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
{
	uint8_t i;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		dp_peer_info("skip for mld peer");
		return QDF_STATUS_SUCCESS;
	}

	if (peer->rx_tid) {
		QDF_BUG(0);
		dp_peer_err("peer rx_tid mem already exists");
		return QDF_STATUS_E_FAILURE;
	}

	peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
				      sizeof(struct dp_rx_tid));
	if (!peer->rx_tid) {
		dp_err("fail to alloc tid for peer " QDF_MAC_ADDR_FMT,
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_rx_tids_destroy() - free the peer rx_tid array
 * @peer: DP peer
 *
 * For an MLO link peer only the pointer is cleared; otherwise the
 * tid locks are destroyed and the array is freed.
 *
 * Return: None
 */
static inline
void dp_peer_rx_tids_destroy(struct dp_peer *peer)
{
	uint8_t i;

	if (!IS_MLO_DP_LINK_PEER(peer)) {
		for (i = 0; i < DP_MAX_TIDS; i++)
			qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);

		qdf_mem_free(peer->rx_tid);
	}

	peer->rx_tid = NULL;
}

#endif /* _DP_PEER_H_ */