dp_peer.h 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559
  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #ifndef _DP_PEER_H_
  20. #define _DP_PEER_H_
  21. #include <qdf_types.h>
  22. #include <qdf_lock.h>
  23. #include "dp_types.h"
  24. #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
  25. #include "hal_reo.h"
  26. #endif
  27. #define DP_INVALID_PEER_ID 0xffff
  28. #define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
  29. #define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */
  30. #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
  31. #define DP_PEER_HASH_LOAD_MULT 2
  32. #define DP_PEER_HASH_LOAD_SHIFT 0
  33. #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
  34. #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
  35. #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
  36. #define dp_peer_info(params...) \
  37. __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
  38. #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
#ifdef REO_QDESC_HISTORY
/* Kind of event recorded against a REO queue descriptor. */
enum reo_qdesc_event_type {
	REO_QDESC_UPDATE_CB = 0,
	REO_QDESC_FREE,
};

/**
 * struct reo_qdesc_event - one history record for a REO queue descriptor
 * @qdesc_addr: DMA address of the queue descriptor
 * @ts: timestamp at which the event was logged
 * @type: event kind (update callback or free)
 * @peer_mac: MAC address of the peer owning the descriptor
 */
struct reo_qdesc_event {
	qdf_dma_addr_t qdesc_addr;
	uint64_t ts;
	enum reo_qdesc_event_type type;
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
};
#endif

/**
 * struct ast_del_ctxt - context handed around while deleting AST entries
 * @age: presumably true when deletion is ageout-triggered — confirm at call site
 * @del_count: running count of entries deleted so far
 */
struct ast_del_ctxt {
	bool age;
	int del_count;
};
/* Callback signature used by the peer-iteration helpers in this header. */
typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);

/* Drop a reference previously taken on @peer on behalf of module @id. */
void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);

/* Look up a peer by MAC address (optionally scoped to @vdev_id), returning
 * it with a reference held for module @id, or NULL if not found.
 */
struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
				       uint8_t *peer_mac_addr,
				       int mac_addr_is_aligned,
				       uint8_t vdev_id,
				       enum dp_mod_id id);

/* Check whether @peer_id currently maps to a valid peer object. */
bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
  64. /**
  65. * dp_peer_get_ref() - Returns peer object given the peer id
  66. *
  67. * @soc : core DP soc context
  68. * @peer : DP peer
  69. * @mod_id : id of module requesting the reference
  70. *
  71. * Return: QDF_STATUS_SUCCESS if reference held successfully
  72. * else QDF_STATUS_E_INVAL
  73. */
  74. static inline
  75. QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
  76. struct dp_peer *peer,
  77. enum dp_mod_id mod_id)
  78. {
  79. if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
  80. return QDF_STATUS_E_INVAL;
  81. if (mod_id > DP_MOD_ID_RX)
  82. qdf_atomic_inc(&peer->mod_refs[mod_id]);
  83. return QDF_STATUS_SUCCESS;
  84. }
  85. /**
  86. * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
  87. *
  88. * @soc : core DP soc context
  89. * @peer_id : peer id from peer object can be retrieved
  90. * @mod_id : module id
  91. *
  92. * Return: struct dp_peer*: Pointer to DP peer object
  93. */
  94. static inline struct dp_peer *
  95. __dp_peer_get_ref_by_id(struct dp_soc *soc,
  96. uint16_t peer_id,
  97. enum dp_mod_id mod_id)
  98. {
  99. struct dp_peer *peer;
  100. qdf_spin_lock_bh(&soc->peer_map_lock);
  101. peer = (peer_id >= soc->max_peer_id) ? NULL :
  102. soc->peer_id_to_obj_map[peer_id];
  103. if (!peer ||
  104. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  105. qdf_spin_unlock_bh(&soc->peer_map_lock);
  106. return NULL;
  107. }
  108. qdf_spin_unlock_bh(&soc->peer_map_lock);
  109. return peer;
  110. }
  111. /**
  112. * dp_peer_get_ref_by_id() - Returns peer object given the peer id
  113. * if peer state is active
  114. *
  115. * @soc : core DP soc context
  116. * @peer_id : peer id from peer object can be retrieved
  117. * @mod_id : ID ot module requesting reference
  118. *
  119. * Return: struct dp_peer*: Pointer to DP peer object
  120. */
  121. static inline
  122. struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
  123. uint16_t peer_id,
  124. enum dp_mod_id mod_id)
  125. {
  126. struct dp_peer *peer;
  127. qdf_spin_lock_bh(&soc->peer_map_lock);
  128. peer = (peer_id >= soc->max_peer_id) ? NULL :
  129. soc->peer_id_to_obj_map[peer_id];
  130. if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
  131. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  132. qdf_spin_unlock_bh(&soc->peer_map_lock);
  133. return NULL;
  134. }
  135. qdf_spin_unlock_bh(&soc->peer_map_lock);
  136. return peer;
  137. }
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
/* No-op stub when rx-frame caching per peer is compiled out. */
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

/**
 * dp_clear_peer_internal() - mark @peer disconnected and discard any rx
 * frames cached for it
 * @soc: core DP soc context (unused here)
 * @peer: DP peer
 */
static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	/* drop = true: cached frames are dropped, not delivered */
	dp_rx_flush_rx_cached(peer, true);
}
  160. /**
  161. * dp_vdev_iterate_peer() - API to iterate through vdev peer list
  162. *
  163. * @vdev : DP vdev context
  164. * @func : function to be called for each peer
  165. * @arg : argument need to be passed to func
  166. * @mod_id : module_id
  167. *
  168. * Return: void
  169. */
  170. static inline void
  171. dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
  172. enum dp_mod_id mod_id)
  173. {
  174. struct dp_peer *peer;
  175. struct dp_peer *tmp_peer;
  176. struct dp_soc *soc = NULL;
  177. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  178. return;
  179. soc = vdev->pdev->soc;
  180. qdf_spin_lock_bh(&vdev->peer_list_lock);
  181. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  182. peer_list_elem,
  183. tmp_peer) {
  184. if (dp_peer_get_ref(soc, peer, mod_id) ==
  185. QDF_STATUS_SUCCESS) {
  186. (*func)(soc, peer, arg);
  187. dp_peer_unref_delete(peer, mod_id);
  188. }
  189. }
  190. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  191. }
/**
 * dp_pdev_iterate_peer() - invoke @func on every peer of every vdev of @pdev
 *
 * @pdev: DP pdev context
 * @func: function to be called for each peer
 * @arg: argument passed through to @func
 * @mod_id: module id used for the per-peer reference
 *
 * Holds vdev_list_lock across the walk; dp_vdev_iterate_peer() additionally
 * takes each vdev's peer_list_lock, so @func runs with both locks held.
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev;

	if (!pdev)
		return;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}
  214. /**
  215. * dp_soc_iterate_peer() - API to iterate through all peers of soc
  216. *
  217. * @soc : DP soc context
  218. * @func : function to be called for each peer
  219. * @arg : argument need to be passed to func
  220. * @mod_id : module_id
  221. *
  222. * Return: void
  223. */
  224. static inline void
  225. dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
  226. enum dp_mod_id mod_id)
  227. {
  228. struct dp_pdev *pdev;
  229. int i;
  230. if (!soc)
  231. return;
  232. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  233. pdev = soc->pdev_list[i];
  234. dp_pdev_iterate_peer(pdev, func, arg, mod_id);
  235. }
  236. }
/**
 * dp_vdev_iterate_peer_lock_safe() - iterate vdev peers, calling @func
 * outside the peer_list_lock
 *
 * This API snapshots referenced peer pointers into locally allocated
 * memory under the lock, then invokes @func on each peer with no lock
 * held. Use it only when @func cannot run under peer_list_lock, since
 * it allocates.
 *
 * @vdev: DP vdev context
 * @func: function to be called for each peer
 * @arg: argument passed through to @func
 * @mod_id: module id used for the per-peer reference
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_peer **peer_array = NULL;
	int i = 0;
	uint32_t num_peers = 0;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	/* num_peers is sampled before the lock; peers added afterwards are
	 * simply not visited in this pass.
	 */
	num_peers = vdev->num_peers;

	soc = vdev->pdev->soc;

	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
	if (!peer_array)
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		/* Never write past the pre-sized snapshot. */
		if (i >= num_peers)
			break;

		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
			peer_array[i] = peer;
			i = (i + 1);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

	for (i = 0; i < num_peers; i++) {
		peer = peer_array[i];

		/* Unfilled slots are NULL — assumes qdf_mem_malloc zeroes
		 * the allocation; TODO confirm against QDF implementation.
		 */
		if (!peer)
			continue;

		(*func)(soc, peer, arg);
		dp_peer_unref_delete(peer, mod_id);
	}

	qdf_mem_free(peer_array);
}
  293. /**
  294. * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
  295. *
  296. * This API will cache the peers in local allocated memory and calls
  297. * iterate function outside the lock.
  298. *
  299. * As this API is allocating new memory it is suggested to use this
  300. * only when lock cannot be held
  301. *
  302. * @pdev : DP pdev context
  303. * @func : function to be called for each peer
  304. * @arg : argument need to be passed to func
  305. * @mod_id : module_id
  306. *
  307. * Return: void
  308. */
  309. static inline void
  310. dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
  311. dp_peer_iter_func *func,
  312. void *arg,
  313. enum dp_mod_id mod_id)
  314. {
  315. struct dp_peer *peer;
  316. struct dp_peer *tmp_peer;
  317. struct dp_soc *soc = NULL;
  318. struct dp_vdev *vdev = NULL;
  319. struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
  320. int i = 0;
  321. int j = 0;
  322. uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
  323. if (!pdev || !pdev->soc)
  324. return;
  325. soc = pdev->soc;
  326. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  327. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  328. num_peers[i] = vdev->num_peers;
  329. peer_array[i] = qdf_mem_malloc(num_peers[i] *
  330. sizeof(struct dp_peer *));
  331. if (!peer_array[i])
  332. break;
  333. qdf_spin_lock_bh(&vdev->peer_list_lock);
  334. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  335. peer_list_elem,
  336. tmp_peer) {
  337. if (j >= num_peers[i])
  338. break;
  339. if (dp_peer_get_ref(soc, peer, mod_id) ==
  340. QDF_STATUS_SUCCESS) {
  341. peer_array[i][j] = peer;
  342. j = (j + 1);
  343. }
  344. }
  345. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  346. i = (i + 1);
  347. }
  348. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  349. for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
  350. if (!peer_array[i])
  351. break;
  352. for (j = 0; j < num_peers[i]; j++) {
  353. peer = peer_array[i][j];
  354. if (!peer)
  355. continue;
  356. (*func)(soc, peer, arg);
  357. dp_peer_unref_delete(peer, mod_id);
  358. }
  359. qdf_mem_free(peer_array[i]);
  360. }
  361. }
  362. /**
  363. * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
  364. *
  365. * This API will cache the peers in local allocated memory and calls
  366. * iterate function outside the lock.
  367. *
  368. * As this API is allocating new memory it is suggested to use this
  369. * only when lock cannot be held
  370. *
  371. * @soc : DP soc context
  372. * @func : function to be called for each peer
  373. * @arg : argument need to be passed to func
  374. * @mod_id : module_id
  375. *
  376. * Return: void
  377. */
  378. static inline void
  379. dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
  380. dp_peer_iter_func *func,
  381. void *arg,
  382. enum dp_mod_id mod_id)
  383. {
  384. struct dp_pdev *pdev;
  385. int i;
  386. if (!soc)
  387. return;
  388. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  389. pdev = soc->pdev_list[i];
  390. dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
  391. }
  392. }
#ifdef DP_PEER_STATE_DEBUG
/* Debug build: log an illegal peer state transition and assert. */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)
#else
/* Non-debug build: log the illegal transition but keep running. */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif
  415. /**
  416. * dp_peer_state_cmp() - compare dp peer state
  417. *
  418. * @peer : DP peer
  419. * @state : state
  420. *
  421. * Return: true if state matches with peer state
  422. * false if it does not match
  423. */
  424. static inline bool
  425. dp_peer_state_cmp(struct dp_peer *peer,
  426. enum dp_peer_state state)
  427. {
  428. bool is_status_equal = false;
  429. qdf_spin_lock_bh(&peer->peer_state_lock);
  430. is_status_equal = (peer->peer_state == state);
  431. qdf_spin_unlock_bh(&peer->peer_state_lock);
  432. return is_status_equal;
  433. }
  434. /**
  435. * dp_peer_update_state() - update dp peer state
  436. *
  437. * @soc : core DP soc context
  438. * @peer : DP peer
  439. * @state : new state
  440. *
  441. * Return: None
  442. */
  443. static inline void
  444. dp_peer_update_state(struct dp_soc *soc,
  445. struct dp_peer *peer,
  446. enum dp_peer_state state)
  447. {
  448. uint8_t peer_state;
  449. qdf_spin_lock_bh(&peer->peer_state_lock);
  450. peer_state = peer->peer_state;
  451. switch (state) {
  452. case DP_PEER_STATE_INIT:
  453. DP_PEER_STATE_ASSERT
  454. (peer, state, (peer_state != DP_PEER_STATE_ACTIVE) ||
  455. (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
  456. break;
  457. case DP_PEER_STATE_ACTIVE:
  458. DP_PEER_STATE_ASSERT(peer, state,
  459. (peer_state == DP_PEER_STATE_INIT));
  460. break;
  461. case DP_PEER_STATE_LOGICAL_DELETE:
  462. DP_PEER_STATE_ASSERT(peer, state,
  463. (peer_state == DP_PEER_STATE_ACTIVE) ||
  464. (peer_state == DP_PEER_STATE_INIT));
  465. break;
  466. case DP_PEER_STATE_INACTIVE:
  467. DP_PEER_STATE_ASSERT
  468. (peer, state,
  469. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  470. break;
  471. case DP_PEER_STATE_FREED:
  472. if (peer->sta_self_peer)
  473. DP_PEER_STATE_ASSERT
  474. (peer, state, (peer_state == DP_PEER_STATE_INIT));
  475. else
  476. DP_PEER_STATE_ASSERT
  477. (peer, state,
  478. (peer_state == DP_PEER_STATE_INACTIVE) ||
  479. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  480. break;
  481. default:
  482. qdf_spin_unlock_bh(&peer->peer_state_lock);
  483. dp_alert("Invalid peer state %u for peer "QDF_MAC_ADDR_FMT,
  484. state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  485. return;
  486. }
  487. peer->peer_state = state;
  488. qdf_spin_unlock_bh(&peer->peer_state_lock);
  489. dp_info("Updating peer state from %u to %u mac "QDF_MAC_ADDR_FMT"\n",
  490. peer_state, state,
  491. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  492. }
/* Dump AST table statistics for @soc. */
void dp_print_ast_stats(struct dp_soc *soc);

/* Handle a peer-map event from firmware: associate @peer_id/@hw_peer_id
 * with the peer identified by @peer_mac_addr on @vdev_id.
 */
QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
				  uint16_t hw_peer_id, uint8_t vdev_id,
				  uint8_t *peer_mac_addr, uint16_t ast_hash,
				  uint8_t is_wds);

/* Handle a peer-unmap event from firmware, releasing the peer-id binding
 * (and @free_wds_count WDS entries).
 */
void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			      uint8_t vdev_id, uint8_t *peer_mac_addr,
			      uint8_t is_wds, uint32_t free_wds_count);

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
 * @soc: generic soc handle
 * @peer_id: ML peer_id from firmware
 * @peer_mac_addr: mac address of the peer
 * @mlo_flow_info: MLO AST flow info
 *
 * associate the ML peer_id that firmware provided with peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS
dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t *peer_mac_addr,
			   struct dp_mlo_flow_override_info *mlo_flow_info);

/**
 * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
 * @soc: generic soc handle
 * @peer_id: peer_id from firmware
 *
 * Return: none
 */
void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
#endif
/* Handle a security indication for @peer_id (keys / PN for @sec_type). */
void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
			   enum cdp_sec_type sec_type, int is_unicast,
			   u_int32_t *michael_key, u_int32_t *rx_pn);

/* Handle a DELBA indication for @tid of @peer_id with window size @win_sz. */
QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
				   uint8_t tid, uint16_t win_sz);

/* Fetch the MAC address of @peer_id into @peer_mac. */
uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
				    uint16_t peer_id, uint8_t *peer_mac);

/* Create an AST entry of @type for @mac_addr under @peer. */
QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
			   uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
			   uint32_t flags);

/* Delete @ast_entry (including any firmware interaction required). */
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);

/* Handle an AST-unmap event from firmware for @ast_entry. */
void dp_peer_ast_unmap_handler(struct dp_soc *soc,
			       struct dp_ast_entry *ast_entry);

/* Update @ast_entry (e.g. re-anchor to @peer) per @flags. */
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags);

/* AST hash lookups scoped to a pdev, a vdev, or the whole soc. */
struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id);
struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id);
struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr);

/* Accessors for AST entry attributes. */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry);
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry);
void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type);

/* Send a WDS-delete command to firmware for @ast_entry owned by @peer. */
void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

/* Completion callback invoked when firmware finishes freeing an HMWDS AST. */
void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status);

/* Low-level AST teardown helpers: hash removal, free, and unlink from peer. */
void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase);
void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry);
void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);
/**
 * dp_peer_mec_detach_entry() - Detach the MEC entry
 * @soc: SoC handle
 * @mecentry: MEC entry of the node
 * @ptr: pointer to free list
 *
 * The MEC entry is detached from MEC table and added to free_list
 * to free the object outside lock
 *
 * Return: None
 */
void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
			      void *ptr);

/**
 * dp_peer_mec_free_list() - free the MEC entry from free_list
 * @soc: SoC handle
 * @ptr: pointer to free list
 *
 * Return: None
 */
void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);

/**
 * dp_peer_mec_add_entry() - allocate and add a MEC entry to the MEC table
 * @soc: SoC handle
 * @vdev: vdev to which mec node belongs
 * @mac_addr: MAC address of mec node
 *
 * It assumes the caller has taken the mec lock to protect the access to
 * these tables.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 uint8_t *mac_addr);

/**
 * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
 * within pdev
 * @soc: SoC handle
 * @pdev_id: pdev to search within
 * @mec_mac_addr: MAC address of the MEC node to look up
 *
 * It assumes caller has taken the mec_lock to protect the access to
 * MEC hash table
 *
 * Return: MEC entry
 */
struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t pdev_id,
						     uint8_t *mec_mac_addr);

/* Assert helper for AST invariants; dumps AST stats before asserting.
 * NOTE: relies on a variable named 'soc' being in scope at the call site.
 */
#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc);\
			QDF_BUG(_condition); \
		} \
	} while (0)
/**
 * dp_peer_update_inactive_time() - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 *
 * Return: None
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf);
#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id() - set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * No-op stub when multipass support is compiled out.
 *
 * Return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey() - set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * Stub: reports success so callers proceed when the feature is absent.
 *
 * Return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init() - initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove() - remove peer from special peer list
 * @peer: peer handle
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
/* Real implementations when multipass support is enabled. */
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif
#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * No-op stub when per-peer multi-queue support is compiled out.
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl: generic soc handle
 * @is_wds: flag to indicate if peer is wds
 * @peer_id: peer_id from htt peer map message
 * @peer_mac_addr: mac address of the peer
 * @ast_info: ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
}
#else
/* Real implementations when per-peer multi-queue support is enabled. */
void dp_peer_reset_flowq_map(struct dp_peer *peer);
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info);
#endif
/**
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (ie., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);

#ifdef QCA_PEER_EXT_STATS
/* Allocate / free the extended-stats context attached to @peer. */
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
				       struct dp_peer *peer);
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
				   struct dp_peer *peer);
#else
/* Stubs when extended peer stats are compiled out; alloc reports success
 * so callers proceed unchanged.
 */
static inline QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
						     struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc,
						 struct dp_peer *peer)
{
}
#endif
/* Get the BSS peer of @vdev with a reference held for @mod_id. */
struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   enum dp_mod_id mod_id);

/* Get the self peer of a STA @vdev with a reference held for @mod_id. */
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
						struct dp_vdev *vdev,
						enum dp_mod_id mod_id);

/* Attach/detach helpers for the AST table, peer-id map, WDS handling,
 * and the AST/MEC hash tables.
 */
void dp_peer_ast_table_detach(struct dp_soc *soc);
void dp_peer_find_map_detach(struct dp_soc *soc);
void dp_soc_wds_detach(struct dp_soc *soc);
QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
void dp_soc_wds_attach(struct dp_soc *soc);
void dp_peer_mec_hash_detach(struct dp_soc *soc);
void dp_peer_ast_hash_detach(struct dp_soc *soc);
#ifdef FEATURE_AST
/*
 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
 * @soc - datapath soc handle
 * @peer - datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
struct dp_peer *peer)
{
struct dp_ast_entry *ast_entry, *temp_ast_entry;
dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
/*
 * Delete peer self ast entry. This is done to handle scenarios
 * where peer is freed before peer map is received(for ex in case
 * of auth disallow due to ACL) in such cases self ast is not added
 * to peer->ast_list.
 */
if (peer->self_ast_entry) {
dp_peer_del_ast(soc, peer->self_ast_entry);
peer->self_ast_entry = NULL;
}
/*
 * Two-cursor iteration macro - presumably safe against entry removal
 * during the walk (macro defined elsewhere; confirm).
 */
DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
dp_peer_del_ast(soc, ast_entry);
}
#else
/* FEATURE_AST disabled: peers carry no AST entries, nothing to delete */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
struct dp_peer *peer)
{
}
#endif
#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);
/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);
/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
/* FEATURE_MEC disabled: MEC lock management and table flush are no-ops */
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}
static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}
static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif
#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc: dp_soc handle
 * @peer: peer
 *
 * This function is used to send cache flush cmd to reo and
 * to register the callback to handle the dumping of the reo
 * queue stats from DDR
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(
struct dp_soc *soc, struct dp_peer *peer);
/**
 * dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl: cdp_soc_t handle
 * @vdev_id: vdev id
 *
 * Handler to get rx tid info from DDR after h/w cache is
 * invalidated first using the cache flush cmd.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(
struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
/**
 * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
 * @soc: dp_soc handle
 * @cb_ctxt: callback context
 * @reo_status: REO command status
 *
 * This is the callback function registered after sending the reo cmd
 * to flush the h/w cache and invalidate it. In the callback the reo
 * queue desc info is dumped from DDR.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(
struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
#else /* DUMP_REO_QUEUE_INFO_IN_DDR */
/* feature disabled: fetching reo queue info from DDR is a no-op */
static inline void dp_get_rx_reo_queue_info(
struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
  882. static inline int dp_peer_find_mac_addr_cmp(
  883. union dp_align_mac_addr *mac_addr1,
  884. union dp_align_mac_addr *mac_addr2)
  885. {
  886. /*
  887. * Intentionally use & rather than &&.
  888. * because the operands are binary rather than generic boolean,
  889. * the functionality is equivalent.
  890. * Using && has the advantage of short-circuited evaluation,
  891. * but using & has the advantage of no conditional branching,
  892. * which is a more significant benefit.
  893. */
  894. return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
  895. & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
  896. }
  897. /**
  898. * dp_peer_delete() - delete DP peer
  899. *
  900. * @soc: Datatpath soc
  901. * @peer: Datapath peer
  902. * @arg: argument to iter function
  903. *
  904. * Return: void
  905. */
  906. void dp_peer_delete(struct dp_soc *soc,
  907. struct dp_peer *peer,
  908. void *arg);
#ifdef WLAN_FEATURE_11BE_MLO
/* set peer type */
#define DP_PEER_SET_TYPE(_peer, _type_val) \
((_peer)->peer_type = (_type_val))
/* is MLO connection link peer: link type AND attached to an mld peer */
#define IS_MLO_DP_LINK_PEER(_peer) \
((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_PEER(_peer) \
((_peer)->peer_type == CDP_MLD_PEER_TYPE)
#ifdef WLAN_MLO_MULTI_CHIP
/* chip id of this soc within the multi-chip MLO group */
uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);
/* peer hash lookup additionally qualified by chip id */
struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
uint8_t *peer_mac_addr,
int mac_addr_is_aligned,
uint8_t vdev_id,
uint8_t chip_id,
enum dp_mod_id mod_id);
#else
/* single-chip build: chip id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
return 0;
}
/* single-chip build: @chip_id is ignored, plain peer hash lookup */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
uint8_t *peer_mac_addr,
int mac_addr_is_aligned,
uint8_t vdev_id,
uint8_t chip_id,
enum dp_mod_id mod_id)
{
return dp_peer_find_hash_find(soc, peer_mac_addr,
mac_addr_is_aligned,
vdev_id, mod_id);
}
#endif
  946. /**
  947. * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
  948. increase mld peer ref_cnt
  949. * @link_peer: link peer pointer
  950. * @mld_peer: mld peer pointer
  951. *
  952. * Return: none
  953. */
  954. static inline
  955. void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
  956. struct dp_peer *mld_peer)
  957. {
  958. /* increase mld_peer ref_cnt */
  959. dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
  960. link_peer->mld_peer = mld_peer;
  961. }
  962. /**
  963. * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
  964. decrease mld peer ref_cnt
  965. * @link_peer: link peer pointer
  966. *
  967. * Return: None
  968. */
  969. static inline
  970. void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
  971. {
  972. dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
  973. link_peer->mld_peer = NULL;
  974. }
  975. /**
  976. * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
  977. * @mld_peer: mld peer pointer
  978. *
  979. * Return: None
  980. */
  981. static inline
  982. void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
  983. {
  984. int i;
  985. qdf_spinlock_create(&mld_peer->link_peers_info_lock);
  986. mld_peer->num_links = 0;
  987. for (i = 0; i < DP_MAX_MLO_LINKS; i++)
  988. mld_peer->link_peers[i].is_valid = false;
  989. }
  990. /**
  991. * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
  992. * @mld_peer: mld peer pointer
  993. *
  994. * Return: None
  995. */
  996. static inline
  997. void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
  998. {
  999. qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
  1000. }
  1001. /**
  1002. * dp_mld_peer_add_link_peer() - add link peer info to mld peer
  1003. * @mld_peer: mld dp peer pointer
  1004. * @link_peer: link dp peer pointer
  1005. *
  1006. * Return: None
  1007. */
  1008. static inline
  1009. void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
  1010. struct dp_peer *link_peer)
  1011. {
  1012. int i;
  1013. struct dp_peer_link_info *link_peer_info;
  1014. qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
  1015. for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
  1016. link_peer_info = &mld_peer->link_peers[i];
  1017. if (!link_peer_info->is_valid) {
  1018. qdf_mem_copy(link_peer_info->mac_addr.raw,
  1019. link_peer->mac_addr.raw,
  1020. QDF_MAC_ADDR_SIZE);
  1021. link_peer_info->is_valid = true;
  1022. link_peer_info->vdev_id = link_peer->vdev->vdev_id;
  1023. link_peer_info->chip_id =
  1024. dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
  1025. mld_peer->num_links++;
  1026. break;
  1027. }
  1028. }
  1029. qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
  1030. if (i == DP_MAX_MLO_LINKS)
  1031. dp_err("fail to add link peer" QDF_MAC_ADDR_FMT "to mld peer",
  1032. QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
  1033. }
  1034. /**
  1035. * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
  1036. * @mld_peer: MLD dp peer pointer
  1037. * @link_peer: link dp peer pointer
  1038. *
  1039. * Return: number of links left after deletion
  1040. */
  1041. static inline
  1042. uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
  1043. struct dp_peer *link_peer)
  1044. {
  1045. int i;
  1046. struct dp_peer_link_info *link_peer_info;
  1047. uint8_t num_links;
  1048. qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
  1049. for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
  1050. link_peer_info = &mld_peer->link_peers[i];
  1051. if (link_peer_info->is_valid &&
  1052. !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
  1053. &link_peer_info->mac_addr)) {
  1054. link_peer_info->is_valid = false;
  1055. mld_peer->num_links--;
  1056. break;
  1057. }
  1058. }
  1059. num_links = mld_peer->num_links;
  1060. qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
  1061. if (i == DP_MAX_MLO_LINKS)
  1062. dp_err("fail to del link peer" QDF_MAC_ADDR_FMT "to mld peer",
  1063. QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
  1064. return num_links;
  1065. }
  1066. /**
  1067. * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
  1068. increase link peers ref_cnt
  1069. * @soc: dp_soc handle
  1070. * @mld_peer: dp mld peer pointer
  1071. * @mld_link_peers: structure that hold links peers ponter array and number
  1072. * @mod_id: id of module requesting reference
  1073. *
  1074. * Return: None
  1075. */
  1076. static inline
  1077. void dp_get_link_peers_ref_from_mld_peer(
  1078. struct dp_soc *soc,
  1079. struct dp_peer *mld_peer,
  1080. struct dp_mld_link_peers *mld_link_peers,
  1081. enum dp_mod_id mod_id)
  1082. {
  1083. struct dp_peer *peer;
  1084. uint8_t i = 0, j = 0;
  1085. struct dp_peer_link_info *link_peer_info;
  1086. qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
  1087. qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
  1088. for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
  1089. link_peer_info = &mld_peer->link_peers[i];
  1090. if (link_peer_info->is_valid) {
  1091. peer = dp_link_peer_hash_find_by_chip_id(
  1092. soc,
  1093. link_peer_info->mac_addr.raw,
  1094. true,
  1095. link_peer_info->vdev_id,
  1096. link_peer_info->chip_id,
  1097. mod_id);
  1098. if (peer)
  1099. mld_link_peers->link_peers[j++] = peer;
  1100. }
  1101. }
  1102. qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
  1103. mld_link_peers->num_links = j;
  1104. }
  1105. /**
  1106. * dp_release_link_peers_ref() - release all link peers reference
  1107. * @mld_link_peers: structure that hold links peers ponter array and number
  1108. * @mod_id: id of module requesting reference
  1109. *
  1110. * Return: None.
  1111. */
  1112. static inline
  1113. void dp_release_link_peers_ref(
  1114. struct dp_mld_link_peers *mld_link_peers,
  1115. enum dp_mod_id mod_id)
  1116. {
  1117. struct dp_peer *peer;
  1118. uint8_t i;
  1119. for (i = 0; i < mld_link_peers->num_links; i++) {
  1120. peer = mld_link_peers->link_peers[i];
  1121. if (peer)
  1122. dp_peer_unref_delete(peer, mod_id);
  1123. mld_link_peers->link_peers[i] = NULL;
  1124. }
  1125. mld_link_peers->num_links = 0;
  1126. }
  1127. /**
  1128. * dp_peer_get_tgt_peer_hash_find() - get MLD dp_peer handle
  1129. for processing
  1130. * @soc: soc handle
  1131. * @peer_mac_addr: peer mac address
  1132. * @mac_addr_is_aligned: is mac addr alligned
  1133. * @vdev_id: vdev_id
  1134. * @mod_id: id of module requesting reference
  1135. *
  1136. * for MLO connection, get corresponding MLD peer,
  1137. * otherwise get link peer for non-MLO case.
  1138. *
  1139. * return: peer in success
  1140. * NULL in failure
  1141. */
  1142. static inline
  1143. struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
  1144. uint8_t *peer_mac,
  1145. int mac_addr_is_aligned,
  1146. uint8_t vdev_id,
  1147. enum dp_mod_id mod_id)
  1148. {
  1149. struct dp_peer *ta_peer = NULL;
  1150. struct dp_peer *peer = dp_peer_find_hash_find(soc,
  1151. peer_mac, 0, vdev_id,
  1152. mod_id);
  1153. if (peer) {
  1154. /* mlo connection link peer, get mld peer with reference */
  1155. if (IS_MLO_DP_LINK_PEER(peer)) {
  1156. /* increase mld peer ref_cnt */
  1157. if (QDF_STATUS_SUCCESS ==
  1158. dp_peer_get_ref(soc, peer->mld_peer, mod_id))
  1159. ta_peer = peer->mld_peer;
  1160. else
  1161. ta_peer = NULL;
  1162. /* relese peer reference that added by hash find */
  1163. dp_peer_unref_delete(peer, mod_id);
  1164. } else {
  1165. /* mlo MLD peer or non-mlo link peer */
  1166. ta_peer = peer;
  1167. }
  1168. }
  1169. return ta_peer;
  1170. }
  1171. /**
  1172. * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
  1173. * @soc : core DP soc context
  1174. * @peer_id : peer id from peer object can be retrieved
  1175. * @mod_id : ID ot module requesting reference
  1176. *
  1177. * for MLO connection, get corresponding MLD peer,
  1178. * otherwise get link peer for non-MLO case.
  1179. *
  1180. * return: peer in success
  1181. * NULL in failure
  1182. */
  1183. static inline
  1184. struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
  1185. uint16_t peer_id,
  1186. enum dp_mod_id mod_id)
  1187. {
  1188. struct dp_peer *ta_peer = NULL;
  1189. struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1190. if (peer) {
  1191. /* mlo connection link peer, get mld peer with reference */
  1192. if (IS_MLO_DP_LINK_PEER(peer)) {
  1193. /* increase mld peer ref_cnt */
  1194. if (QDF_STATUS_SUCCESS ==
  1195. dp_peer_get_ref(soc, peer->mld_peer, mod_id))
  1196. ta_peer = peer->mld_peer;
  1197. else
  1198. ta_peer = NULL;
  1199. /* relese peer reference that added by hash find */
  1200. dp_peer_unref_delete(peer, mod_id);
  1201. } else {
  1202. /* mlo MLD peer or non-mlo link peer */
  1203. ta_peer = peer;
  1204. }
  1205. }
  1206. return ta_peer;
  1207. }
  1208. /**
  1209. * dp_peer_mlo_delete() - peer MLO related delete operation
  1210. * @soc: Soc handle
  1211. * @peer: DP peer handle
  1212. * Return: None
  1213. */
  1214. static inline
  1215. void dp_peer_mlo_delete(struct dp_soc *soc,
  1216. struct dp_peer *peer)
  1217. {
  1218. /* MLO connection link peer */
  1219. if (IS_MLO_DP_LINK_PEER(peer)) {
  1220. /* if last link peer deletion, delete MLD peer */
  1221. if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
  1222. dp_peer_delete(soc, peer->mld_peer, NULL);
  1223. }
  1224. }
  1225. /**
  1226. * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
  1227. * @soc: Soc handle
  1228. * @vdev_id: Vdev ID
  1229. * @peer_setup_info: peer setup information for MLO
  1230. */
  1231. QDF_STATUS dp_peer_mlo_setup(
  1232. struct dp_soc *soc,
  1233. struct dp_peer *peer,
  1234. uint8_t vdev_id,
  1235. struct cdp_peer_setup_info *setup_info);
#else
/*
 * WLAN_FEATURE_11BE_MLO disabled: there is no MLD/link peer distinction.
 * The peer-type macros collapse to no-op/false and the tgt-peer helpers
 * fall through to the plain per-link lookups.
 */
#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false
/* non-MLO: target peer is simply the hash-found peer */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
uint8_t *peer_mac,
int mac_addr_is_aligned,
uint8_t vdev_id,
enum dp_mod_id mod_id)
{
return dp_peer_find_hash_find(soc, peer_mac,
mac_addr_is_aligned, vdev_id,
mod_id);
}
/* non-MLO: target peer is the peer mapped to @peer_id */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
uint16_t peer_id,
enum dp_mod_id mod_id)
{
return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}
/* non-MLO: nothing to set up, report success */
static inline
QDF_STATUS dp_peer_mlo_setup(
struct dp_soc *soc,
struct dp_peer *peer,
uint8_t vdev_id,
struct cdp_peer_setup_info *setup_info)
{
return QDF_STATUS_SUCCESS;
}
/* non-MLO stubs: link-peer bookkeeping is a no-op */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}
static inline
void dp_peer_mlo_delete(struct dp_soc *soc,
struct dp_peer *peer)
{
}
static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
struct dp_peer *link_peer)
{
}
/* single chip when MLO is not compiled in */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
return 0;
}
/* @chip_id is ignored without MLO multi-chip support */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
uint8_t *peer_mac_addr,
int mac_addr_is_aligned,
uint8_t vdev_id,
uint8_t chip_id,
enum dp_mod_id mod_id)
{
return dp_peer_find_hash_find(soc, peer_mac_addr,
mac_addr_is_aligned,
vdev_id, mod_id);
}
#endif /* WLAN_FEATURE_11BE_MLO */
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/**
 * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
 * @soc: Soc handle
 * @peer: DP peer handle for ML peer
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_mlo_partner_chips_map(struct dp_soc *soc,
struct dp_peer *peer,
uint16_t peer_id);
/**
 * dp_mlo_partner_chips_unmap() - Unmap MLO peers to partner SOCs
 * @soc: Soc handle
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
uint16_t peer_id);
#else
/* single-chip or non-MLO build: partner-chip mapping is a no-op */
static inline void dp_mlo_partner_chips_map(struct dp_soc *soc,
struct dp_peer *peer,
uint16_t peer_id)
{
}
static inline void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
uint16_t peer_id)
{
}
#endif
/**
 * dp_peer_rx_tids_create() - allocate the per-TID rx state for a peer
 * @peer: DP peer handle
 *
 * Allocates peer->rx_tid as an array of DP_MAX_TIDS struct dp_rx_tid
 * entries, zeroes it and creates one spinlock per TID.
 * MLD peers are skipped and success is returned without allocating.
 * NOTE(review): presumably MLD peers take over rx_tid state owned by a
 * link peer - confirm against dp_peer_rx_tids_destroy(), which frees for
 * every peer except link peers.
 *
 * Return: QDF_STATUS_SUCCESS on success or when skipped for an MLD peer,
 * QDF_STATUS_E_FAILURE if rx_tid already exists,
 * QDF_STATUS_E_NOMEM on allocation failure
 */
static inline
QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
{
uint8_t i;
if (IS_MLO_DP_MLD_PEER(peer)) {
dp_peer_info("skip for mld peer");
return QDF_STATUS_SUCCESS;
}
/* allocating twice would leak the first array */
if (peer->rx_tid) {
QDF_BUG(0);
dp_peer_err("peer rx_tid mem already exist");
return QDF_STATUS_E_FAILURE;
}
peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
sizeof(struct dp_rx_tid));
if (!peer->rx_tid) {
dp_err("fail to alloc tid for peer" QDF_MAC_ADDR_FMT,
QDF_MAC_ADDR_REF(peer->mac_addr.raw));
return QDF_STATUS_E_NOMEM;
}
/* NOTE(review): qdf_mem_malloc may already zero - zeroing kept as-is */
qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
for (i = 0; i < DP_MAX_TIDS; i++)
qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
return QDF_STATUS_SUCCESS;
}
  1361. static inline
  1362. void dp_peer_rx_tids_destroy(struct dp_peer *peer)
  1363. {
  1364. uint8_t i;
  1365. if (!IS_MLO_DP_LINK_PEER(peer)) {
  1366. for (i = 0; i < DP_MAX_TIDS; i++)
  1367. qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);
  1368. qdf_mem_free(peer->rx_tid);
  1369. }
  1370. peer->rx_tid = NULL;
  1371. }
  1372. #endif /* _DP_PEER_H_ */