dp_peer.h

  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #ifndef _DP_PEER_H_
  20. #define _DP_PEER_H_
  21. #include <qdf_types.h>
  22. #include <qdf_lock.h>
  23. #include "dp_types.h"
  24. #include "dp_internal.h"
  25. #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
  26. #include "hal_reo.h"
  27. #endif
  28. #define DP_INVALID_PEER_ID 0xffff
  29. #define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
  30. #define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */
  31. #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
  32. #define DP_PEER_HASH_LOAD_MULT 2
  33. #define DP_PEER_HASH_LOAD_SHIFT 0
  34. /* Threshold for peer's cached buf queue beyond which frames are dropped */
  35. #define DP_RX_CACHED_BUFQ_THRESH 64
  36. #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
  37. #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
  38. #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
  39. #define dp_peer_info(params...) \
  40. __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
  41. #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
  42. #ifdef REO_QDESC_HISTORY
  43. enum reo_qdesc_event_type {
  44. REO_QDESC_UPDATE_CB = 0,
  45. REO_QDESC_FREE,
  46. };
  47. struct reo_qdesc_event {
  48. qdf_dma_addr_t qdesc_addr;
  49. uint64_t ts;
  50. enum reo_qdesc_event_type type;
  51. uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
  52. };
  53. #endif
  54. struct ast_del_ctxt {
  55. bool age;
  56. int del_count;
  57. };
  58. #ifdef QCA_SUPPORT_WDS_EXTENDED
  59. /**
  60. * dp_peer_is_wds_ext_peer() - peer is WDS_EXT peer
  61. *
  62. * @peer: DP peer context
  63. *
64. * This API checks whether the peer is a WDS_EXT peer or not
65. *
66. * Return: true if it is a wds_ext peer, else false
  67. */
  68. static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
  69. {
  70. return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
  71. }
  72. #else
  73. static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
  74. {
  75. return false;
  76. }
  77. #endif
  78. typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
  79. void *arg);
  80. /**
  81. * dp_peer_unref_delete() - unref and delete peer
  82. * @peer: Datapath peer handle
  83. * @id: ID of module releasing reference
  84. *
  85. */
  86. void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
  87. /**
  88. * dp_txrx_peer_unref_delete() - unref and delete peer
  89. * @handle: Datapath txrx ref handle
  90. * @id: Module ID of the caller
  91. *
  92. */
  93. void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
  94. /**
  95. * dp_peer_find_hash_find() - returns legacy or mlo link peer from
  96. * peer_hash_table matching vdev_id and mac_address
  97. * @soc: soc handle
  98. * @peer_mac_addr: peer mac address
  99. * @mac_addr_is_aligned: is mac addr aligned
  100. * @vdev_id: vdev_id
  101. * @mod_id: id of module requesting reference
  102. *
103. * Return: peer on success
104. * NULL on failure
  105. */
  106. struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
  107. uint8_t *peer_mac_addr,
  108. int mac_addr_is_aligned,
  109. uint8_t vdev_id,
  110. enum dp_mod_id mod_id);
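/*
 * Illustrative usage sketch (added note, not part of the original header):
 * callers pair the hash find with dp_peer_unref_delete() under the same
 * module id once the peer is no longer needed. The mac/vdev variables are
 * placeholders.
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
 *				      DP_MOD_ID_CDP);
 *	if (!peer)
 *		return QDF_STATUS_E_FAILURE;
 *	... use peer under the held reference ...
 *	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */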
  111. /**
112. * dp_peer_find_by_id_valid() - check if peer exists for given id
113. * @soc: core DP soc context
114. * @peer_id: peer id from which peer object can be retrieved
115. *
116. * Return: true if peer exists, false otherwise
  117. */
  118. bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
  119. /**
120. * dp_peer_get_ref() - Acquire a reference on the peer object
  121. *
  122. * @soc: core DP soc context
  123. * @peer: DP peer
  124. * @mod_id: id of module requesting the reference
  125. *
  126. * Return: QDF_STATUS_SUCCESS if reference held successfully
  127. * else QDF_STATUS_E_INVAL
  128. */
  129. static inline
  130. QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
  131. struct dp_peer *peer,
  132. enum dp_mod_id mod_id)
  133. {
  134. if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
  135. return QDF_STATUS_E_INVAL;
  136. if (mod_id > DP_MOD_ID_RX)
  137. qdf_atomic_inc(&peer->mod_refs[mod_id]);
  138. return QDF_STATUS_SUCCESS;
  139. }
  140. /**
  141. * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
  142. *
  143. * @soc: core DP soc context
144. * @peer_id: peer id from which peer object can be retrieved
  145. * @mod_id: module id
  146. *
  147. * Return: struct dp_peer*: Pointer to DP peer object
  148. */
  149. static inline struct dp_peer *
  150. __dp_peer_get_ref_by_id(struct dp_soc *soc,
  151. uint16_t peer_id,
  152. enum dp_mod_id mod_id)
  153. {
  154. struct dp_peer *peer;
  155. qdf_spin_lock_bh(&soc->peer_map_lock);
  156. peer = (peer_id >= soc->max_peer_id) ? NULL :
  157. soc->peer_id_to_obj_map[peer_id];
  158. if (!peer ||
  159. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  160. qdf_spin_unlock_bh(&soc->peer_map_lock);
  161. return NULL;
  162. }
  163. qdf_spin_unlock_bh(&soc->peer_map_lock);
  164. return peer;
  165. }
  166. /**
  167. * dp_peer_get_ref_by_id() - Returns peer object given the peer id
  168. * if peer state is active
  169. *
  170. * @soc: core DP soc context
171. * @peer_id: peer id from which peer object can be retrieved
  172. * @mod_id: ID of module requesting reference
  173. *
  174. * Return: struct dp_peer*: Pointer to DP peer object
  175. */
  176. static inline
  177. struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
  178. uint16_t peer_id,
  179. enum dp_mod_id mod_id)
  180. {
  181. struct dp_peer *peer;
  182. qdf_spin_lock_bh(&soc->peer_map_lock);
  183. peer = (peer_id >= soc->max_peer_id) ? NULL :
  184. soc->peer_id_to_obj_map[peer_id];
  185. if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
  186. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  187. qdf_spin_unlock_bh(&soc->peer_map_lock);
  188. return NULL;
  189. }
  190. qdf_spin_unlock_bh(&soc->peer_map_lock);
  191. return peer;
  192. }
  193. /**
  194. * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
  195. *
  196. * @soc: core DP soc context
197. * @peer_id: peer id from which peer object can be retrieved
  198. * @handle: reference handle
  199. * @mod_id: ID of module requesting reference
  200. *
  201. * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
  202. */
  203. static inline struct dp_txrx_peer *
  204. dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
  205. uint16_t peer_id,
  206. dp_txrx_ref_handle *handle,
  207. enum dp_mod_id mod_id)
  208. {
  209. struct dp_peer *peer;
  210. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  211. if (!peer)
  212. return NULL;
  213. if (!peer->txrx_peer) {
  214. dp_peer_unref_delete(peer, mod_id);
  215. return NULL;
  216. }
  217. *handle = (dp_txrx_ref_handle)peer;
  218. return peer->txrx_peer;
  219. }
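/*
 * Illustrative sketch (assumption, not from the original header): the txrx
 * variant returns a dp_txrx_ref_handle, which is what must be released with
 * dp_txrx_peer_unref_delete() rather than the dp_peer pointer itself.
 *
 *	struct dp_txrx_peer *txrx_peer;
 *	dp_txrx_ref_handle ref_handle = NULL;
 *
 *	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, &ref_handle,
 *					       DP_MOD_ID_RX);
 *	if (!txrx_peer)
 *		return;
 *	... update per-peer stats via txrx_peer ...
 *	dp_txrx_peer_unref_delete(ref_handle, DP_MOD_ID_RX);
 */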
  220. #ifdef PEER_CACHE_RX_PKTS
  221. /**
  222. * dp_rx_flush_rx_cached() - flush cached rx frames
  223. * @peer: peer
  224. * @drop: set flag to drop frames
  225. *
  226. * Return: None
  227. */
  228. void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
  229. #else
  230. static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
  231. {
  232. }
  233. #endif
  234. static inline void
  235. dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
  236. {
  237. qdf_spin_lock_bh(&peer->peer_info_lock);
  238. peer->state = OL_TXRX_PEER_STATE_DISC;
  239. qdf_spin_unlock_bh(&peer->peer_info_lock);
  240. dp_rx_flush_rx_cached(peer, true);
  241. }
  242. /**
  243. * dp_vdev_iterate_peer() - API to iterate through vdev peer list
  244. *
  245. * @vdev: DP vdev context
  246. * @func: function to be called for each peer
  247. * @arg: argument need to be passed to func
  248. * @mod_id: module_id
  249. *
  250. * Return: void
  251. */
  252. static inline void
  253. dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
  254. enum dp_mod_id mod_id)
  255. {
  256. struct dp_peer *peer;
  257. struct dp_peer *tmp_peer;
  258. struct dp_soc *soc = NULL;
  259. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  260. return;
  261. soc = vdev->pdev->soc;
  262. qdf_spin_lock_bh(&vdev->peer_list_lock);
  263. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  264. peer_list_elem,
  265. tmp_peer) {
  266. if (dp_peer_get_ref(soc, peer, mod_id) ==
  267. QDF_STATUS_SUCCESS) {
  268. (*func)(soc, peer, arg);
  269. dp_peer_unref_delete(peer, mod_id);
  270. }
  271. }
  272. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  273. }
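/*
 * Illustrative sketch: a dp_peer_iter_func callback driven over a vdev.
 * The callback name and the counter argument are hypothetical and exist
 * only for this example.
 *
 *	static void dp_peer_count_cb(struct dp_soc *soc, struct dp_peer *peer,
 *				     void *arg)
 *	{
 *		uint32_t *count = (uint32_t *)arg;
 *
 *		(*count)++;
 *	}
 *
 *	uint32_t num = 0;
 *
 *	dp_vdev_iterate_peer(vdev, dp_peer_count_cb, &num, DP_MOD_ID_CDP);
 */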
  274. /**
  275. * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
  276. *
  277. * @pdev: DP pdev context
  278. * @func: function to be called for each peer
  279. * @arg: argument need to be passed to func
  280. * @mod_id: module_id
  281. *
  282. * Return: void
  283. */
  284. static inline void
  285. dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
  286. enum dp_mod_id mod_id)
  287. {
  288. struct dp_vdev *vdev;
  289. if (!pdev)
  290. return;
  291. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  292. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
  293. dp_vdev_iterate_peer(vdev, func, arg, mod_id);
  294. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  295. }
  296. /**
  297. * dp_soc_iterate_peer() - API to iterate through all peers of soc
  298. *
  299. * @soc: DP soc context
  300. * @func: function to be called for each peer
  301. * @arg: argument need to be passed to func
  302. * @mod_id: module_id
  303. *
  304. * Return: void
  305. */
  306. static inline void
  307. dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
  308. enum dp_mod_id mod_id)
  309. {
  310. struct dp_pdev *pdev;
  311. int i;
  312. if (!soc)
  313. return;
  314. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  315. pdev = soc->pdev_list[i];
  316. dp_pdev_iterate_peer(pdev, func, arg, mod_id);
  317. }
  318. }
  319. /**
320. * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev peer list
321. *
322. * This API caches the peers in locally allocated memory and calls the
323. * iterate function outside the lock.
324. *
325. * Since this API allocates new memory, it is suggested to use it
326. * only when the lock cannot be held.
  327. *
  328. * @vdev: DP vdev context
  329. * @func: function to be called for each peer
  330. * @arg: argument need to be passed to func
  331. * @mod_id: module_id
  332. *
  333. * Return: void
  334. */
  335. static inline void
  336. dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
  337. dp_peer_iter_func *func,
  338. void *arg,
  339. enum dp_mod_id mod_id)
  340. {
  341. struct dp_peer *peer;
  342. struct dp_peer *tmp_peer;
  343. struct dp_soc *soc = NULL;
  344. struct dp_peer **peer_array = NULL;
  345. int i = 0;
  346. uint32_t num_peers = 0;
  347. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  348. return;
  349. num_peers = vdev->num_peers;
  350. soc = vdev->pdev->soc;
  351. peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
  352. if (!peer_array)
  353. return;
  354. qdf_spin_lock_bh(&vdev->peer_list_lock);
  355. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  356. peer_list_elem,
  357. tmp_peer) {
  358. if (i >= num_peers)
  359. break;
  360. if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
  361. peer_array[i] = peer;
  362. i = (i + 1);
  363. }
  364. }
  365. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  366. for (i = 0; i < num_peers; i++) {
  367. peer = peer_array[i];
  368. if (!peer)
  369. continue;
  370. (*func)(soc, peer, arg);
  371. dp_peer_unref_delete(peer, mod_id);
  372. }
  373. qdf_mem_free(peer_array);
  374. }
  375. /**
  376. * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
  377. *
378. * This API caches the peers in locally allocated memory and calls the
379. * iterate function outside the lock.
380. *
381. * Since this API allocates new memory, it is suggested to use it
382. * only when the lock cannot be held.
  383. *
  384. * @pdev: DP pdev context
  385. * @func: function to be called for each peer
  386. * @arg: argument need to be passed to func
  387. * @mod_id: module_id
  388. *
  389. * Return: void
  390. */
  391. static inline void
  392. dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
  393. dp_peer_iter_func *func,
  394. void *arg,
  395. enum dp_mod_id mod_id)
  396. {
  397. struct dp_peer *peer;
  398. struct dp_peer *tmp_peer;
  399. struct dp_soc *soc = NULL;
  400. struct dp_vdev *vdev = NULL;
  401. struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
  402. int i = 0;
  403. int j = 0;
  404. uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
  405. if (!pdev || !pdev->soc)
  406. return;
  407. soc = pdev->soc;
  408. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  409. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  410. num_peers[i] = vdev->num_peers;
  411. peer_array[i] = qdf_mem_malloc(num_peers[i] *
  412. sizeof(struct dp_peer *));
  413. if (!peer_array[i])
  414. break;
  415. qdf_spin_lock_bh(&vdev->peer_list_lock);
  416. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  417. peer_list_elem,
  418. tmp_peer) {
  419. if (j >= num_peers[i])
  420. break;
  421. if (dp_peer_get_ref(soc, peer, mod_id) ==
  422. QDF_STATUS_SUCCESS) {
  423. peer_array[i][j] = peer;
  424. j = (j + 1);
  425. }
  426. }
  427. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  428. i = (i + 1);
  429. }
  430. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  431. for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
  432. if (!peer_array[i])
  433. break;
  434. for (j = 0; j < num_peers[i]; j++) {
  435. peer = peer_array[i][j];
  436. if (!peer)
  437. continue;
  438. (*func)(soc, peer, arg);
  439. dp_peer_unref_delete(peer, mod_id);
  440. }
  441. qdf_mem_free(peer_array[i]);
  442. }
  443. }
  444. /**
  445. * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
  446. *
447. * This API caches the peers in locally allocated memory and calls the
448. * iterate function outside the lock.
449. *
450. * Since this API allocates new memory, it is suggested to use it
451. * only when the lock cannot be held.
  452. *
  453. * @soc: DP soc context
  454. * @func: function to be called for each peer
  455. * @arg: argument need to be passed to func
  456. * @mod_id: module_id
  457. *
  458. * Return: void
  459. */
  460. static inline void
  461. dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
  462. dp_peer_iter_func *func,
  463. void *arg,
  464. enum dp_mod_id mod_id)
  465. {
  466. struct dp_pdev *pdev;
  467. int i;
  468. if (!soc)
  469. return;
  470. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  471. pdev = soc->pdev_list[i];
  472. dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
  473. }
  474. }
  475. #ifdef DP_PEER_STATE_DEBUG
  476. #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
  477. do { \
  478. if (!(_condition)) { \
  479. dp_alert("Invalid state shift from %u to %u peer " \
  480. QDF_MAC_ADDR_FMT, \
  481. (_peer)->peer_state, (_new_state), \
  482. QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
  483. QDF_ASSERT(0); \
  484. } \
  485. } while (0)
  486. #else
  487. #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
  488. do { \
  489. if (!(_condition)) { \
  490. dp_alert("Invalid state shift from %u to %u peer " \
  491. QDF_MAC_ADDR_FMT, \
  492. (_peer)->peer_state, (_new_state), \
  493. QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
  494. } \
  495. } while (0)
  496. #endif
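/*
 * Illustrative sketch: DP_PEER_STATE_ASSERT is intended to guard peer state
 * transitions. The INIT/ACTIVE values below are assumed to come from
 * enum dp_peer_state in dp_types.h.
 *
 *	DP_PEER_STATE_ASSERT(peer, DP_PEER_STATE_ACTIVE,
 *			     (peer->peer_state == DP_PEER_STATE_INIT));
 *	peer->peer_state = DP_PEER_STATE_ACTIVE;
 */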
  497. /**
  498. * dp_peer_state_cmp() - compare dp peer state
  499. *
  500. * @peer: DP peer
  501. * @state: state
  502. *
  503. * Return: true if state matches with peer state
  504. * false if it does not match
  505. */
  506. static inline bool
  507. dp_peer_state_cmp(struct dp_peer *peer,
  508. enum dp_peer_state state)
  509. {
  510. bool is_status_equal = false;
  511. qdf_spin_lock_bh(&peer->peer_state_lock);
  512. is_status_equal = (peer->peer_state == state);
  513. qdf_spin_unlock_bh(&peer->peer_state_lock);
  514. return is_status_equal;
  515. }
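/*
 * Illustrative sketch: gate a data-path operation on the peer state. The
 * DP_PEER_STATE_ACTIVE value is assumed to be defined alongside the other
 * dp_peer_state values referenced in this file.
 *
 *	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
 *		dp_peer_debug("peer " QDF_MAC_ADDR_FMT " not active",
 *			      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
 *		return QDF_STATUS_E_INVAL;
 *	}
 */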
  516. /**
  517. * dp_print_ast_stats() - Dump AST table contents
  518. * @soc: Datapath soc handle
  519. *
  520. * Return: void
  521. */
  522. void dp_print_ast_stats(struct dp_soc *soc);
  523. /**
  524. * dp_rx_peer_map_handler() - handle peer map event from firmware
  525. * @soc: generic soc handle
  526. * @peer_id: peer_id from firmware
  527. * @hw_peer_id: ast index for this peer
  528. * @vdev_id: vdev ID
  529. * @peer_mac_addr: mac address of the peer
  530. * @ast_hash: ast hash value
  531. * @is_wds: flag to indicate peer map event for WDS ast entry
  532. *
  533. * associate the peer_id that firmware provided with peer entry
  534. * and update the ast table in the host with the hw_peer_id.
  535. *
  536. * Return: QDF_STATUS code
  537. */
  538. QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  539. uint16_t hw_peer_id, uint8_t vdev_id,
  540. uint8_t *peer_mac_addr, uint16_t ast_hash,
  541. uint8_t is_wds);
  542. /**
  543. * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
  544. * @soc: generic soc handle
  545. * @peer_id: peer_id from firmware
  546. * @vdev_id: vdev ID
  547. * @peer_mac_addr: mac address of the peer or wds entry
  548. * @is_wds: flag to indicate peer map event for WDS ast entry
  549. * @free_wds_count: number of wds entries freed by FW with peer delete
  550. *
  551. * Return: none
  552. */
  553. void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
  554. uint8_t vdev_id, uint8_t *peer_mac_addr,
  555. uint8_t is_wds, uint32_t free_wds_count);
  556. #ifdef DP_RX_UDP_OVER_PEER_ROAM
  557. /**
  558. * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
  559. * @soc: dp soc pointer
  560. * @vdev_id: vdev id
  561. * @peer_mac_addr: mac address of the peer
  562. *
  563. * This function resets the roamed peer auth status and mac address
  564. * after peer map indication of same peer is received from firmware.
  565. *
  566. * Return: None
  567. */
  568. void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
  569. uint8_t *peer_mac_addr);
  570. #else
  571. static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
  572. uint8_t *peer_mac_addr)
  573. {
  574. }
  575. #endif
  576. #ifdef WLAN_FEATURE_11BE_MLO
  577. /**
  578. * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
  579. * @soc: generic soc handle
  580. * @peer_id: ML peer_id from firmware
  581. * @peer_mac_addr: mac address of the peer
  582. * @mlo_flow_info: MLO AST flow info
  583. * @mlo_link_info: MLO link info
  584. *
  585. * associate the ML peer_id that firmware provided with peer entry
  586. * and update the ast table in the host with the hw_peer_id.
  587. *
  588. * Return: QDF_STATUS code
  589. */
  590. QDF_STATUS
  591. dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  592. uint8_t *peer_mac_addr,
  593. struct dp_mlo_flow_override_info *mlo_flow_info,
  594. struct dp_mlo_link_info *mlo_link_info);
  595. /**
  596. * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
  597. * @soc: generic soc handle
  598. * @peer_id: peer_id from firmware
  599. *
  600. * Return: none
  601. */
  602. void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
  603. #endif
  604. void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
  605. enum cdp_sec_type sec_type, int is_unicast,
  606. u_int32_t *michael_key, u_int32_t *rx_pn);
  607. uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
  608. uint16_t peer_id, uint8_t *peer_mac);
  609. /**
  610. * dp_peer_add_ast() - Allocate and add AST entry into peer list
  611. * @soc: SoC handle
  612. * @peer: peer to which ast node belongs
  613. * @mac_addr: MAC address of ast node
  614. * @type: AST entry type
  615. * @flags: AST configuration flags
  616. *
  617. * This API is used by WDS source port learning function to
  618. * add a new AST entry into peer AST list
  619. *
  620. * Return: QDF_STATUS code
  621. */
  622. QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
  623. uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
  624. uint32_t flags);
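/*
 * Illustrative sketch of WDS source-port learning (hedged: the AST entry
 * type and the zero flags value are assumptions for this example, not a
 * prescribed configuration).
 *
 *	if (dp_peer_add_ast(soc, ta_peer, wds_src_mac,
 *			    CDP_TXRX_AST_TYPE_WDS, 0) != QDF_STATUS_SUCCESS)
 *		dp_peer_err("WDS AST add failed for " QDF_MAC_ADDR_FMT,
 *			    QDF_MAC_ADDR_REF(wds_src_mac));
 */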
  625. /**
  626. * dp_peer_add_ast_hmwds() - Allocate and add hmwds AST entry into peer list
  627. * @soc: SoC handle
  628. * @peer: peer to which ast node belongs
  629. * @mac_addr: MAC address of ast node
  630. * @type: AST entry type
  631. * @flags: AST configuration flags
  632. *
  633. * This function adds new HMWDS AST entry into peer AST list
  634. *
  635. * Return: QDF_STATUS code
  636. */
  637. QDF_STATUS dp_peer_add_ast_hmwds(struct dp_soc *soc, struct dp_peer *peer,
  638. uint8_t *mac_addr,
  639. enum cdp_txrx_ast_entry_type type,
  640. uint32_t flags);
  641. /**
  642. * dp_peer_del_ast() - Delete and free AST entry
  643. * @soc: SoC handle
  644. * @ast_entry: AST entry of the node
  645. *
  646. * This function removes the AST entry from peer and soc tables
  647. * It assumes caller has taken the ast lock to protect the access to these
  648. * tables
  649. *
  650. * Return: None
  651. */
  652. void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
  653. void dp_peer_ast_unmap_handler(struct dp_soc *soc,
  654. struct dp_ast_entry *ast_entry);
  655. /**
656. * dp_peer_update_ast() - Update AST entry for a roamed peer
  657. * @soc: SoC handle
  658. * @peer: peer to which ast node belongs
  659. * @ast_entry: AST entry of the node
  660. * @flags: wds or hmwds
  661. *
662. * This function updates the AST entry in the peer and soc tables for the roamed peer
  663. * It assumes caller has taken the ast lock to protect the access to these
  664. * tables
  665. *
  666. * Return: 0 if ast entry is updated successfully
  667. * -1 failure
  668. */
  669. int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
  670. struct dp_ast_entry *ast_entry, uint32_t flags);
  671. /**
  672. * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
  673. * @soc: SoC handle
  674. * @ast_mac_addr: Mac address
  675. * @pdev_id: pdev Id
  676. *
  677. * It assumes caller has taken the ast lock to protect the access to
  678. * AST hash table
  679. *
  680. * Return: AST entry
  681. */
  682. struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
  683. uint8_t *ast_mac_addr,
  684. uint8_t pdev_id);
  685. /**
  686. * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address
  687. * @soc: SoC handle
  688. * @ast_mac_addr: Mac address
  689. * @vdev_id: vdev Id
  690. *
  691. * It assumes caller has taken the ast lock to protect the access to
  692. * AST hash table
  693. *
  694. * Return: AST entry
  695. */
  696. struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
  697. uint8_t *ast_mac_addr,
  698. uint8_t vdev_id);
  699. /**
  700. * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
  701. * @soc: SoC handle
  702. * @ast_mac_addr: Mac address
  703. *
  704. * It assumes caller has taken the ast lock to protect the access to
  705. * AST hash table
  706. *
  707. * Return: AST entry
  708. */
  709. struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
  710. uint8_t *ast_mac_addr);
  711. /**
  712. * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
  713. * @soc: SoC handle
  714. * @ast_entry: AST entry of the node
  715. *
  716. * This function gets the pdev_id from the ast entry.
  717. *
  718. * Return: (uint8_t) pdev_id
  719. */
  720. uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
  721. struct dp_ast_entry *ast_entry);
  722. /**
  723. * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
  724. * @soc: SoC handle
  725. * @ast_entry: AST entry of the node
  726. *
  727. * This function gets the next hop from the ast entry.
  728. *
  729. * Return: (uint8_t) next_hop
  730. */
  731. uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
  732. struct dp_ast_entry *ast_entry);
  733. /**
734. * dp_peer_ast_set_type() - set type in the ast entry
  735. * @soc: SoC handle
  736. * @ast_entry: AST entry of the node
  737. * @type: AST entry type
  738. *
  739. * This function sets the type in the ast entry.
  740. *
741. * Return: None
  742. */
  743. void dp_peer_ast_set_type(struct dp_soc *soc,
  744. struct dp_ast_entry *ast_entry,
  745. enum cdp_txrx_ast_entry_type type);
  746. void dp_peer_ast_send_wds_del(struct dp_soc *soc,
  747. struct dp_ast_entry *ast_entry,
  748. struct dp_peer *peer);
  749. #ifdef WLAN_FEATURE_MULTI_AST_DEL
  750. void dp_peer_ast_send_multi_wds_del(
  751. struct dp_soc *soc, uint8_t vdev_id,
  752. struct peer_del_multi_wds_entries *wds_list);
  753. #endif
  754. void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  755. struct cdp_soc *dp_soc,
  756. void *cookie,
  757. enum cdp_ast_free_status status);
  758. /**
  759. * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
  760. * @soc: SoC handle
  761. * @ase: Address search entry
  762. *
  763. * This function removes the AST entry from soc AST hash table
  764. * It assumes caller has taken the ast lock to protect the access to this table
  765. *
  766. * Return: None
  767. */
  768. void dp_peer_ast_hash_remove(struct dp_soc *soc,
  769. struct dp_ast_entry *ase);
  770. /**
  771. * dp_peer_free_ast_entry() - Free up the ast entry memory
  772. * @soc: SoC handle
  773. * @ast_entry: Address search entry
  774. *
  775. * This API is used to free up the memory associated with
  776. * AST entry.
  777. *
  778. * Return: None
  779. */
  780. void dp_peer_free_ast_entry(struct dp_soc *soc,
  781. struct dp_ast_entry *ast_entry);
  782. /**
783. * dp_peer_unlink_ast_entry() - Unlink the ast entry from peer and hash lists
  784. * @soc: SoC handle
  785. * @ast_entry: Address search entry
  786. * @peer: peer
  787. *
  788. * This API is used to remove/unlink AST entry from the peer list
  789. * and hash list.
  790. *
  791. * Return: None
  792. */
  793. void dp_peer_unlink_ast_entry(struct dp_soc *soc,
  794. struct dp_ast_entry *ast_entry,
  795. struct dp_peer *peer);
  796. /**
  797. * dp_peer_mec_detach_entry() - Detach the MEC entry
  798. * @soc: SoC handle
  799. * @mecentry: MEC entry of the node
  800. * @ptr: pointer to free list
  801. *
  802. * The MEC entry is detached from MEC table and added to free_list
  803. * to free the object outside lock
  804. *
  805. * Return: None
  806. */
  807. void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
  808. void *ptr);
  809. /**
  810. * dp_peer_mec_free_list() - free the MEC entry from free_list
  811. * @soc: SoC handle
  812. * @ptr: pointer to free list
  813. *
  814. * Return: None
  815. */
  816. void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
  817. /**
818. * dp_peer_mec_add_entry() - Allocate and add MEC entry to MEC table
  819. * @soc: SoC handle
  820. * @vdev: vdev to which mec node belongs
  821. * @mac_addr: MAC address of mec node
  822. *
  823. * This function allocates and adds MEC entry to MEC table.
  824. * It assumes caller has taken the mec lock to protect the access to these
  825. * tables
  826. *
  827. * Return: QDF_STATUS
  828. */
  829. QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
  830. struct dp_vdev *vdev,
  831. uint8_t *mac_addr);
  832. /**
833. * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
834. * within pdev
  835. * @soc: SoC handle
  836. * @pdev_id: pdev Id
  837. * @mec_mac_addr: MAC address of mec node
  838. *
  839. * It assumes caller has taken the mec_lock to protect the access to
  840. * MEC hash table
  841. *
  842. * Return: MEC entry
  843. */
  844. struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
  845. uint8_t pdev_id,
  846. uint8_t *mec_mac_addr);
  847. #define DP_AST_ASSERT(_condition) \
  848. do { \
  849. if (!(_condition)) { \
  850. dp_print_ast_stats(soc);\
  851. QDF_BUG(_condition); \
  852. } \
  853. } while (0)
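/*
 * Illustrative sketch: DP_AST_ASSERT expects a local `soc` pointer in scope,
 * since it dumps AST stats before asserting. A typical use is to assert on a
 * lookup that must succeed (with the ast lock held by the caller).
 *
 *	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
 *	DP_AST_ASSERT(ast_entry);
 */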
  854. /**
  855. * dp_peer_update_inactive_time() - Update inactive time for peer
  856. * @pdev: pdev object
  857. * @tag_type: htt_tlv_tag type
  858. * @tag_buf: buf message
  859. */
  860. void
  861. dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
  862. uint32_t *tag_buf);
  863. #ifndef QCA_MULTIPASS_SUPPORT
864. /**
865. * dp_peer_set_vlan_id() - set vlan_id for this peer
866. * @cdp_soc: soc handle
867. * @vdev_id: id of vdev object
868. * @peer_mac: mac address
869. * @vlan_id: vlan id for peer
870. *
871. * Return: void
872. */
873. static inline
874. void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
  875. uint8_t vdev_id, uint8_t *peer_mac,
  876. uint16_t vlan_id)
  877. {
  878. }
  879. /**
  880. * dp_set_vlan_groupkey() - set vlan map for vdev
  881. * @soc_hdl: pointer to soc
  882. * @vdev_id: id of vdev handle
  883. * @vlan_id: vlan_id
  884. * @group_key: group key for vlan
  885. *
  886. * Return: set success/failure
  887. */
  888. static inline
  889. QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  890. uint16_t vlan_id, uint16_t group_key)
  891. {
  892. return QDF_STATUS_SUCCESS;
  893. }
  894. /**
  895. * dp_peer_multipass_list_init() - initialize multipass peer list
  896. * @vdev: pointer to vdev
  897. *
  898. * Return: void
  899. */
  900. static inline
  901. void dp_peer_multipass_list_init(struct dp_vdev *vdev)
  902. {
  903. }
  904. /**
  905. * dp_peer_multipass_list_remove() - remove peer from special peer list
  906. * @peer: peer handle
  907. *
  908. * Return: void
  909. */
  910. static inline
  911. void dp_peer_multipass_list_remove(struct dp_peer *peer)
  912. {
  913. }
  914. #else
  915. void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
  916. uint8_t vdev_id, uint8_t *peer_mac,
  917. uint16_t vlan_id);
  918. QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
  919. uint16_t vlan_id, uint16_t group_key);
  920. void dp_peer_multipass_list_init(struct dp_vdev *vdev);
  921. void dp_peer_multipass_list_remove(struct dp_peer *peer);
  922. #endif
  923. #ifndef QCA_PEER_MULTIQ_SUPPORT
  924. /**
  925. * dp_peer_reset_flowq_map() - reset peer flowq map table
  926. * @peer: dp peer handle
  927. *
  928. * Return: none
  929. */
  930. static inline
  931. void dp_peer_reset_flowq_map(struct dp_peer *peer)
  932. {
  933. }
  934. /**
  935. * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
  936. * @soc_hdl: generic soc handle
  937. * @is_wds: flag to indicate if peer is wds
  938. * @peer_id: peer_id from htt peer map message
  939. * @peer_mac_addr: mac address of the peer
  940. * @ast_info: ast flow override information from peer map
  941. *
  942. * Return: none
  943. */
  944. static inline
  945. void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
  946. bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
  947. struct dp_ast_flow_override_info *ast_info)
  948. {
  949. }
  950. #else
  951. void dp_peer_reset_flowq_map(struct dp_peer *peer);
  952. void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
  953. bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
  954. struct dp_ast_flow_override_info *ast_info);
  955. #endif
  956. #ifdef QCA_PEER_EXT_STATS
  957. /**
  958. * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay stats content
  959. * @soc: DP SoC context
  960. * @txrx_peer: DP txrx peer context
  961. *
  962. * Allocate the peer delay stats context
  963. *
  964. * Return: QDF_STATUS_SUCCESS if allocation is
  965. * successful
  966. */
  967. QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
  968. struct dp_txrx_peer *txrx_peer);
  969. /**
  970. * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context
  971. * @soc: DP SoC context
  972. * @txrx_peer: txrx DP peer context
  973. *
  974. * Free the peer delay stats context
  975. *
  976. * Return: Void
  977. */
  978. void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
  979. struct dp_txrx_peer *txrx_peer);
  980. /**
  981. * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer
  982. * @txrx_peer: dp_txrx_peer handle
  983. *
  984. * Return: void
  985. */
  986. void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
  987. #else
  988. static inline
  989. QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
  990. struct dp_txrx_peer *txrx_peer)
  991. {
  992. return QDF_STATUS_SUCCESS;
  993. }
  994. static inline
  995. void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
  996. struct dp_txrx_peer *txrx_peer)
  997. {
  998. }
  999. static inline
  1000. void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
  1001. {
  1002. }
  1003. #endif
  1004. #ifdef WLAN_PEER_JITTER
  1005. /**
  1006. * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer
  1007. * @pdev: Datapath pdev handle
  1008. * @txrx_peer: dp_txrx_peer handle
  1009. *
  1010. * Return: QDF_STATUS
  1011. */
  1012. QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
  1013. struct dp_txrx_peer *txrx_peer);
  1014. /**
  1015. * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context
  1016. * @pdev: Datapath pdev handle
  1017. * @txrx_peer: dp_txrx_peer handle
  1018. *
  1019. * Return: void
  1020. */
  1021. void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
  1022. struct dp_txrx_peer *txrx_peer);
  1023. /**
  1024. * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer
  1025. * @txrx_peer: dp_txrx_peer handle
  1026. *
  1027. * Return: void
  1028. */
  1029. void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
  1030. #else
  1031. static inline
  1032. QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
  1033. struct dp_txrx_peer *txrx_peer)
  1034. {
  1035. return QDF_STATUS_SUCCESS;
  1036. }
  1037. static inline
  1038. void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
  1039. struct dp_txrx_peer *txrx_peer)
  1040. {
  1041. }
  1042. static inline
  1043. void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
  1044. {
  1045. }
  1046. #endif
  1047. #ifndef CONFIG_SAWF_DEF_QUEUES
  1048. static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
  1049. struct dp_peer *peer)
  1050. {
  1051. return QDF_STATUS_SUCCESS;
  1052. }
  1053. static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
  1054. struct dp_peer *peer)
  1055. {
  1056. return QDF_STATUS_SUCCESS;
  1057. }
  1058. #endif
  1059. #ifndef CONFIG_SAWF
  1060. static inline
  1061. QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
  1062. struct dp_txrx_peer *txrx_peer)
  1063. {
  1064. return QDF_STATUS_SUCCESS;
  1065. }
  1066. static inline
  1067. QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
  1068. struct dp_txrx_peer *txrx_peer)
  1069. {
  1070. return QDF_STATUS_SUCCESS;
  1071. }
  1072. #endif
  1073. /**
1074. * dp_vdev_bss_peer_ref_n_get() - Get bss peer of a vdev
  1075. * @soc: DP soc
  1076. * @vdev: vdev
  1077. * @mod_id: id of module requesting reference
  1078. *
  1079. * Return: VDEV BSS peer
  1080. */
  1081. struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
  1082. struct dp_vdev *vdev,
  1083. enum dp_mod_id mod_id);
  1084. /**
1085. * dp_sta_vdev_self_peer_ref_n_get() - Get self peer of sta vdev
  1086. * @soc: DP soc
  1087. * @vdev: vdev
  1088. * @mod_id: id of module requesting reference
  1089. *
  1090. * Return: VDEV self peer
  1091. */
  1092. struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
  1093. struct dp_vdev *vdev,
  1094. enum dp_mod_id mod_id);
  1095. void dp_peer_ast_table_detach(struct dp_soc *soc);
  1096. /**
  1097. * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
  1098. * @soc: soc handle
  1099. *
  1100. * Return: none
  1101. */
  1102. void dp_peer_find_map_detach(struct dp_soc *soc);
  1103. void dp_soc_wds_detach(struct dp_soc *soc);
  1104. QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
  1105. /**
  1106. * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
  1107. * @soc: SoC handle
  1108. *
  1109. * Return: QDF_STATUS
  1110. */
  1111. QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
  1112. /**
  1113. * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
  1114. * @soc: SoC handle
  1115. *
  1116. * Return: QDF_STATUS
  1117. */
  1118. QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
  1119. /**
  1120. * dp_del_wds_entry_wrapper() - delete a WDS AST entry
  1121. * @soc: DP soc structure pointer
  1122. * @vdev_id: vdev_id
  1123. * @wds_macaddr: MAC address of ast node
  1124. * @type: type from enum cdp_txrx_ast_entry_type
  1125. * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
  1126. *
  1127. * This API is used to delete an AST entry from fw
  1128. *
  1129. * Return: None
  1130. */
  1131. void dp_del_wds_entry_wrapper(struct dp_soc *soc, uint8_t vdev_id,
  1132. uint8_t *wds_macaddr, uint8_t type,
  1133. uint8_t delete_in_fw);
  1134. void dp_soc_wds_attach(struct dp_soc *soc);
  1135. /**
  1136. * dp_peer_mec_hash_detach() - Free MEC Hash table
  1137. * @soc: SoC handle
  1138. *
  1139. * Return: None
  1140. */
  1141. void dp_peer_mec_hash_detach(struct dp_soc *soc);
  1142. /**
  1143. * dp_peer_ast_hash_detach() - Free AST Hash table
  1144. * @soc: SoC handle
  1145. *
  1146. * Return: None
  1147. */
  1148. void dp_peer_ast_hash_detach(struct dp_soc *soc);
  1149. #ifdef FEATURE_AST
  1150. /**
  1151. * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
  1152. * @soc: datapath soc handle
  1153. * @peer: datapath peer handle
  1154. *
  1155. * Delete the AST entries belonging to a peer
  1156. */
  1157. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  1158. struct dp_peer *peer)
  1159. {
  1160. struct dp_ast_entry *ast_entry, *temp_ast_entry;
  1161. dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
  1162. /*
1163. * Delete peer self ast entry. This is done to handle scenarios
1164. * where the peer is freed before the peer map is received (for example,
1165. * in case of auth disallow due to ACL); in such cases the self ast is
1166. * not added to peer->ast_list.
  1167. */
  1168. if (peer->self_ast_entry) {
  1169. dp_peer_del_ast(soc, peer->self_ast_entry);
  1170. peer->self_ast_entry = NULL;
  1171. }
  1172. DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
  1173. dp_peer_del_ast(soc, ast_entry);
  1174. }
  1175. /**
  1176. * dp_print_peer_ast_entries() - Dump AST entries of peer
  1177. * @soc: Datapath soc handle
  1178. * @peer: Datapath peer
  1179. * @arg: argument to iterate function
  1180. *
  1181. * Return: void
  1182. */
  1183. void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
  1184. void *arg);
  1185. #else
  1186. static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
  1187. struct dp_peer *peer, void *arg)
  1188. {
  1189. }
  1190. static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
  1191. struct dp_peer *peer)
  1192. {
  1193. }
  1194. #endif
  1195. #ifdef FEATURE_MEC
  1196. /**
  1197. * dp_peer_mec_spinlock_create() - Create the MEC spinlock
  1198. * @soc: SoC handle
  1199. *
  1200. * Return: none
  1201. */
  1202. void dp_peer_mec_spinlock_create(struct dp_soc *soc);
  1203. /**
  1204. * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
  1205. * @soc: SoC handle
  1206. *
  1207. * Return: none
  1208. */
  1209. void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);
  1210. /**
  1211. * dp_peer_mec_flush_entries() - Delete all mec entries in table
  1212. * @soc: Datapath SOC
  1213. *
  1214. * Return: None
  1215. */
  1216. void dp_peer_mec_flush_entries(struct dp_soc *soc);
  1217. #else
  1218. static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
  1219. {
  1220. }
  1221. static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
  1222. {
  1223. }
  1224. static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
  1225. {
  1226. }
  1227. #endif
  1228. static inline int dp_peer_find_mac_addr_cmp(
  1229. union dp_align_mac_addr *mac_addr1,
  1230. union dp_align_mac_addr *mac_addr2)
  1231. {
  1232. /*
  1233. * Intentionally use & rather than &&.
  1234. * because the operands are binary rather than generic boolean,
  1235. * the functionality is equivalent.
  1236. * Using && has the advantage of short-circuited evaluation,
  1237. * but using & has the advantage of no conditional branching,
  1238. * which is a more significant benefit.
  1239. */
  1240. return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
  1241. & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
  1242. }
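/*
 * Illustrative sketch: dp_peer_find_mac_addr_cmp() returns 0 on a match, so
 * callers negate it when searching, as the MLO link-peer helpers below do.
 *
 *	if (!dp_peer_find_mac_addr_cmp(&peer->mac_addr, &target_mac))
 *		return peer;
 */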
  1243. /**
  1244. * dp_peer_delete() - delete DP peer
  1245. *
1246. * @soc: Datapath soc
  1247. * @peer: Datapath peer
  1248. * @arg: argument to iter function
  1249. *
  1250. * Return: void
  1251. */
  1252. void dp_peer_delete(struct dp_soc *soc,
  1253. struct dp_peer *peer,
  1254. void *arg);
  1255. /**
  1256. * dp_mlo_peer_delete() - delete MLO DP peer
  1257. *
  1258. * @soc: Datapath soc
  1259. * @peer: Datapath peer
  1260. * @arg: argument to iter function
  1261. *
  1262. * Return: void
  1263. */
  1264. void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);
  1265. #ifdef WLAN_FEATURE_11BE_MLO
  1266. /* is MLO connection mld peer */
  1267. #define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)
  1268. /* set peer type */
  1269. #define DP_PEER_SET_TYPE(_peer, _type_val) \
  1270. ((_peer)->peer_type = (_type_val))
  1271. /* is legacy peer */
  1272. #define IS_DP_LEGACY_PEER(_peer) \
  1273. ((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))
  1274. /* is MLO connection link peer */
  1275. #define IS_MLO_DP_LINK_PEER(_peer) \
  1276. ((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
  1277. /* is MLO connection mld peer */
  1278. #define IS_MLO_DP_MLD_PEER(_peer) \
  1279. ((_peer)->peer_type == CDP_MLD_PEER_TYPE)
  1280. /* Get Mld peer from link peer */
  1281. #define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
  1282. ((link_peer)->mld_peer)
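/*
 * Illustrative sketch: the helpers above are used to branch on the
 * connection type of a dp_peer.
 *
 *	if (IS_DP_LEGACY_PEER(peer)) {
 *		... non-MLO peer handling ...
 *	} else if (IS_MLO_DP_LINK_PEER(peer)) {
 *		mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
 *	} else if (IS_MLO_DP_MLD_PEER(peer)) {
 *		... operate on the MLD (association-level) peer ...
 *	}
 */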
  1283. #ifdef WLAN_MLO_MULTI_CHIP
  1284. uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);
  1285. struct dp_peer *
  1286. dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
  1287. uint8_t *peer_mac_addr,
  1288. int mac_addr_is_aligned,
  1289. uint8_t vdev_id,
  1290. uint8_t chip_id,
  1291. enum dp_mod_id mod_id);
  1292. #else
  1293. static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
  1294. {
  1295. return 0;
  1296. }
  1297. static inline struct dp_peer *
  1298. dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
  1299. uint8_t *peer_mac_addr,
  1300. int mac_addr_is_aligned,
  1301. uint8_t vdev_id,
  1302. uint8_t chip_id,
  1303. enum dp_mod_id mod_id)
  1304. {
  1305. return dp_peer_find_hash_find(soc, peer_mac_addr,
  1306. mac_addr_is_aligned,
  1307. vdev_id, mod_id);
  1308. }
  1309. #endif
  1310. /**
  1311. * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
  1312. * matching mac_address
  1313. * @soc: soc handle
  1314. * @peer_mac_addr: mld peer mac address
  1315. * @mac_addr_is_aligned: is mac addr aligned
  1316. * @vdev_id: vdev_id
  1317. * @mod_id: id of module requesting reference
  1318. *
1319. * Return: peer on success
1320. * NULL on failure
  1321. */
  1322. static inline
  1323. struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
  1324. uint8_t *peer_mac_addr,
  1325. int mac_addr_is_aligned,
  1326. uint8_t vdev_id,
  1327. enum dp_mod_id mod_id)
  1328. {
  1329. if (soc->arch_ops.mlo_peer_find_hash_find)
  1330. return soc->arch_ops.mlo_peer_find_hash_find(soc,
  1331. peer_mac_addr,
  1332. mac_addr_is_aligned,
  1333. mod_id, vdev_id);
  1334. return NULL;
  1335. }
  1336. /**
1337. * dp_peer_hash_find_wrapper() - find link peer or mld peer according to
  1338. * peer_type
  1339. * @soc: DP SOC handle
  1340. * @peer_info: peer information for hash find
  1341. * @mod_id: ID of module requesting reference
  1342. *
  1343. * Return: peer handle
  1344. */
  1345. static inline
  1346. struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
  1347. struct cdp_peer_info *peer_info,
  1348. enum dp_mod_id mod_id)
  1349. {
  1350. struct dp_peer *peer = NULL;
  1351. if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
  1352. peer_info->peer_type == CDP_WILD_PEER_TYPE) {
  1353. peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
  1354. peer_info->mac_addr_is_aligned,
  1355. peer_info->vdev_id,
  1356. mod_id);
  1357. if (peer)
  1358. return peer;
  1359. }
  1360. if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
  1361. peer_info->peer_type == CDP_WILD_PEER_TYPE)
  1362. peer = dp_mld_peer_find_hash_find(
  1363. soc, peer_info->mac_addr,
  1364. peer_info->mac_addr_is_aligned,
  1365. peer_info->vdev_id,
  1366. mod_id);
  1367. return peer;
  1368. }
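/*
 * Illustrative sketch: fill a cdp_peer_info descriptor and look the peer up
 * regardless of whether it is a legacy, link or MLD peer. Only the fields
 * dereferenced by the wrapper above are populated; the initializer style is
 * an assumption.
 *
 *	struct cdp_peer_info peer_info = {0};
 *	struct dp_peer *peer;
 *
 *	peer_info.vdev_id = vdev_id;
 *	peer_info.mac_addr = peer_mac;
 *	peer_info.mac_addr_is_aligned = false;
 *	peer_info.peer_type = CDP_WILD_PEER_TYPE;
 *
 *	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
 *	if (peer)
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */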

/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
 *                               increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
                               struct dp_peer *mld_peer)
{
        /* increase mld_peer ref_cnt */
        dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
        link_peer->mld_peer = mld_peer;
}

/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
 *                               decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
        dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
        link_peer->mld_peer = NULL;
}

/**
 * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
        int i;

        qdf_spinlock_create(&mld_peer->link_peers_info_lock);
        mld_peer->num_links = 0;
        for (i = 0; i < DP_MAX_MLO_LINKS; i++)
                mld_peer->link_peers[i].is_valid = false;
}

/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
        qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}

/**
 * dp_mld_peer_add_link_peer() - add link peer info to mld peer
 * @mld_peer: mld dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
                               struct dp_peer *link_peer)
{
        int i;
        struct dp_peer_link_info *link_peer_info;
        struct dp_soc *soc = mld_peer->vdev->pdev->soc;

        qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
        for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
                link_peer_info = &mld_peer->link_peers[i];
                if (!link_peer_info->is_valid) {
                        qdf_mem_copy(link_peer_info->mac_addr.raw,
                                     link_peer->mac_addr.raw,
                                     QDF_MAC_ADDR_SIZE);
                        link_peer_info->is_valid = true;
                        link_peer_info->vdev_id = link_peer->vdev->vdev_id;
                        link_peer_info->chip_id =
                                dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
                        mld_peer->num_links++;
                        break;
                }
        }
        qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

        dp_peer_info("%s addition of link peer %pK (" QDF_MAC_ADDR_FMT ") "
                     "to MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
                     "idx %u num_links %u",
                     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
                     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
                     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
                     i, mld_peer->num_links);

        dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_ADD_LINK,
                                                mld_peer, link_peer, i,
                                                (i != DP_MAX_MLO_LINKS) ? 1 : 0);
}

/**
 * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
 * @mld_peer: MLD dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: number of links left after deletion
 */
static inline
uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
                                  struct dp_peer *link_peer)
{
        int i;
        struct dp_peer_link_info *link_peer_info;
        uint8_t num_links;
        struct dp_soc *soc = mld_peer->vdev->pdev->soc;

        qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
        for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
                link_peer_info = &mld_peer->link_peers[i];
                if (link_peer_info->is_valid &&
                    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
                                               &link_peer_info->mac_addr)) {
                        link_peer_info->is_valid = false;
                        mld_peer->num_links--;
                        break;
                }
        }
        num_links = mld_peer->num_links;
        qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

        dp_peer_info("%s deletion of link peer %pK (" QDF_MAC_ADDR_FMT ") "
                     "from MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
                     "idx %u num_links %u",
                     (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
                     link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
                     mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
                     i, mld_peer->num_links);

        dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_DEL_LINK,
                                                mld_peer, link_peer, i,
                                                (i != DP_MAX_MLO_LINKS) ? 1 : 0);

        return num_links;
}
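
/*
 * Illustrative sketch (not from the original source): the add/del helpers
 * above are used in pairs when a link peer attaches to or detaches from its
 * MLD peer; dp_peer_mlo_delete() further below shows the detach side.
 * 'mld_peer' and 'link_peer' are assumed to be valid peers of the
 * corresponding types.
 *
 *      dp_link_peer_add_mld_peer(link_peer, mld_peer);
 *      dp_mld_peer_add_link_peer(mld_peer, link_peer);
 *      ...
 *      if (dp_mld_peer_del_link_peer(mld_peer, link_peer) == 0) {
 *              ... last link removed, MLD peer can be deleted ...
 *      }
 *      dp_link_peer_del_mld_peer(link_peer);
 */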

/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
 *                                         increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that holds the link peer pointer array and count
 * @mod_id: id of module requesting reference
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
                                struct dp_soc *soc,
                                struct dp_peer *mld_peer,
                                struct dp_mld_link_peers *mld_link_peers,
                                enum dp_mod_id mod_id)
{
        struct dp_peer *peer;
        uint8_t i = 0, j = 0;
        struct dp_peer_link_info *link_peer_info;

        qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
        qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
        for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
                link_peer_info = &mld_peer->link_peers[i];
                if (link_peer_info->is_valid) {
                        peer = dp_link_peer_hash_find_by_chip_id(
                                                soc,
                                                link_peer_info->mac_addr.raw,
                                                true,
                                                link_peer_info->vdev_id,
                                                link_peer_info->chip_id,
                                                mod_id);
                        if (peer)
                                mld_link_peers->link_peers[j++] = peer;
                }
        }
        qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

        mld_link_peers->num_links = j;
}

/**
 * dp_release_link_peers_ref() - release all link peers reference
 * @mld_link_peers: structure that holds the link peer pointer array and count
 * @mod_id: id of module requesting reference
 *
 * Return: None.
 */
static inline
void dp_release_link_peers_ref(
                        struct dp_mld_link_peers *mld_link_peers,
                        enum dp_mod_id mod_id)
{
        struct dp_peer *peer;
        uint8_t i;

        for (i = 0; i < mld_link_peers->num_links; i++) {
                peer = mld_link_peers->link_peers[i];
                if (peer)
                        dp_peer_unref_delete(peer, mod_id);
                mld_link_peers->link_peers[i] = NULL;
        }
        mld_link_peers->num_links = 0;
}
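
/*
 * Illustrative sketch (not from the original source) of the canonical
 * get/release pattern for walking the link peers of an MLD peer; this is
 * the same pattern used by dp_get_link_peer_id_by_lmac_id() below. 'soc'
 * and 'mld_peer' are assumed to be valid.
 *
 *      struct dp_mld_link_peers link_peers_info;
 *      struct dp_peer *link_peer;
 *      uint8_t i;
 *
 *      dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, &link_peers_info,
 *                                          DP_MOD_ID_CDP);
 *      for (i = 0; i < link_peers_info.num_links; i++) {
 *              link_peer = link_peers_info.link_peers[i];
 *              ... per-link handling ...
 *      }
 *      dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
 */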

/**
 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
 * @soc: Datapath soc handle
 * @peer_id: peer id
 * @lmac_id: lmac id to find the link peer on given lmac
 *
 * Return: peer_id of link peer if found
 *         else return HTT_INVALID_PEER
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
                                        uint8_t lmac_id)
{
        uint8_t i;
        struct dp_peer *peer;
        struct dp_peer *link_peer;
        struct dp_soc *link_peer_soc;
        struct dp_mld_link_peers link_peers_info;
        uint16_t link_peer_id = HTT_INVALID_PEER;

        peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
        if (!peer)
                return HTT_INVALID_PEER;

        if (IS_MLO_DP_MLD_PEER(peer)) {
                /* get link peers with reference */
                dp_get_link_peers_ref_from_mld_peer(soc, peer,
                                                    &link_peers_info,
                                                    DP_MOD_ID_CDP);

                for (i = 0; i < link_peers_info.num_links; i++) {
                        link_peer = link_peers_info.link_peers[i];
                        link_peer_soc = link_peer->vdev->pdev->soc;
                        if ((link_peer_soc == soc) &&
                            (link_peer->vdev->pdev->lmac_id == lmac_id)) {
                                link_peer_id = link_peer->peer_id;
                                break;
                        }
                }
                /* release link peers reference */
                dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
        } else {
                link_peer_id = peer_id;
        }

        dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

        return link_peer_id;
}

/**
 * dp_peer_get_tgt_peer_hash_find() - get dp_peer handle
 * @soc: soc handle
 * @peer_mac: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * For an MLO connection, return the corresponding MLD peer;
 * otherwise return the link peer for the non-MLO case.
 *
 * Return: peer in success
 *         NULL in failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
                                               uint8_t *peer_mac,
                                               int mac_addr_is_aligned,
                                               uint8_t vdev_id,
                                               enum dp_mod_id mod_id)
{
        struct dp_peer *ta_peer = NULL;
        struct dp_peer *peer = dp_peer_find_hash_find(soc,
                                                      peer_mac, 0, vdev_id,
                                                      mod_id);

        if (peer) {
                /* mlo connection link peer, get mld peer with reference */
                if (IS_MLO_DP_LINK_PEER(peer)) {
                        /* increase mld peer ref_cnt */
                        if (QDF_STATUS_SUCCESS ==
                            dp_peer_get_ref(soc, peer->mld_peer, mod_id))
                                ta_peer = peer->mld_peer;
                        else
                                ta_peer = NULL;

                        /* release the peer reference taken by hash find */
                        dp_peer_unref_delete(peer, mod_id);
                } else {
                        /* mlo MLD peer or non-mlo link peer */
                        ta_peer = peer;
                }
        } else {
                dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT,
                            QDF_MAC_ADDR_REF(peer_mac));
        }

        return ta_peer;
}
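
/*
 * Illustrative usage sketch (not from the original source): resolving the
 * target peer from a MAC address; for an MLO link address this yields the
 * MLD peer. 'soc', 'peer_mac' and 'vdev_id' are assumed to be supplied by
 * the caller.
 *
 *      struct dp_peer *ta_peer;
 *
 *      ta_peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0, vdev_id,
 *                                               DP_MOD_ID_CDP);
 *      if (ta_peer) {
 *              ... use ta_peer ...
 *              dp_peer_unref_delete(ta_peer, DP_MOD_ID_CDP);
 *      }
 */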

/**
 * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @mod_id: ID of module requesting reference
 *
 * For an MLO connection, return the corresponding MLD peer;
 * otherwise return the link peer for the non-MLO case.
 *
 * Return: peer in success
 *         NULL in failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
                                           uint16_t peer_id,
                                           enum dp_mod_id mod_id)
{
        struct dp_peer *ta_peer = NULL;
        struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

        if (peer) {
                /* mlo connection link peer, get mld peer with reference */
                if (IS_MLO_DP_LINK_PEER(peer)) {
                        /* increase mld peer ref_cnt */
                        if (QDF_STATUS_SUCCESS ==
                            dp_peer_get_ref(soc, peer->mld_peer, mod_id))
                                ta_peer = peer->mld_peer;
                        else
                                ta_peer = NULL;

                        /* release the peer reference taken by get_ref_by_id */
                        dp_peer_unref_delete(peer, mod_id);
                } else {
                        /* mlo MLD peer or non-mlo link peer */
                        ta_peer = peer;
                }
        }

        return ta_peer;
}

/**
 * dp_peer_mlo_delete() - peer MLO related delete operation
 * @peer: DP peer handle
 *
 * Return: None
 */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
        struct dp_peer *ml_peer;
        struct dp_soc *soc;

        dp_info("peer " QDF_MAC_ADDR_FMT " type %d",
                QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->peer_type);

        /* MLO connection link peer */
        if (IS_MLO_DP_LINK_PEER(peer)) {
                ml_peer = peer->mld_peer;
                soc = ml_peer->vdev->pdev->soc;

                /* if last link peer deletion, delete MLD peer */
                if (dp_mld_peer_del_link_peer(ml_peer, peer) == 0)
                        dp_peer_delete(soc, ml_peer, NULL);
        }
}

/**
 * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
 * @soc: Soc handle
 * @peer: DP peer handle
 * @vdev_id: Vdev ID
 * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mlo_setup(
                        struct dp_soc *soc,
                        struct dp_peer *peer,
                        uint8_t vdev_id,
                        struct cdp_peer_setup_info *setup_info);

/**
 * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
 * @peer: datapath peer
 *
 * Return: MLD peer in case of MLO Link peer
 *         Peer itself in other cases
 */
static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
        return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
}

/**
 * dp_get_primary_link_peer_by_id(): Get primary link peer from the given
 *                                   peer id
 * @soc: core DP soc context
 * @peer_id: peer id
 * @mod_id: ID of module requesting reference
 *
 * Return: primary link peer for the MLO peer
 *         legacy peer itself in case of legacy peer
 */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
                                               uint16_t peer_id,
                                               enum dp_mod_id mod_id)
{
        uint8_t i;
        struct dp_mld_link_peers link_peers_info;
        struct dp_peer *peer;
        struct dp_peer *link_peer;
        struct dp_peer *primary_peer = NULL;

        peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
        if (!peer)
                return NULL;

        if (IS_MLO_DP_MLD_PEER(peer)) {
                /* get link peers with reference */
                dp_get_link_peers_ref_from_mld_peer(soc, peer,
                                                    &link_peers_info, mod_id);

                for (i = 0; i < link_peers_info.num_links; i++) {
                        link_peer = link_peers_info.link_peers[i];
                        if (link_peer->primary_link) {
                                primary_peer = link_peer;
                                /*
                                 * Take additional reference over
                                 * primary link peer.
                                 */
                                dp_peer_get_ref(NULL, primary_peer, mod_id);
                                break;
                        }
                }
                /* release link peers reference */
                dp_release_link_peers_ref(&link_peers_info, mod_id);
                dp_peer_unref_delete(peer, mod_id);
        } else {
                primary_peer = peer;
        }

        return primary_peer;
}
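
/*
 * Illustrative usage sketch (not from the original source): fetching the
 * primary link peer for a peer id and releasing it; for a legacy peer the
 * peer itself is returned. 'soc' and 'peer_id' are assumed to be valid.
 *
 *      struct dp_peer *primary_peer;
 *
 *      primary_peer = dp_get_primary_link_peer_by_id(soc, peer_id,
 *                                                    DP_MOD_ID_CDP);
 *      if (primary_peer) {
 *              ... use primary_peer ...
 *              dp_peer_unref_delete(primary_peer, DP_MOD_ID_CDP);
 *      }
 */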

/**
 * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
 * @peer: Datapath peer
 *
 * Return: dp_txrx_peer from MLD peer if peer type is link peer
 *         dp_txrx_peer from peer itself for other cases
 */
static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
        return IS_MLO_DP_LINK_PEER(peer) ?
                        peer->mld_peer->txrx_peer : peer->txrx_peer;
}

/**
 * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
 * @peer: Datapath peer
 *
 * Return: true if peer is primary link peer or legacy peer
 *         false otherwise
 */
static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
        if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
                return true;
        else if (IS_DP_LEGACY_PEER(peer))
                return true;
        else
                return false;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Get tgt txrx peer for the given peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
                               uint16_t peer_id,
                               dp_txrx_ref_handle *handle,
                               enum dp_mod_id mod_id)
{
        struct dp_peer *peer;
        struct dp_txrx_peer *txrx_peer;

        peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
        if (!peer)
                return NULL;

        txrx_peer = dp_get_txrx_peer(peer);
        if (txrx_peer) {
                *handle = (dp_txrx_ref_handle)peer;
                return txrx_peer;
        }

        dp_peer_unref_delete(peer, mod_id);
        return NULL;
}
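
/*
 * Illustrative usage sketch (not from the original source): taking a txrx
 * peer reference by peer id and releasing it through the returned handle.
 * The release helper dp_txrx_peer_unref_delete() is assumed to be the
 * matching counterpart declared elsewhere in this header.
 *
 *      dp_txrx_ref_handle ref_handle = NULL;
 *      struct dp_txrx_peer *txrx_peer;
 *
 *      txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id, &ref_handle,
 *                                                 DP_MOD_ID_CDP);
 *      if (txrx_peer) {
 *              ... use txrx_peer ...
 *              dp_txrx_peer_unref_delete(ref_handle, DP_MOD_ID_CDP);
 *      }
 */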

/**
 * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
 * @soc: core DP soc context
 *
 * Return: void
 */
void dp_print_mlo_ast_stats_be(struct dp_soc *soc);

#else

#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false

#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) true
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL

static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
                                          struct cdp_peer_info *peer_info,
                                          enum dp_mod_id mod_id)
{
        return dp_peer_find_hash_find(soc, peer_info->mac_addr,
                                      peer_info->mac_addr_is_aligned,
                                      peer_info->vdev_id,
                                      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
                                               uint8_t *peer_mac,
                                               int mac_addr_is_aligned,
                                               uint8_t vdev_id,
                                               enum dp_mod_id mod_id)
{
        return dp_peer_find_hash_find(soc, peer_mac,
                                      mac_addr_is_aligned, vdev_id,
                                      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
                                           uint16_t peer_id,
                                           enum dp_mod_id mod_id)
{
        return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
QDF_STATUS dp_peer_mlo_setup(
                        struct dp_soc *soc,
                        struct dp_peer *peer,
                        uint8_t vdev_id,
                        struct cdp_peer_setup_info *setup_info)
{
        return QDF_STATUS_SUCCESS;
}

static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
                           struct dp_peer *link_peer)
{
}

static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
        return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
                                  uint8_t *peer_mac_addr,
                                  int mac_addr_is_aligned,
                                  uint8_t vdev_id,
                                  uint8_t chip_id,
                                  enum dp_mod_id mod_id)
{
        return dp_peer_find_hash_find(soc, peer_mac_addr,
                                      mac_addr_is_aligned,
                                      vdev_id, mod_id);
}

static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
        return peer;
}

static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
                                               uint16_t peer_id,
                                               enum dp_mod_id mod_id)
{
        return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
        return peer->txrx_peer;
}

static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
        return true;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Get tgt txrx peer for the given peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
                               uint16_t peer_id,
                               dp_txrx_ref_handle *handle,
                               enum dp_mod_id mod_id)
{
        return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}

static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
                                        uint8_t lmac_id)
{
        return peer_id;
}

static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
{
}
#endif /* WLAN_FEATURE_11BE_MLO */

static inline
void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
{
        uint8_t i;

        qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
                     sizeof(struct dp_rx_tid_defrag));

        for (i = 0; i < DP_MAX_TIDS; i++)
                qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
}

static inline
void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
{
        uint8_t i;

        for (i = 0; i < DP_MAX_TIDS; i++)
                qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
}

#ifdef PEER_CACHE_RX_PKTS
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
        qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
        txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
        qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
                        DP_RX_CACHED_BUFQ_THRESH);
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
        qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
        qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
#else
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
#endif

/**
 * dp_peer_update_state() - update dp peer state
 * @soc: core DP soc context
 * @peer: DP peer
 * @state: new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
                     struct dp_peer *peer,
                     enum dp_peer_state state)
{
        uint8_t peer_state;

        qdf_spin_lock_bh(&peer->peer_state_lock);
        peer_state = peer->peer_state;

        switch (state) {
        case DP_PEER_STATE_INIT:
                DP_PEER_STATE_ASSERT
                        (peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
                         (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
                break;

        case DP_PEER_STATE_ACTIVE:
                DP_PEER_STATE_ASSERT(peer, state,
                                     (peer_state == DP_PEER_STATE_INIT));
                break;

        case DP_PEER_STATE_LOGICAL_DELETE:
                DP_PEER_STATE_ASSERT(peer, state,
                                     (peer_state == DP_PEER_STATE_ACTIVE) ||
                                     (peer_state == DP_PEER_STATE_INIT));
                break;

        case DP_PEER_STATE_INACTIVE:
                if (IS_MLO_DP_MLD_PEER(peer))
                        DP_PEER_STATE_ASSERT
                                (peer, state,
                                 (peer_state == DP_PEER_STATE_ACTIVE));
                else
                        DP_PEER_STATE_ASSERT
                                (peer, state,
                                 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
                break;

        case DP_PEER_STATE_FREED:
                if (peer->sta_self_peer)
                        DP_PEER_STATE_ASSERT
                                (peer, state,
                                 (peer_state == DP_PEER_STATE_INIT));
                else
                        DP_PEER_STATE_ASSERT
                                (peer, state,
                                 (peer_state == DP_PEER_STATE_INACTIVE) ||
                                 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
                break;

        default:
                qdf_spin_unlock_bh(&peer->peer_state_lock);
                dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
                         state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
                return;
        }

        peer->peer_state = state;
        qdf_spin_unlock_bh(&peer->peer_state_lock);

        dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
                peer_state, state,
                QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}
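
/*
 * Summary of the transitions the asserts above permit (derived from the
 * switch cases, not stated in the original source):
 *
 *      link/legacy peer: INIT -> ACTIVE -> LOGICAL_DELETE -> INACTIVE -> FREED
 *      MLD peer:         INIT -> ACTIVE -> INACTIVE -> FREED
 *      STA self peer:    INIT -> FREED
 *
 * FREED is also reachable directly from LOGICAL_DELETE for non-self peers.
 */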

#ifdef REO_SHARED_QREF_TABLE_EN
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
                                        struct dp_peer *peer);
#else
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
                                                      struct dp_peer *peer) {}
#endif

/**
 * dp_peer_check_wds_ext_peer() - Check WDS ext peer
 * @peer: DP peer
 *
 * Return: True for WDS ext peer, false otherwise
 */
bool dp_peer_check_wds_ext_peer(struct dp_peer *peer);

/**
 * dp_gen_ml_peer_id() - Generate MLD peer id for DP
 * @soc: DP soc context
 * @peer_id: mld peer id
 *
 * Return: DP MLD peer id
 */
uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id);

#endif /* _DP_PEER_H_ */