dp_peer.h 53 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129
  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #ifndef _DP_PEER_H_
  20. #define _DP_PEER_H_
  21. #include <qdf_types.h>
  22. #include <qdf_lock.h>
  23. #include "dp_types.h"
  24. #include "dp_internal.h"
  25. #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
  26. #include "hal_reo.h"
  27. #endif
  28. #define DP_INVALID_PEER_ID 0xffff
  29. #define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
  30. #define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */
  31. #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
  32. #define DP_PEER_HASH_LOAD_MULT 2
  33. #define DP_PEER_HASH_LOAD_SHIFT 0
  34. /* Threshold for peer's cached buf queue beyond which frames are dropped */
  35. #define DP_RX_CACHED_BUFQ_THRESH 64
  36. #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
  37. #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
  38. #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
  39. #define dp_peer_info(params...) \
  40. __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
  41. #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
  42. #ifdef REO_QDESC_HISTORY
  43. enum reo_qdesc_event_type {
  44. REO_QDESC_UPDATE_CB = 0,
  45. REO_QDESC_FREE,
  46. };
  47. struct reo_qdesc_event {
  48. qdf_dma_addr_t qdesc_addr;
  49. uint64_t ts;
  50. enum reo_qdesc_event_type type;
  51. uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
  52. };
  53. #endif
  54. struct ast_del_ctxt {
  55. bool age;
  56. int del_count;
  57. };
  58. typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
  59. void *arg);
  60. void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
  61. void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
  62. struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
  63. uint8_t *peer_mac_addr,
  64. int mac_addr_is_aligned,
  65. uint8_t vdev_id,
  66. enum dp_mod_id id);
  67. bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
  68. #ifdef DP_UMAC_HW_RESET_SUPPORT
  69. void dp_reset_tid_q_setup(struct dp_soc *soc);
  70. #endif
  71. /**
  72. * dp_peer_get_ref() - Returns peer object given the peer id
  73. *
  74. * @soc : core DP soc context
  75. * @peer : DP peer
  76. * @mod_id : id of module requesting the reference
  77. *
  78. * Return: QDF_STATUS_SUCCESS if reference held successfully
  79. * else QDF_STATUS_E_INVAL
  80. */
  81. static inline
  82. QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
  83. struct dp_peer *peer,
  84. enum dp_mod_id mod_id)
  85. {
  86. if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
  87. return QDF_STATUS_E_INVAL;
  88. if (mod_id > DP_MOD_ID_RX)
  89. qdf_atomic_inc(&peer->mod_refs[mod_id]);
  90. return QDF_STATUS_SUCCESS;
  91. }
  92. /**
  93. * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
  94. *
  95. * @soc : core DP soc context
  96. * @peer_id : peer id from peer object can be retrieved
  97. * @mod_id : module id
  98. *
  99. * Return: struct dp_peer*: Pointer to DP peer object
  100. */
  101. static inline struct dp_peer *
  102. __dp_peer_get_ref_by_id(struct dp_soc *soc,
  103. uint16_t peer_id,
  104. enum dp_mod_id mod_id)
  105. {
  106. struct dp_peer *peer;
  107. qdf_spin_lock_bh(&soc->peer_map_lock);
  108. peer = (peer_id >= soc->max_peer_id) ? NULL :
  109. soc->peer_id_to_obj_map[peer_id];
  110. if (!peer ||
  111. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  112. qdf_spin_unlock_bh(&soc->peer_map_lock);
  113. return NULL;
  114. }
  115. qdf_spin_unlock_bh(&soc->peer_map_lock);
  116. return peer;
  117. }
  118. /**
  119. * dp_peer_get_ref_by_id() - Returns peer object given the peer id
  120. * if peer state is active
  121. *
  122. * @soc : core DP soc context
  123. * @peer_id : peer id from peer object can be retrieved
  124. * @mod_id : ID of module requesting reference
  125. *
  126. * Return: struct dp_peer*: Pointer to DP peer object
  127. */
  128. static inline
  129. struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
  130. uint16_t peer_id,
  131. enum dp_mod_id mod_id)
  132. {
  133. struct dp_peer *peer;
  134. qdf_spin_lock_bh(&soc->peer_map_lock);
  135. peer = (peer_id >= soc->max_peer_id) ? NULL :
  136. soc->peer_id_to_obj_map[peer_id];
  137. if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
  138. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  139. qdf_spin_unlock_bh(&soc->peer_map_lock);
  140. return NULL;
  141. }
  142. qdf_spin_unlock_bh(&soc->peer_map_lock);
  143. return peer;
  144. }
  145. /**
  146. * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
  147. *
  148. * @soc : core DP soc context
  149. * @peer_id : peer id from peer object can be retrieved
  150. * @handle : reference handle
  151. * @mod_id : ID of module requesting reference
  152. *
  153. * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
  154. */
  155. static inline struct dp_txrx_peer *
  156. dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
  157. uint16_t peer_id,
  158. dp_txrx_ref_handle *handle,
  159. enum dp_mod_id mod_id)
  160. {
  161. struct dp_peer *peer;
  162. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  163. if (!peer)
  164. return NULL;
  165. if (!peer->txrx_peer) {
  166. dp_peer_unref_delete(peer, mod_id);
  167. return NULL;
  168. }
  169. *handle = (dp_txrx_ref_handle)peer;
  170. return peer->txrx_peer;
  171. }
  172. #ifdef PEER_CACHE_RX_PKTS
  173. /**
  174. * dp_rx_flush_rx_cached() - flush cached rx frames
  175. * @peer: peer
  176. * @drop: set flag to drop frames
  177. *
  178. * Return: None
  179. */
  180. void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
  181. #else
  182. static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
  183. {
  184. }
  185. #endif
  186. static inline void
  187. dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
  188. {
  189. qdf_spin_lock_bh(&peer->peer_info_lock);
  190. peer->state = OL_TXRX_PEER_STATE_DISC;
  191. qdf_spin_unlock_bh(&peer->peer_info_lock);
  192. dp_rx_flush_rx_cached(peer, true);
  193. }
  194. /**
  195. * dp_vdev_iterate_peer() - API to iterate through vdev peer list
  196. *
  197. * @vdev : DP vdev context
  198. * @func : function to be called for each peer
  199. * @arg : argument need to be passed to func
  200. * @mod_id : module_id
  201. *
  202. * Return: void
  203. */
  204. static inline void
  205. dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
  206. enum dp_mod_id mod_id)
  207. {
  208. struct dp_peer *peer;
  209. struct dp_peer *tmp_peer;
  210. struct dp_soc *soc = NULL;
  211. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  212. return;
  213. soc = vdev->pdev->soc;
  214. qdf_spin_lock_bh(&vdev->peer_list_lock);
  215. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  216. peer_list_elem,
  217. tmp_peer) {
  218. if (dp_peer_get_ref(soc, peer, mod_id) ==
  219. QDF_STATUS_SUCCESS) {
  220. (*func)(soc, peer, arg);
  221. dp_peer_unref_delete(peer, mod_id);
  222. }
  223. }
  224. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  225. }
  226. /**
  227. * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
  228. *
  229. * @pdev : DP pdev context
  230. * @func : function to be called for each peer
  231. * @arg : argument need to be passed to func
  232. * @mod_id : module_id
  233. *
  234. * Return: void
  235. */
  236. static inline void
  237. dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
  238. enum dp_mod_id mod_id)
  239. {
  240. struct dp_vdev *vdev;
  241. if (!pdev)
  242. return;
  243. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  244. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
  245. dp_vdev_iterate_peer(vdev, func, arg, mod_id);
  246. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  247. }
  248. /**
  249. * dp_soc_iterate_peer() - API to iterate through all peers of soc
  250. *
  251. * @soc : DP soc context
  252. * @func : function to be called for each peer
  253. * @arg : argument need to be passed to func
  254. * @mod_id : module_id
  255. *
  256. * Return: void
  257. */
  258. static inline void
  259. dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
  260. enum dp_mod_id mod_id)
  261. {
  262. struct dp_pdev *pdev;
  263. int i;
  264. if (!soc)
  265. return;
  266. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  267. pdev = soc->pdev_list[i];
  268. dp_pdev_iterate_peer(pdev, func, arg, mod_id);
  269. }
  270. }
  271. /**
  272. * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
  273. *
  274. * This API will cache the peers in local allocated memory and calls
  275. * iterate function outside the lock.
  276. *
  277. * As this API is allocating new memory it is suggested to use this
  278. * only when lock cannot be held
  279. *
  280. * @vdev : DP vdev context
  281. * @func : function to be called for each peer
  282. * @arg : argument need to be passed to func
  283. * @mod_id : module_id
  284. *
  285. * Return: void
  286. */
  287. static inline void
  288. dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
  289. dp_peer_iter_func *func,
  290. void *arg,
  291. enum dp_mod_id mod_id)
  292. {
  293. struct dp_peer *peer;
  294. struct dp_peer *tmp_peer;
  295. struct dp_soc *soc = NULL;
  296. struct dp_peer **peer_array = NULL;
  297. int i = 0;
  298. uint32_t num_peers = 0;
  299. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  300. return;
  301. num_peers = vdev->num_peers;
  302. soc = vdev->pdev->soc;
  303. peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
  304. if (!peer_array)
  305. return;
  306. qdf_spin_lock_bh(&vdev->peer_list_lock);
  307. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  308. peer_list_elem,
  309. tmp_peer) {
  310. if (i >= num_peers)
  311. break;
  312. if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
  313. peer_array[i] = peer;
  314. i = (i + 1);
  315. }
  316. }
  317. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  318. for (i = 0; i < num_peers; i++) {
  319. peer = peer_array[i];
  320. if (!peer)
  321. continue;
  322. (*func)(soc, peer, arg);
  323. dp_peer_unref_delete(peer, mod_id);
  324. }
  325. qdf_mem_free(peer_array);
  326. }
  327. /**
  328. * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
  329. *
  330. * This API will cache the peers in local allocated memory and calls
  331. * iterate function outside the lock.
  332. *
  333. * As this API is allocating new memory it is suggested to use this
  334. * only when lock cannot be held
  335. *
  336. * @pdev : DP pdev context
  337. * @func : function to be called for each peer
  338. * @arg : argument need to be passed to func
  339. * @mod_id : module_id
  340. *
  341. * Return: void
  342. */
  343. static inline void
  344. dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
  345. dp_peer_iter_func *func,
  346. void *arg,
  347. enum dp_mod_id mod_id)
  348. {
  349. struct dp_peer *peer;
  350. struct dp_peer *tmp_peer;
  351. struct dp_soc *soc = NULL;
  352. struct dp_vdev *vdev = NULL;
  353. struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
  354. int i = 0;
  355. int j = 0;
  356. uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
  357. if (!pdev || !pdev->soc)
  358. return;
  359. soc = pdev->soc;
  360. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  361. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  362. num_peers[i] = vdev->num_peers;
  363. peer_array[i] = qdf_mem_malloc(num_peers[i] *
  364. sizeof(struct dp_peer *));
  365. if (!peer_array[i])
  366. break;
  367. qdf_spin_lock_bh(&vdev->peer_list_lock);
  368. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  369. peer_list_elem,
  370. tmp_peer) {
  371. if (j >= num_peers[i])
  372. break;
  373. if (dp_peer_get_ref(soc, peer, mod_id) ==
  374. QDF_STATUS_SUCCESS) {
  375. peer_array[i][j] = peer;
  376. j = (j + 1);
  377. }
  378. }
  379. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  380. i = (i + 1);
  381. }
  382. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  383. for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
  384. if (!peer_array[i])
  385. break;
  386. for (j = 0; j < num_peers[i]; j++) {
  387. peer = peer_array[i][j];
  388. if (!peer)
  389. continue;
  390. (*func)(soc, peer, arg);
  391. dp_peer_unref_delete(peer, mod_id);
  392. }
  393. qdf_mem_free(peer_array[i]);
  394. }
  395. }
  396. /**
  397. * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
  398. *
  399. * This API will cache the peers in local allocated memory and calls
  400. * iterate function outside the lock.
  401. *
  402. * As this API is allocating new memory it is suggested to use this
  403. * only when lock cannot be held
  404. *
  405. * @soc : DP soc context
  406. * @func : function to be called for each peer
  407. * @arg : argument need to be passed to func
  408. * @mod_id : module_id
  409. *
  410. * Return: void
  411. */
  412. static inline void
  413. dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
  414. dp_peer_iter_func *func,
  415. void *arg,
  416. enum dp_mod_id mod_id)
  417. {
  418. struct dp_pdev *pdev;
  419. int i;
  420. if (!soc)
  421. return;
  422. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  423. pdev = soc->pdev_list[i];
  424. dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
  425. }
  426. }
  427. #ifdef DP_PEER_STATE_DEBUG
  428. #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
  429. do { \
  430. if (!(_condition)) { \
  431. dp_alert("Invalid state shift from %u to %u peer " \
  432. QDF_MAC_ADDR_FMT, \
  433. (_peer)->peer_state, (_new_state), \
  434. QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
  435. QDF_ASSERT(0); \
  436. } \
  437. } while (0)
  438. #else
  439. #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
  440. do { \
  441. if (!(_condition)) { \
  442. dp_alert("Invalid state shift from %u to %u peer " \
  443. QDF_MAC_ADDR_FMT, \
  444. (_peer)->peer_state, (_new_state), \
  445. QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
  446. } \
  447. } while (0)
  448. #endif
  449. /**
  450. * dp_peer_state_cmp() - compare dp peer state
  451. *
  452. * @peer : DP peer
  453. * @state : state
  454. *
  455. * Return: true if state matches with peer state
  456. * false if it does not match
  457. */
  458. static inline bool
  459. dp_peer_state_cmp(struct dp_peer *peer,
  460. enum dp_peer_state state)
  461. {
  462. bool is_status_equal = false;
  463. qdf_spin_lock_bh(&peer->peer_state_lock);
  464. is_status_equal = (peer->peer_state == state);
  465. qdf_spin_unlock_bh(&peer->peer_state_lock);
  466. return is_status_equal;
  467. }
  468. void dp_print_ast_stats(struct dp_soc *soc);
  469. QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  470. uint16_t hw_peer_id, uint8_t vdev_id,
  471. uint8_t *peer_mac_addr, uint16_t ast_hash,
  472. uint8_t is_wds);
  473. void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
  474. uint8_t vdev_id, uint8_t *peer_mac_addr,
  475. uint8_t is_wds, uint32_t free_wds_count);
  476. #ifdef DP_RX_UDP_OVER_PEER_ROAM
  477. /**
  478. * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
  479. * @soc - dp soc pointer
  480. * @vdev_id - vdev id
  481. * @peer_mac_addr - mac address of the peer
  482. *
  483. * This function resets the roamed peer auth status and mac address
  484. * after peer map indication of same peer is received from firmware.
  485. *
  486. * Return: None
  487. */
  488. void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
  489. uint8_t *peer_mac_addr);
  490. #else
  491. static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
  492. uint8_t *peer_mac_addr)
  493. {
  494. }
  495. #endif
  496. #ifdef WLAN_FEATURE_11BE_MLO
  497. /**
  498. * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
  499. * @soc_handle - generic soc handle
  500. * @peer_id - ML peer_id from firmware
  501. * @peer_mac_addr - mac address of the peer
  502. * @mlo_ast_flow_info: MLO AST flow info
  503. * @mlo_link_info - MLO link info
  504. *
  505. * associate the ML peer_id that firmware provided with peer entry
  506. * and update the ast table in the host with the hw_peer_id.
  507. *
  508. * Return: QDF_STATUS code
  509. */
  510. QDF_STATUS
  511. dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  512. uint8_t *peer_mac_addr,
  513. struct dp_mlo_flow_override_info *mlo_flow_info,
  514. struct dp_mlo_link_info *mlo_link_info);
  515. /**
  516. * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
  517. * @soc_handle - generic soc handle
  518. * @peeri_id - peer_id from firmware
  519. *
  520. * Return: none
  521. */
  522. void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
  523. #endif
  524. void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
  525. enum cdp_sec_type sec_type, int is_unicast,
  526. u_int32_t *michael_key, u_int32_t *rx_pn);
  527. QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
  528. uint8_t tid, uint16_t win_sz);
  529. uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
  530. uint16_t peer_id, uint8_t *peer_mac);
  531. QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
  532. uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
  533. uint32_t flags);
  534. void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
  535. void dp_peer_ast_unmap_handler(struct dp_soc *soc,
  536. struct dp_ast_entry *ast_entry);
  537. int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
  538. struct dp_ast_entry *ast_entry, uint32_t flags);
  539. struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
  540. uint8_t *ast_mac_addr,
  541. uint8_t pdev_id);
  542. struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
  543. uint8_t *ast_mac_addr,
  544. uint8_t vdev_id);
  545. struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
  546. uint8_t *ast_mac_addr);
  547. uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
  548. struct dp_ast_entry *ast_entry);
  549. uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
  550. struct dp_ast_entry *ast_entry);
  551. void dp_peer_ast_set_type(struct dp_soc *soc,
  552. struct dp_ast_entry *ast_entry,
  553. enum cdp_txrx_ast_entry_type type);
  554. void dp_peer_ast_send_wds_del(struct dp_soc *soc,
  555. struct dp_ast_entry *ast_entry,
  556. struct dp_peer *peer);
  557. #ifdef WLAN_FEATURE_MULTI_AST_DEL
  558. void dp_peer_ast_send_multi_wds_del(
  559. struct dp_soc *soc, uint8_t vdev_id,
  560. struct peer_del_multi_wds_entries *wds_list);
  561. #endif
  562. void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  563. struct cdp_soc *dp_soc,
  564. void *cookie,
  565. enum cdp_ast_free_status status);
  566. void dp_peer_ast_hash_remove(struct dp_soc *soc,
  567. struct dp_ast_entry *ase);
  568. void dp_peer_free_ast_entry(struct dp_soc *soc,
  569. struct dp_ast_entry *ast_entry);
  570. void dp_peer_unlink_ast_entry(struct dp_soc *soc,
  571. struct dp_ast_entry *ast_entry,
  572. struct dp_peer *peer);
  573. /**
  574. * dp_peer_mec_detach_entry() - Detach the MEC entry
  575. * @soc: SoC handle
  576. * @mecentry: MEC entry of the node
  577. * @ptr: pointer to free list
  578. *
  579. * The MEC entry is detached from MEC table and added to free_list
  580. * to free the object outside lock
  581. *
  582. * Return: None
  583. */
  584. void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
  585. void *ptr);
  586. /**
  587. * dp_peer_mec_free_list() - free the MEC entry from free_list
  588. * @soc: SoC handle
  589. * @ptr: pointer to free list
  590. *
  591. * Return: None
  592. */
  593. void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
  594. /**
  595. * dp_peer_mec_add_entry()
  596. * @soc: SoC handle
  597. * @vdev: vdev to which mec node belongs
  598. * @mac_addr: MAC address of mec node
  599. *
  600. * This function allocates and adds MEC entry to MEC table.
  601. * It assumes caller has taken the mec lock to protect the access to these
  602. * tables
  603. *
  604. * Return: QDF_STATUS
  605. */
  606. QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
  607. struct dp_vdev *vdev,
  608. uint8_t *mac_addr);
  609. /**
  610. * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
  611. * within pdev
  612. * @soc: SoC handle
  613. *
  614. * It assumes caller has taken the mec_lock to protect the access to
  615. * MEC hash table
  616. *
  617. * Return: MEC entry
  618. */
  619. struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
  620. uint8_t pdev_id,
  621. uint8_t *mec_mac_addr);
  622. #define DP_AST_ASSERT(_condition) \
  623. do { \
  624. if (!(_condition)) { \
  625. dp_print_ast_stats(soc);\
  626. QDF_BUG(_condition); \
  627. } \
  628. } while (0)
  629. /**
  630. * dp_peer_update_inactive_time - Update inactive time for peer
  631. * @pdev: pdev object
  632. * @tag_type: htt_tlv_tag type
  633. * #tag_buf: buf message
  634. */
  635. void
  636. dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
  637. uint32_t *tag_buf);
  638. #ifndef QCA_MULTIPASS_SUPPORT
  639. /**
  640. * dp_peer_set_vlan_id: set vlan_id for this peer
  641. * @cdp_soc: soc handle
  642. * @vdev_id: id of vdev object
  643. * @peer_mac: mac address
  644. * @vlan_id: vlan id for peer
  645. *
  646. * return: void
  647. */
  648. static inline
  649. void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
  650. uint8_t vdev_id, uint8_t *peer_mac,
  651. uint16_t vlan_id)
  652. {
  653. }
  654. /**
  655. * dp_set_vlan_groupkey: set vlan map for vdev
  656. * @soc: pointer to soc
  657. * @vdev_id: id of vdev handle
  658. * @vlan_id: vlan_id
  659. * @group_key: group key for vlan
  660. *
  661. * return: set success/failure
  662. */
  663. static inline
  664. QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
  665. uint16_t vlan_id, uint16_t group_key)
  666. {
  667. return QDF_STATUS_SUCCESS;
  668. }
  669. /**
  670. * dp_peer_multipass_list_init: initialize multipass peer list
  671. * @vdev: pointer to vdev
  672. *
  673. * return: void
  674. */
  675. static inline
  676. void dp_peer_multipass_list_init(struct dp_vdev *vdev)
  677. {
  678. }
  679. /**
  680. * dp_peer_multipass_list_remove: remove peer from special peer list
  681. * @peer: peer handle
  682. *
  683. * return: void
  684. */
  685. static inline
  686. void dp_peer_multipass_list_remove(struct dp_peer *peer)
  687. {
  688. }
  689. #else
  690. void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
  691. uint8_t vdev_id, uint8_t *peer_mac,
  692. uint16_t vlan_id);
  693. QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
  694. uint16_t vlan_id, uint16_t group_key);
  695. void dp_peer_multipass_list_init(struct dp_vdev *vdev);
  696. void dp_peer_multipass_list_remove(struct dp_peer *peer);
  697. #endif
  698. #ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * Stub: QCA_PEER_MULTIQ_SUPPORT is disabled, so there is no flow queue
 * map to reset.
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}
/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl: generic soc handle
 * @is_wds: flag to indicate if peer is wds
 * @peer_id: peer_id from htt peer map message
 * @peer_mac_addr: mac address of the peer
 * @ast_info: ast flow override information from peer map
 *
 * Stub: QCA_PEER_MULTIQ_SUPPORT is disabled, no map is created.
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
struct dp_ast_flow_override_info *ast_info)
{
}
  725. #else
  726. void dp_peer_reset_flowq_map(struct dp_peer *peer);
  727. void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
  728. bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
  729. struct dp_ast_flow_override_info *ast_info);
  730. #endif
  731. /*
  732. * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
  733. * after deleting the entries (ie., setting valid=0)
  734. *
  735. * @soc: DP SOC handle
  736. * @cb_ctxt: Callback context
  737. * @reo_status: REO command status
  738. */
  739. void dp_rx_tid_delete_cb(struct dp_soc *soc,
  740. void *cb_ctxt,
  741. union hal_reo_status *reo_status);
  742. #ifdef QCA_PEER_EXT_STATS
  743. QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
  744. struct dp_txrx_peer *txrx_peer);
  745. void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
  746. struct dp_txrx_peer *txrx_peer);
  747. void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
  748. #else
/* QCA_PEER_EXT_STATS disabled: delay-stats context helpers are no-ops */
static inline
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer)
{
/* nothing to allocate, report success so callers proceed */
return QDF_STATUS_SUCCESS;
}
static inline
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer)
{
}
static inline
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
  764. #endif
  765. #ifdef WLAN_PEER_JITTER
  766. QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
  767. struct dp_txrx_peer *txrx_peer);
  768. void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
  769. struct dp_txrx_peer *txrx_peer);
  770. void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
  771. #else
/* WLAN_PEER_JITTER disabled: jitter-stats context helpers are no-ops */
static inline
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
struct dp_txrx_peer *txrx_peer)
{
/* nothing to allocate, report success so callers proceed */
return QDF_STATUS_SUCCESS;
}
static inline
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
struct dp_txrx_peer *txrx_peer)
{
}
static inline
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
  787. #endif
  788. #ifndef CONFIG_SAWF_DEF_QUEUES
/* CONFIG_SAWF_DEF_QUEUES disabled: SAWF context helpers are no-ops */
static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
struct dp_peer *peer)
{
return QDF_STATUS_SUCCESS;
}
static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
struct dp_peer *peer)
{
return QDF_STATUS_SUCCESS;
}
  799. #endif
  800. #ifndef CONFIG_SAWF
/* CONFIG_SAWF disabled: SAWF stats context helpers are no-ops */
static inline
QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer)
{
return QDF_STATUS_SUCCESS;
}
static inline
QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
struct dp_txrx_peer *txrx_peer)
{
return QDF_STATUS_SUCCESS;
}
  813. #endif
  814. struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
  815. struct dp_vdev *vdev,
  816. enum dp_mod_id mod_id);
  817. struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
  818. struct dp_vdev *vdev,
  819. enum dp_mod_id mod_id);
  820. void dp_peer_ast_table_detach(struct dp_soc *soc);
  821. void dp_peer_find_map_detach(struct dp_soc *soc);
  822. void dp_soc_wds_detach(struct dp_soc *soc);
  823. QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
  824. QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
  825. QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
  826. void dp_soc_wds_attach(struct dp_soc *soc);
  827. void dp_peer_mec_hash_detach(struct dp_soc *soc);
  828. void dp_peer_ast_hash_detach(struct dp_soc *soc);
  829. #ifdef FEATURE_AST
/**
 * dp_peer_delete_ast_entries() - Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
struct dp_peer *peer)
{
struct dp_ast_entry *ast_entry, *temp_ast_entry;
dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
/*
 * Delete peer self ast entry. This is done to handle scenarios
 * where peer is freed before peer map is received(for ex in case
 * of auth disallow due to ACL) in such cases self ast is not added
 * to peer->ast_list.
 */
if (peer->self_ast_entry) {
dp_peer_del_ast(soc, peer->self_ast_entry);
peer->self_ast_entry = NULL;
}
/*
 * NOTE(review): assumes DP_PEER_ITERATE_ASE_LIST is a
 * deletion-safe iterator (temp_ast_entry holds the next node) -
 * confirm against the macro definition.
 */
DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
dp_peer_del_ast(soc, ast_entry);
}
  855. void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
  856. void *arg);
  857. #else
/* FEATURE_AST disabled: AST helpers compile away to no-ops */
static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
struct dp_peer *peer, void *arg)
{
}
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
struct dp_peer *peer)
{
}
  866. #endif
  867. #ifdef FEATURE_MEC
  868. /**
  869. * dp_peer_mec_spinlock_create() - Create the MEC spinlock
  870. * @soc: SoC handle
  871. *
  872. * Return: none
  873. */
  874. void dp_peer_mec_spinlock_create(struct dp_soc *soc);
  875. /**
  876. * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
  877. * @soc: SoC handle
  878. *
  879. * Return: none
  880. */
  881. void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);
  882. /**
  883. * dp_peer_mec_flush_entries() - Delete all mec entries in table
  884. * @soc: Datapath SOC
  885. *
  886. * Return: None
  887. */
  888. void dp_peer_mec_flush_entries(struct dp_soc *soc);
  889. #else
/* FEATURE_MEC disabled: MEC lock/flush helpers are no-ops */
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}
static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}
static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
  899. #endif
  900. #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
  901. /**
  902. * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
  903. * @soc : dp_soc handle
  904. * @peer: peer
  905. *
  906. * This function is used to send cache flush cmd to reo and
  907. * to register the callback to handle the dumping of the reo
 * queue stats from DDR
  909. *
  910. * Return: none
  911. */
  912. void dp_send_cache_flush_for_rx_tid(
  913. struct dp_soc *soc, struct dp_peer *peer);
  914. /**
  915. * dp_get_rx_reo_queue_info() - Handler to get rx tid info
  916. * @soc : cdp_soc_t handle
  917. * @vdev_id: vdev id
  918. *
  919. * Handler to get rx tid info from DDR after h/w cache is
  920. * invalidated first using the cache flush cmd.
  921. *
  922. * Return: none
  923. */
  924. void dp_get_rx_reo_queue_info(
  925. struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
  926. /**
  927. * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
  928. * @soc : dp_soc handle
  929. * @cb_ctxt - callback context
 * @reo_status: REO command status
  931. *
  932. * This is the callback function registered after sending the reo cmd
  933. * to flush the h/w cache and invalidate it. In the callback the reo
  934. * queue desc info is dumped from DDR.
  935. *
  936. * Return: none
  937. */
  938. void dp_dump_rx_reo_queue_info(
  939. struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
  940. #else /* DUMP_REO_QUEUE_INFO_IN_DDR */
/* DUMP_REO_QUEUE_INFO_IN_DDR disabled: nothing to fetch or dump */
static inline void dp_get_rx_reo_queue_info(
struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
  945. #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
  946. static inline int dp_peer_find_mac_addr_cmp(
  947. union dp_align_mac_addr *mac_addr1,
  948. union dp_align_mac_addr *mac_addr2)
  949. {
  950. /*
  951. * Intentionally use & rather than &&.
  952. * because the operands are binary rather than generic boolean,
  953. * the functionality is equivalent.
  954. * Using && has the advantage of short-circuited evaluation,
  955. * but using & has the advantage of no conditional branching,
  956. * which is a more significant benefit.
  957. */
  958. return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
  959. & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
  960. }
  961. /**
  962. * dp_peer_delete() - delete DP peer
  963. *
  964. * @soc: Datatpath soc
  965. * @peer: Datapath peer
  966. * @arg: argument to iter function
  967. *
  968. * Return: void
  969. */
  970. void dp_peer_delete(struct dp_soc *soc,
  971. struct dp_peer *peer,
  972. void *arg);
  973. /**
  974. * dp_mlo_peer_delete() - delete MLO DP peer
  975. *
  976. * @soc: Datapath soc
  977. * @peer: Datapath peer
  978. * @arg: argument to iter function
  979. *
  980. * Return: void
  981. */
  982. void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);
  983. #ifdef WLAN_FEATURE_11BE_MLO
  984. /* is MLO connection mld peer */
  985. #define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)
  986. /* set peer type */
  987. #define DP_PEER_SET_TYPE(_peer, _type_val) \
  988. ((_peer)->peer_type = (_type_val))
  989. /* is legacy peer */
  990. #define IS_DP_LEGACY_PEER(_peer) \
  991. ((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))
  992. /* is MLO connection link peer */
  993. #define IS_MLO_DP_LINK_PEER(_peer) \
  994. ((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
  995. /* is MLO connection mld peer */
  996. #define IS_MLO_DP_MLD_PEER(_peer) \
  997. ((_peer)->peer_type == CDP_MLD_PEER_TYPE)
  998. /* Get Mld peer from link peer */
  999. #define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
  1000. ((link_peer)->mld_peer)
  1001. #ifdef WLAN_MLO_MULTI_CHIP
  1002. uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);
  1003. struct dp_peer *
  1004. dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
  1005. uint8_t *peer_mac_addr,
  1006. int mac_addr_is_aligned,
  1007. uint8_t vdev_id,
  1008. uint8_t chip_id,
  1009. enum dp_mod_id mod_id);
  1010. #else
/* single-chip build: chip id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
return 0;
}
/* single-chip build: chip id is irrelevant, fall back to plain hash find */
static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
uint8_t *peer_mac_addr,
int mac_addr_is_aligned,
uint8_t vdev_id,
uint8_t chip_id,
enum dp_mod_id mod_id)
{
return dp_peer_find_hash_find(soc, peer_mac_addr,
mac_addr_is_aligned,
vdev_id, mod_id);
}
  1027. #endif
/**
 * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
 * matching mac_address
 * @soc: soc handle
 * @peer_mac_addr: mld peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * Return: peer on success
 *         NULL on failure
 */
static inline
struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
uint8_t *peer_mac_addr,
int mac_addr_is_aligned,
uint8_t vdev_id,
enum dp_mod_id mod_id)
{
/* arch op is only populated on MLO-capable targets */
if (soc->arch_ops.mlo_peer_find_hash_find)
/*
 * NOTE(review): mod_id is passed ahead of vdev_id here, the
 * reverse of this wrapper's parameter order - confirm against
 * the mlo_peer_find_hash_find op's signature.
 */
return soc->arch_ops.mlo_peer_find_hash_find(soc,
peer_mac_addr,
mac_addr_is_aligned,
mod_id, vdev_id);
return NULL;
}
  1054. /**
  1055. * dp_peer_hash_find_wrapper() - find link peer or mld per according to
  1056. peer_type
  1057. * @soc: DP SOC handle
  1058. * @peer_info: peer information for hash find
  1059. * @mod_id: ID of module requesting reference
  1060. *
  1061. * Return: peer handle
  1062. */
  1063. static inline
  1064. struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
  1065. struct cdp_peer_info *peer_info,
  1066. enum dp_mod_id mod_id)
  1067. {
  1068. struct dp_peer *peer = NULL;
  1069. if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
  1070. peer_info->peer_type == CDP_WILD_PEER_TYPE) {
  1071. peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
  1072. peer_info->mac_addr_is_aligned,
  1073. peer_info->vdev_id,
  1074. mod_id);
  1075. if (peer)
  1076. return peer;
  1077. }
  1078. if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
  1079. peer_info->peer_type == CDP_WILD_PEER_TYPE)
  1080. peer = dp_mld_peer_find_hash_find(
  1081. soc, peer_info->mac_addr,
  1082. peer_info->mac_addr_is_aligned,
  1083. peer_info->vdev_id,
  1084. mod_id);
  1085. return peer;
  1086. }
/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
 * increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * The reference taken here is dropped by dp_link_peer_del_mld_peer().
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
struct dp_peer *mld_peer)
{
/* increase mld_peer ref_cnt */
dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
link_peer->mld_peer = mld_peer;
}
/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
 * decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Drops the reference taken in dp_link_peer_add_mld_peer() and clears
 * the link peer's mld_peer pointer.
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
link_peer->mld_peer = NULL;
}
/**
 * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Creates link_peers_info_lock and marks every entry of the link_peers[]
 * table invalid with a link count of zero.
 *
 * Return: None
 */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
int i;
qdf_spinlock_create(&mld_peer->link_peers_info_lock);
mld_peer->num_links = 0;
for (i = 0; i < DP_MAX_MLO_LINKS; i++)
mld_peer->link_peers[i].is_valid = false;
}
/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Destroys the spinlock created by dp_mld_peer_init_link_peers_info().
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}
  1142. /**
  1143. * dp_mld_peer_add_link_peer() - add link peer info to mld peer
  1144. * @mld_peer: mld dp peer pointer
  1145. * @link_peer: link dp peer pointer
  1146. *
  1147. * Return: None
  1148. */
  1149. static inline
  1150. void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
  1151. struct dp_peer *link_peer)
  1152. {
  1153. int i;
  1154. struct dp_peer_link_info *link_peer_info;
  1155. bool action_done = false;
  1156. struct dp_soc *soc = mld_peer->vdev->pdev->soc;
  1157. qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
  1158. for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
  1159. link_peer_info = &mld_peer->link_peers[i];
  1160. if (!link_peer_info->is_valid) {
  1161. qdf_mem_copy(link_peer_info->mac_addr.raw,
  1162. link_peer->mac_addr.raw,
  1163. QDF_MAC_ADDR_SIZE);
  1164. link_peer_info->is_valid = true;
  1165. link_peer_info->vdev_id = link_peer->vdev->vdev_id;
  1166. link_peer_info->chip_id =
  1167. dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
  1168. mld_peer->num_links++;
  1169. action_done = true;
  1170. break;
  1171. }
  1172. }
  1173. qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
  1174. if (i == DP_MAX_MLO_LINKS)
  1175. dp_err("fail to add link peer" QDF_MAC_ADDR_FMT "to mld peer",
  1176. QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
  1177. else
  1178. dp_peer_info("%s addition of link peer %pK (" QDF_MAC_ADDR_FMT ") "
  1179. "to MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
  1180. "idx %u num_links %u",
  1181. action_done ? "Successful" : "Failed",
  1182. mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
  1183. link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
  1184. i, mld_peer->num_links);
  1185. dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_ADD_LINK,
  1186. mld_peer, link_peer, i,
  1187. action_done ? 1 : 0);
  1188. }
  1189. /**
  1190. * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
  1191. * @mld_peer: MLD dp peer pointer
  1192. * @link_peer: link dp peer pointer
  1193. *
  1194. * Return: number of links left after deletion
  1195. */
  1196. static inline
  1197. uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
  1198. struct dp_peer *link_peer)
  1199. {
  1200. int i;
  1201. struct dp_peer_link_info *link_peer_info;
  1202. uint8_t num_links;
  1203. bool action_done = false;
  1204. struct dp_soc *soc = mld_peer->vdev->pdev->soc;
  1205. qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
  1206. for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
  1207. link_peer_info = &mld_peer->link_peers[i];
  1208. if (link_peer_info->is_valid &&
  1209. !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
  1210. &link_peer_info->mac_addr)) {
  1211. link_peer_info->is_valid = false;
  1212. mld_peer->num_links--;
  1213. action_done = true;
  1214. break;
  1215. }
  1216. }
  1217. num_links = mld_peer->num_links;
  1218. qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
  1219. if (i == DP_MAX_MLO_LINKS)
  1220. dp_err("fail to del link peer" QDF_MAC_ADDR_FMT "to mld peer",
  1221. QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
  1222. else
  1223. dp_peer_info("%s deletion of link peer %pK (" QDF_MAC_ADDR_FMT ") "
  1224. "from MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
  1225. "idx %u num_links %u",
  1226. action_done ? "Successful" : "Failed",
  1227. mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
  1228. link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
  1229. i, mld_peer->num_links);
  1230. dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_DEL_LINK,
  1231. mld_peer, link_peer, i,
  1232. action_done ? 1 : 0);
  1233. return num_links;
  1234. }
/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
 * increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that hold links peers pointer array and number
 * @mod_id: id of module requesting reference
 *
 * Looks up every valid link peer recorded in @mld_peer's link table and
 * stores it, with a reference held, into @mld_link_peers. Callers must
 * release the references via dp_release_link_peers_ref().
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
struct dp_soc *soc,
struct dp_peer *mld_peer,
struct dp_mld_link_peers *mld_link_peers,
enum dp_mod_id mod_id)
{
struct dp_peer *peer;
uint8_t i = 0, j = 0;
struct dp_peer_link_info *link_peer_info;
/* start from a clean output structure */
qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
link_peer_info = &mld_peer->link_peers[i];
if (link_peer_info->is_valid) {
/* hash find takes a reference on the returned peer */
peer = dp_link_peer_hash_find_by_chip_id(
soc,
link_peer_info->mac_addr.raw,
true,
link_peer_info->vdev_id,
link_peer_info->chip_id,
mod_id);
if (peer)
mld_link_peers->link_peers[j++] = peer;
}
}
qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
mld_link_peers->num_links = j;
}
  1274. /**
  1275. * dp_release_link_peers_ref() - release all link peers reference
  1276. * @mld_link_peers: structure that hold links peers pointer array and number
  1277. * @mod_id: id of module requesting reference
  1278. *
  1279. * Return: None.
  1280. */
  1281. static inline
  1282. void dp_release_link_peers_ref(
  1283. struct dp_mld_link_peers *mld_link_peers,
  1284. enum dp_mod_id mod_id)
  1285. {
  1286. struct dp_peer *peer;
  1287. uint8_t i;
  1288. for (i = 0; i < mld_link_peers->num_links; i++) {
  1289. peer = mld_link_peers->link_peers[i];
  1290. if (peer)
  1291. dp_peer_unref_delete(peer, mod_id);
  1292. mld_link_peers->link_peers[i] = NULL;
  1293. }
  1294. mld_link_peers->num_links = 0;
  1295. }
/**
 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
 * @soc: Datapath soc handle
 * @peer_id: peer id
 * @lmac_id: lmac id to find the link peer on given lmac
 *
 * Return: peer_id of link peer if found
 *         else return HTT_INVALID_PEER
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
uint8_t lmac_id)
{
uint8_t i;
struct dp_peer *peer;
struct dp_peer *link_peer;
struct dp_soc *link_peer_soc;
struct dp_mld_link_peers link_peers_info;
uint16_t link_peer_id = HTT_INVALID_PEER;
peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
if (!peer)
return HTT_INVALID_PEER;
if (IS_MLO_DP_MLD_PEER(peer)) {
/* get link peers with reference */
dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
DP_MOD_ID_CDP);
/* pick the link peer that lives on this soc and lmac */
for (i = 0; i < link_peers_info.num_links; i++) {
link_peer = link_peers_info.link_peers[i];
link_peer_soc = link_peer->vdev->pdev->soc;
if ((link_peer_soc == soc) &&
(link_peer->vdev->pdev->lmac_id == lmac_id)) {
link_peer_id = link_peer->peer_id;
break;
}
}
/* release link peers reference */
dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
} else {
/* legacy or link peer: its own id is the answer */
link_peer_id = peer_id;
}
dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
return link_peer_id;
}
/**
 * dp_peer_get_tgt_peer_hash_find() - get dp_peer handle
 * @soc: soc handle
 * @peer_mac: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * for MLO connection, get corresponding MLD peer,
 * otherwise get link peer for non-MLO case.
 *
 * Return: peer on success
 *         NULL on failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
uint8_t *peer_mac,
int mac_addr_is_aligned,
uint8_t vdev_id,
enum dp_mod_id mod_id)
{
struct dp_peer *ta_peer = NULL;
/*
 * NOTE(review): 0 is passed as the aligned flag, so the
 * mac_addr_is_aligned parameter is ignored here - confirm whether
 * that is intentional.
 */
struct dp_peer *peer = dp_peer_find_hash_find(soc,
peer_mac, 0, vdev_id,
mod_id);
if (peer) {
/* mlo connection link peer, get mld peer with reference */
if (IS_MLO_DP_LINK_PEER(peer)) {
/* increase mld peer ref_cnt */
if (QDF_STATUS_SUCCESS ==
dp_peer_get_ref(soc, peer->mld_peer, mod_id))
ta_peer = peer->mld_peer;
else
ta_peer = NULL;
/* release peer reference that added by hash find */
dp_peer_unref_delete(peer, mod_id);
} else {
/* mlo MLD peer or non-mlo link peer */
ta_peer = peer;
}
} else {
dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT,
QDF_MAC_ADDR_REF(peer_mac));
}
return ta_peer;
}
/**
 * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
 * @soc: core DP soc context
 * @peer_id: peer id from peer object can be retrieved
 * @mod_id: ID of module requesting reference
 *
 * for MLO connection, get corresponding MLD peer,
 * otherwise get link peer for non-MLO case.
 *
 * Return: peer on success
 *         NULL on failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
uint16_t peer_id,
enum dp_mod_id mod_id)
{
struct dp_peer *ta_peer = NULL;
struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
if (peer) {
/* mlo connection link peer, get mld peer with reference */
if (IS_MLO_DP_LINK_PEER(peer)) {
/* increase mld peer ref_cnt */
if (QDF_STATUS_SUCCESS ==
dp_peer_get_ref(soc, peer->mld_peer, mod_id))
ta_peer = peer->mld_peer;
else
ta_peer = NULL;
/* release peer reference that added by id lookup */
dp_peer_unref_delete(peer, mod_id);
} else {
/* mlo MLD peer or non-mlo link peer */
ta_peer = peer;
}
}
return ta_peer;
}
/**
 * dp_peer_mlo_delete() - peer MLO related delete operation
 * @peer: DP peer handle
 *
 * For an MLO link peer, removes its entry from the MLD peer's link
 * table; when the last link is removed, the MLD peer itself is deleted.
 *
 * Return: None
 */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
struct dp_peer *ml_peer;
struct dp_soc *soc;
dp_info("peer " QDF_MAC_ADDR_FMT " type %d",
QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->peer_type);
/* MLO connection link peer */
if (IS_MLO_DP_LINK_PEER(peer)) {
ml_peer = peer->mld_peer;
soc = ml_peer->vdev->pdev->soc;
/* if last link peer deletion, delete MLD peer */
if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
dp_peer_delete(soc, peer->mld_peer, NULL);
}
}
  1443. /**
  1444. * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
  1445. * @soc: Soc handle
  1446. * @vdev_id: Vdev ID
  1447. * @peer_setup_info: peer setup information for MLO
  1448. */
  1449. QDF_STATUS dp_peer_mlo_setup(
  1450. struct dp_soc *soc,
  1451. struct dp_peer *peer,
  1452. uint8_t vdev_id,
  1453. struct cdp_peer_setup_info *setup_info);
  1454. /**
  1455. * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
  1456. * @peer: datapath peer
  1457. *
  1458. * Return: MLD peer in case of MLO Link peer
  1459. * Peer itself in other cases
  1460. */
  1461. static inline
  1462. struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
  1463. {
  1464. return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
  1465. }
/**
 * dp_get_primary_link_peer_by_id() - Get primary link peer from the given
 * peer id
 * @soc: core DP soc context
 * @peer_id: peer id
 * @mod_id: ID of module requesting reference
 *
 * Caller holds a reference on the returned peer and must release it with
 * dp_peer_unref_delete().
 *
 * Return: primary link peer for the MLO peer
 *         legacy peer itself in case of legacy peer
 */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
uint16_t peer_id,
enum dp_mod_id mod_id)
{
uint8_t i;
struct dp_mld_link_peers link_peers_info;
struct dp_peer *peer;
struct dp_peer *link_peer;
struct dp_peer *primary_peer = NULL;
peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
if (!peer)
return NULL;
if (IS_MLO_DP_MLD_PEER(peer)) {
/* get link peers with reference */
dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
mod_id);
for (i = 0; i < link_peers_info.num_links; i++) {
link_peer = link_peers_info.link_peers[i];
if (link_peer->primary_link) {
primary_peer = link_peer;
/*
 * Take additional reference over
 * primary link peer.
 */
dp_peer_get_ref(NULL, primary_peer, mod_id);
break;
}
}
/* release link peers reference */
dp_release_link_peers_ref(&link_peers_info, mod_id);
/* MLD peer reference is no longer needed */
dp_peer_unref_delete(peer, mod_id);
} else {
/* legacy peer: returned with the reference taken above */
primary_peer = peer;
}
return primary_peer;
}
  1513. /**
  1514. * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
  1515. * @peer: Datapath peer
  1516. *
  1517. * Return: dp_txrx_peer from MLD peer if peer type is link peer
  1518. * dp_txrx_peer from peer itself for other cases
  1519. */
  1520. static inline
  1521. struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
  1522. {
  1523. return IS_MLO_DP_LINK_PEER(peer) ?
  1524. peer->mld_peer->txrx_peer : peer->txrx_peer;
  1525. }
  1526. /**
  1527. * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
  1528. * @peer: Datapath peer
  1529. *
  1530. * Return: true if peer is primary link peer or legacy peer
  1531. * false otherwise
  1532. */
  1533. static inline
  1534. bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
  1535. {
  1536. if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
  1537. return true;
  1538. else if (IS_DP_LEGACY_PEER(peer))
  1539. return true;
  1540. else
  1541. return false;
  1542. }
/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from peer object can be retrieved
 * @handle: reference handle; on success receives the dp_peer that holds
 *          the reference, which the caller must later release
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
uint16_t peer_id,
dp_txrx_ref_handle *handle,
enum dp_mod_id mod_id)
{
struct dp_peer *peer;
struct dp_txrx_peer *txrx_peer;
peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
if (!peer)
return NULL;
/* for an MLO link peer this resolves to the MLD peer's txrx_peer */
txrx_peer = dp_get_txrx_peer(peer);
if (txrx_peer) {
/* hand the referenced dp_peer back to the caller */
*handle = (dp_txrx_ref_handle)peer;
return txrx_peer;
}
/* no txrx peer: drop the reference taken above */
dp_peer_unref_delete(peer, mod_id);
return NULL;
}
  1572. /**
  1573. * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
  1574. *
  1575. * @soc : core DP soc context
  1576. *
  1577. * Return: void
  1578. */
  1579. void dp_print_mlo_ast_stats_be(struct dp_soc *soc);
  1580. #else
  1581. #define IS_MLO_DP_MLD_TXRX_PEER(_peer) false
  1582. #define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
  1583. /* is legacy peer */
  1584. #define IS_DP_LEGACY_PEER(_peer) true
  1585. #define IS_MLO_DP_LINK_PEER(_peer) false
  1586. #define IS_MLO_DP_MLD_PEER(_peer) false
  1587. #define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL
/* non-MLO build: only the link peer hash table exists */
static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
struct cdp_peer_info *peer_info,
enum dp_mod_id mod_id)
{
return dp_peer_find_hash_find(soc, peer_info->mac_addr,
peer_info->mac_addr_is_aligned,
peer_info->vdev_id,
mod_id);
}
/* non-MLO build: target peer is simply the hash-table peer */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
uint8_t *peer_mac,
int mac_addr_is_aligned,
uint8_t vdev_id,
enum dp_mod_id mod_id)
{
return dp_peer_find_hash_find(soc, peer_mac,
mac_addr_is_aligned, vdev_id,
mod_id);
}
/* non-MLO build: target peer is the peer referenced by id */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
uint16_t peer_id,
enum dp_mod_id mod_id)
{
return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}
/* non-MLO build: nothing to set up, report success */
static inline
QDF_STATUS dp_peer_mlo_setup(
struct dp_soc *soc,
struct dp_peer *peer,
uint8_t vdev_id,
struct cdp_peer_setup_info *setup_info)
{
return QDF_STATUS_SUCCESS;
}
/* Non-MLO stub: no MLD link-peer list exists, nothing to initialize */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}
/* Non-MLO stub: no MLD link-peer list exists, nothing to deinitialize */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}
/* Non-MLO stub: link peers carry no MLD back-pointer to detach */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}
/* Non-MLO stub: no MLO-specific peer state to tear down */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}
/* Non-MLO stub: authorization propagation across link peers not needed */
static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
}
/* Non-MLO stub: single-chip configuration, chip id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}
  1650. static inline struct dp_peer *
  1651. dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
  1652. uint8_t *peer_mac_addr,
  1653. int mac_addr_is_aligned,
  1654. uint8_t vdev_id,
  1655. uint8_t chip_id,
  1656. enum dp_mod_id mod_id)
  1657. {
  1658. return dp_peer_find_hash_find(soc, peer_mac_addr,
  1659. mac_addr_is_aligned,
  1660. vdev_id, mod_id);
  1661. }
/* Non-MLO: target peer is the peer itself (no MLD indirection) */
static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	return peer;
}
  1667. static inline
  1668. struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
  1669. uint16_t peer_id,
  1670. enum dp_mod_id mod_id)
  1671. {
  1672. return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1673. }
/* Non-MLO: return the peer's own txrx context directly (may be NULL) */
static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return peer->txrx_peer;
}
/* Non-MLO: every peer counts as its own primary link peer */
static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	return true;
}
  1684. /**
  1685. * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
  1686. *
  1687. * @soc : core DP soc context
  1688. * @peer_id : peer id from peer object can be retrieved
  1689. * @handle : reference handle
  1690. * @mod_id : ID of module requesting reference
  1691. *
  1692. * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
  1693. */
  1694. static inline struct dp_txrx_peer *
  1695. dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
  1696. uint16_t peer_id,
  1697. dp_txrx_ref_handle *handle,
  1698. enum dp_mod_id mod_id)
  1699. {
  1700. return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
  1701. }
/* Non-MLO: no per-lmac link peers, the given peer id is already final */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	return peer_id;
}
/* Non-MLO stub: no MLO AST stats exist to print */
static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
{
}
  1711. #endif /* WLAN_FEATURE_11BE_MLO */
  1712. static inline
  1713. QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
  1714. {
  1715. uint8_t i;
  1716. if (IS_MLO_DP_MLD_PEER(peer)) {
  1717. dp_peer_info("skip for mld peer");
  1718. return QDF_STATUS_SUCCESS;
  1719. }
  1720. if (peer->rx_tid) {
  1721. QDF_BUG(0);
  1722. dp_peer_err("peer rx_tid mem already exist");
  1723. return QDF_STATUS_E_FAILURE;
  1724. }
  1725. peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
  1726. sizeof(struct dp_rx_tid));
  1727. if (!peer->rx_tid) {
  1728. dp_err("fail to alloc tid for peer" QDF_MAC_ADDR_FMT,
  1729. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  1730. return QDF_STATUS_E_NOMEM;
  1731. }
  1732. qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
  1733. for (i = 0; i < DP_MAX_TIDS; i++)
  1734. qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
  1735. return QDF_STATUS_SUCCESS;
  1736. }
/**
 * dp_peer_rx_tids_destroy() - Tear down per-TID rx state for a peer
 * @peer: DP peer handle
 *
 * NOTE(review): MLO link peers skip the lock destroy and free —
 * presumably their rx_tid aliases state owned elsewhere (e.g. the MLD
 * peer); confirm against the MLO setup path before changing this.
 * The pointer is cleared unconditionally either way.
 */
static inline
void dp_peer_rx_tids_destroy(struct dp_peer *peer)
{
	uint8_t i;

	if (!IS_MLO_DP_LINK_PEER(peer)) {
		for (i = 0; i < DP_MAX_TIDS; i++)
			qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);
		qdf_mem_free(peer->rx_tid);
	}
	peer->rx_tid = NULL;
}
  1748. static inline
  1749. void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
  1750. {
  1751. uint8_t i;
  1752. qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
  1753. sizeof(struct dp_rx_tid_defrag));
  1754. for (i = 0; i < DP_MAX_TIDS; i++)
  1755. qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
  1756. }
  1757. static inline
  1758. void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
  1759. {
  1760. uint8_t i;
  1761. for (i = 0; i < DP_MAX_TIDS; i++)
  1762. qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
  1763. }
  1764. #ifdef PEER_CACHE_RX_PKTS
/**
 * dp_peer_rx_bufq_resources_init() - Create the rx cached-packet queue
 * @txrx_peer: DP txrx peer handle
 *
 * Creates the queue lock, then the bounded cached-buffer list; the
 * queue is capped at DP_RX_CACHED_BUFQ_THRESH entries.
 */
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
	qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
	txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
			DP_RX_CACHED_BUFQ_THRESH);
}
/**
 * dp_peer_rx_bufq_resources_deinit() - Destroy the rx cached-packet queue
 * @txrx_peer: DP txrx peer handle
 *
 * Tears down in reverse order of init: list first, then its lock.
 */
static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
	qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
  1779. #else
/* Stub: rx packet caching disabled (PEER_CACHE_RX_PKTS not defined) */
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}
/* Stub: rx packet caching disabled, nothing to tear down */
static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
  1788. #endif
  1789. /**
  1790. * dp_peer_update_state() - update dp peer state
  1791. *
  1792. * @soc : core DP soc context
  1793. * @peer : DP peer
  1794. * @state : new state
  1795. *
  1796. * Return: None
  1797. */
  1798. static inline void
  1799. dp_peer_update_state(struct dp_soc *soc,
  1800. struct dp_peer *peer,
  1801. enum dp_peer_state state)
  1802. {
  1803. uint8_t peer_state;
  1804. qdf_spin_lock_bh(&peer->peer_state_lock);
  1805. peer_state = peer->peer_state;
  1806. switch (state) {
  1807. case DP_PEER_STATE_INIT:
  1808. DP_PEER_STATE_ASSERT
  1809. (peer, state, (peer_state != DP_PEER_STATE_ACTIVE) ||
  1810. (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
  1811. break;
  1812. case DP_PEER_STATE_ACTIVE:
  1813. DP_PEER_STATE_ASSERT(peer, state,
  1814. (peer_state == DP_PEER_STATE_INIT));
  1815. break;
  1816. case DP_PEER_STATE_LOGICAL_DELETE:
  1817. DP_PEER_STATE_ASSERT(peer, state,
  1818. (peer_state == DP_PEER_STATE_ACTIVE) ||
  1819. (peer_state == DP_PEER_STATE_INIT));
  1820. break;
  1821. case DP_PEER_STATE_INACTIVE:
  1822. if (IS_MLO_DP_MLD_PEER(peer))
  1823. DP_PEER_STATE_ASSERT
  1824. (peer, state,
  1825. (peer_state == DP_PEER_STATE_ACTIVE));
  1826. else
  1827. DP_PEER_STATE_ASSERT
  1828. (peer, state,
  1829. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  1830. break;
  1831. case DP_PEER_STATE_FREED:
  1832. if (peer->sta_self_peer)
  1833. DP_PEER_STATE_ASSERT
  1834. (peer, state, (peer_state == DP_PEER_STATE_INIT));
  1835. else
  1836. DP_PEER_STATE_ASSERT
  1837. (peer, state,
  1838. (peer_state == DP_PEER_STATE_INACTIVE) ||
  1839. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  1840. break;
  1841. default:
  1842. qdf_spin_unlock_bh(&peer->peer_state_lock);
  1843. dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
  1844. state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  1845. return;
  1846. }
  1847. peer->peer_state = state;
  1848. qdf_spin_unlock_bh(&peer->peer_state_lock);
  1849. dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
  1850. peer_state, state,
  1851. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  1852. }
  1853. #ifdef REO_SHARED_QREF_TABLE_EN
  1854. void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
  1855. struct dp_peer *peer);
  1856. #else
/* Stub: shared REO queue-ref table disabled, nothing to delete */
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
						      struct dp_peer *peer) {}
  1859. #endif
  1860. /**
  1861. * dp_peer_check_wds_ext_peer() - Check WDS ext peer
  1862. *
  1863. * @peer: DP peer
  1864. *
  1865. * Return: True for WDS ext peer, false otherwise
  1866. */
  1867. bool dp_peer_check_wds_ext_peer(struct dp_peer *peer);
  1868. #endif /* _DP_PEER_H_ */