dp_peer.h 52 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111
  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #ifndef _DP_PEER_H_
  20. #define _DP_PEER_H_
  21. #include <qdf_types.h>
  22. #include <qdf_lock.h>
  23. #include "dp_types.h"
  24. #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
  25. #include "hal_reo.h"
  26. #endif
  27. #define DP_INVALID_PEER_ID 0xffff
  28. #define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
  29. #define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */
  30. #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
  31. #define DP_PEER_HASH_LOAD_MULT 2
  32. #define DP_PEER_HASH_LOAD_SHIFT 0
  33. /* Threshold for peer's cached buf queue beyond which frames are dropped */
  34. #define DP_RX_CACHED_BUFQ_THRESH 64
  35. #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
  36. #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
  37. #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
  38. #define dp_peer_info(params...) \
  39. __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
  40. #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
  41. #ifdef REO_QDESC_HISTORY
  42. enum reo_qdesc_event_type {
  43. REO_QDESC_UPDATE_CB = 0,
  44. REO_QDESC_FREE,
  45. };
  46. struct reo_qdesc_event {
  47. qdf_dma_addr_t qdesc_addr;
  48. uint64_t ts;
  49. enum reo_qdesc_event_type type;
  50. uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
  51. };
  52. #endif
  53. struct ast_del_ctxt {
  54. bool age;
  55. int del_count;
  56. };
  57. typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
  58. void *arg);
  59. void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
  60. void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
  61. struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
  62. uint8_t *peer_mac_addr,
  63. int mac_addr_is_aligned,
  64. uint8_t vdev_id,
  65. enum dp_mod_id id);
  66. bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
  67. /**
  68. * dp_peer_get_ref() - Returns peer object given the peer id
  69. *
  70. * @soc : core DP soc context
  71. * @peer : DP peer
  72. * @mod_id : id of module requesting the reference
  73. *
  74. * Return: QDF_STATUS_SUCCESS if reference held successfully
  75. * else QDF_STATUS_E_INVAL
  76. */
  77. static inline
  78. QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
  79. struct dp_peer *peer,
  80. enum dp_mod_id mod_id)
  81. {
  82. if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
  83. return QDF_STATUS_E_INVAL;
  84. if (mod_id > DP_MOD_ID_RX)
  85. qdf_atomic_inc(&peer->mod_refs[mod_id]);
  86. return QDF_STATUS_SUCCESS;
  87. }
  88. /**
  89. * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
  90. *
  91. * @soc : core DP soc context
  92. * @peer_id : peer id from peer object can be retrieved
  93. * @mod_id : module id
  94. *
  95. * Return: struct dp_peer*: Pointer to DP peer object
  96. */
  97. static inline struct dp_peer *
  98. __dp_peer_get_ref_by_id(struct dp_soc *soc,
  99. uint16_t peer_id,
  100. enum dp_mod_id mod_id)
  101. {
  102. struct dp_peer *peer;
  103. qdf_spin_lock_bh(&soc->peer_map_lock);
  104. peer = (peer_id >= soc->max_peer_id) ? NULL :
  105. soc->peer_id_to_obj_map[peer_id];
  106. if (!peer ||
  107. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  108. qdf_spin_unlock_bh(&soc->peer_map_lock);
  109. return NULL;
  110. }
  111. qdf_spin_unlock_bh(&soc->peer_map_lock);
  112. return peer;
  113. }
  114. /**
  115. * dp_peer_get_ref_by_id() - Returns peer object given the peer id
  116. * if peer state is active
  117. *
  118. * @soc : core DP soc context
  119. * @peer_id : peer id from peer object can be retrieved
  120. * @mod_id : ID of module requesting reference
  121. *
  122. * Return: struct dp_peer*: Pointer to DP peer object
  123. */
  124. static inline
  125. struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
  126. uint16_t peer_id,
  127. enum dp_mod_id mod_id)
  128. {
  129. struct dp_peer *peer;
  130. qdf_spin_lock_bh(&soc->peer_map_lock);
  131. peer = (peer_id >= soc->max_peer_id) ? NULL :
  132. soc->peer_id_to_obj_map[peer_id];
  133. if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
  134. (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
  135. qdf_spin_unlock_bh(&soc->peer_map_lock);
  136. return NULL;
  137. }
  138. qdf_spin_unlock_bh(&soc->peer_map_lock);
  139. return peer;
  140. }
  141. /**
  142. * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
  143. *
  144. * @soc : core DP soc context
  145. * @peer_id : peer id from peer object can be retrieved
  146. * @handle : reference handle
  147. * @mod_id : ID of module requesting reference
  148. *
  149. * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
  150. */
  151. static inline struct dp_txrx_peer *
  152. dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
  153. uint16_t peer_id,
  154. dp_txrx_ref_handle *handle,
  155. enum dp_mod_id mod_id)
  156. {
  157. struct dp_peer *peer;
  158. peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  159. if (!peer)
  160. return NULL;
  161. if (!peer->txrx_peer) {
  162. dp_peer_unref_delete(peer, mod_id);
  163. return NULL;
  164. }
  165. *handle = (dp_txrx_ref_handle)peer;
  166. return peer->txrx_peer;
  167. }
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
/* Stub: rx-frame caching is compiled out, so there is nothing to flush. */
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif
/*
 * dp_clear_peer_internal() - mark a peer disconnected and flush its rx cache
 * @soc: DP soc context
 * @peer: peer being cleared
 *
 * Sets the legacy peer state to OL_TXRX_PEER_STATE_DISC under
 * peer_info_lock, then drops (drop=true) any rx frames still cached
 * for this peer.
 */
static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);
	dp_rx_flush_rx_cached(peer, true);
}
  190. /**
  191. * dp_vdev_iterate_peer() - API to iterate through vdev peer list
  192. *
  193. * @vdev : DP vdev context
  194. * @func : function to be called for each peer
  195. * @arg : argument need to be passed to func
  196. * @mod_id : module_id
  197. *
  198. * Return: void
  199. */
  200. static inline void
  201. dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
  202. enum dp_mod_id mod_id)
  203. {
  204. struct dp_peer *peer;
  205. struct dp_peer *tmp_peer;
  206. struct dp_soc *soc = NULL;
  207. if (!vdev || !vdev->pdev || !vdev->pdev->soc)
  208. return;
  209. soc = vdev->pdev->soc;
  210. qdf_spin_lock_bh(&vdev->peer_list_lock);
  211. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  212. peer_list_elem,
  213. tmp_peer) {
  214. if (dp_peer_get_ref(soc, peer, mod_id) ==
  215. QDF_STATUS_SUCCESS) {
  216. (*func)(soc, peer, arg);
  217. dp_peer_unref_delete(peer, mod_id);
  218. }
  219. }
  220. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  221. }
  222. /**
  223. * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
  224. *
  225. * @pdev : DP pdev context
  226. * @func : function to be called for each peer
  227. * @arg : argument need to be passed to func
  228. * @mod_id : module_id
  229. *
  230. * Return: void
  231. */
  232. static inline void
  233. dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
  234. enum dp_mod_id mod_id)
  235. {
  236. struct dp_vdev *vdev;
  237. if (!pdev)
  238. return;
  239. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  240. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
  241. dp_vdev_iterate_peer(vdev, func, arg, mod_id);
  242. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  243. }
  244. /**
  245. * dp_soc_iterate_peer() - API to iterate through all peers of soc
  246. *
  247. * @soc : DP soc context
  248. * @func : function to be called for each peer
  249. * @arg : argument need to be passed to func
  250. * @mod_id : module_id
  251. *
  252. * Return: void
  253. */
  254. static inline void
  255. dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
  256. enum dp_mod_id mod_id)
  257. {
  258. struct dp_pdev *pdev;
  259. int i;
  260. if (!soc)
  261. return;
  262. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  263. pdev = soc->pdev_list[i];
  264. dp_pdev_iterate_peer(pdev, func, arg, mod_id);
  265. }
  266. }
  267. /**
  268. * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
  269. *
  270. * This API will cache the peers in local allocated memory and calls
  271. * iterate function outside the lock.
  272. *
  273. * As this API is allocating new memory it is suggested to use this
  274. * only when lock cannot be held
  275. *
  276. * @vdev : DP vdev context
  277. * @func : function to be called for each peer
  278. * @arg : argument need to be passed to func
  279. * @mod_id : module_id
  280. *
  281. * Return: void
  282. */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_peer **peer_array = NULL;
	int i = 0;
	uint32_t num_peers = 0;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	/* Snapshot the peer count before taking the list lock; peers added
	 * after this point are skipped by the "i >= num_peers" guard below.
	 */
	num_peers = vdev->num_peers;
	soc = vdev->pdev->soc;
	/* NOTE(review): assumes qdf_mem_malloc returns zeroed memory, so
	 * slots left unfilled below read as NULL in the callback loop —
	 * confirm against the QDF implementation.
	 */
	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
	if (!peer_array)
		return;

	/* Pass 1: under the lock, take a reference on each peer and cache
	 * the pointer; no callbacks run while the lock is held.
	 */
	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (i >= num_peers)
			break;
		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
			peer_array[i] = peer;
			i = (i + 1);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

	/* Pass 2: outside the lock, invoke the callback and drop each
	 * reference taken above.
	 */
	for (i = 0; i < num_peers; i++) {
		peer = peer_array[i];
		if (!peer)
			continue;
		(*func)(soc, peer, arg);
		dp_peer_unref_delete(peer, mod_id);
	}
	qdf_mem_free(peer_array);
}
  323. /**
  324. * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
  325. *
  326. * This API will cache the peers in local allocated memory and calls
  327. * iterate function outside the lock.
  328. *
  329. * As this API is allocating new memory it is suggested to use this
  330. * only when lock cannot be held
  331. *
  332. * @pdev : DP pdev context
  333. * @func : function to be called for each peer
  334. * @arg : argument need to be passed to func
  335. * @mod_id : module_id
  336. *
  337. * Return: void
  338. */
  339. static inline void
  340. dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
  341. dp_peer_iter_func *func,
  342. void *arg,
  343. enum dp_mod_id mod_id)
  344. {
  345. struct dp_peer *peer;
  346. struct dp_peer *tmp_peer;
  347. struct dp_soc *soc = NULL;
  348. struct dp_vdev *vdev = NULL;
  349. struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
  350. int i = 0;
  351. int j = 0;
  352. uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
  353. if (!pdev || !pdev->soc)
  354. return;
  355. soc = pdev->soc;
  356. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  357. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  358. num_peers[i] = vdev->num_peers;
  359. peer_array[i] = qdf_mem_malloc(num_peers[i] *
  360. sizeof(struct dp_peer *));
  361. if (!peer_array[i])
  362. break;
  363. qdf_spin_lock_bh(&vdev->peer_list_lock);
  364. TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
  365. peer_list_elem,
  366. tmp_peer) {
  367. if (j >= num_peers[i])
  368. break;
  369. if (dp_peer_get_ref(soc, peer, mod_id) ==
  370. QDF_STATUS_SUCCESS) {
  371. peer_array[i][j] = peer;
  372. j = (j + 1);
  373. }
  374. }
  375. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  376. i = (i + 1);
  377. }
  378. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  379. for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
  380. if (!peer_array[i])
  381. break;
  382. for (j = 0; j < num_peers[i]; j++) {
  383. peer = peer_array[i][j];
  384. if (!peer)
  385. continue;
  386. (*func)(soc, peer, arg);
  387. dp_peer_unref_delete(peer, mod_id);
  388. }
  389. qdf_mem_free(peer_array[i]);
  390. }
  391. }
  392. /**
  393. * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
  394. *
  395. * This API will cache the peers in local allocated memory and calls
  396. * iterate function outside the lock.
  397. *
  398. * As this API is allocating new memory it is suggested to use this
  399. * only when lock cannot be held
  400. *
  401. * @soc : DP soc context
  402. * @func : function to be called for each peer
  403. * @arg : argument need to be passed to func
  404. * @mod_id : module_id
  405. *
  406. * Return: void
  407. */
  408. static inline void
  409. dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
  410. dp_peer_iter_func *func,
  411. void *arg,
  412. enum dp_mod_id mod_id)
  413. {
  414. struct dp_pdev *pdev;
  415. int i;
  416. if (!soc)
  417. return;
  418. for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
  419. pdev = soc->pdev_list[i];
  420. dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
  421. }
  422. }
#ifdef DP_PEER_STATE_DEBUG
/*
 * DP_PEER_STATE_ASSERT() - validate a peer state transition.
 * @_peer: peer whose state is changing
 * @_new_state: state being transitioned to
 * @_condition: expression that must hold for the transition to be legal
 *
 * Debug variant: logs the invalid shift and triggers QDF_ASSERT.
 */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)
#else
/* Production variant: log the invalid transition but do not assert. */
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif
  445. /**
  446. * dp_peer_state_cmp() - compare dp peer state
  447. *
  448. * @peer : DP peer
  449. * @state : state
  450. *
  451. * Return: true if state matches with peer state
  452. * false if it does not match
  453. */
  454. static inline bool
  455. dp_peer_state_cmp(struct dp_peer *peer,
  456. enum dp_peer_state state)
  457. {
  458. bool is_status_equal = false;
  459. qdf_spin_lock_bh(&peer->peer_state_lock);
  460. is_status_equal = (peer->peer_state == state);
  461. qdf_spin_unlock_bh(&peer->peer_state_lock);
  462. return is_status_equal;
  463. }
  464. /**
  465. * dp_peer_update_state() - update dp peer state
  466. *
  467. * @soc : core DP soc context
  468. * @peer : DP peer
  469. * @state : new state
  470. *
  471. * Return: None
  472. */
  473. static inline void
  474. dp_peer_update_state(struct dp_soc *soc,
  475. struct dp_peer *peer,
  476. enum dp_peer_state state)
  477. {
  478. uint8_t peer_state;
  479. qdf_spin_lock_bh(&peer->peer_state_lock);
  480. peer_state = peer->peer_state;
  481. switch (state) {
  482. case DP_PEER_STATE_INIT:
  483. DP_PEER_STATE_ASSERT
  484. (peer, state, (peer_state != DP_PEER_STATE_ACTIVE) ||
  485. (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
  486. break;
  487. case DP_PEER_STATE_ACTIVE:
  488. DP_PEER_STATE_ASSERT(peer, state,
  489. (peer_state == DP_PEER_STATE_INIT));
  490. break;
  491. case DP_PEER_STATE_LOGICAL_DELETE:
  492. DP_PEER_STATE_ASSERT(peer, state,
  493. (peer_state == DP_PEER_STATE_ACTIVE) ||
  494. (peer_state == DP_PEER_STATE_INIT));
  495. break;
  496. case DP_PEER_STATE_INACTIVE:
  497. DP_PEER_STATE_ASSERT
  498. (peer, state,
  499. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  500. break;
  501. case DP_PEER_STATE_FREED:
  502. if (peer->sta_self_peer)
  503. DP_PEER_STATE_ASSERT
  504. (peer, state, (peer_state == DP_PEER_STATE_INIT));
  505. else
  506. DP_PEER_STATE_ASSERT
  507. (peer, state,
  508. (peer_state == DP_PEER_STATE_INACTIVE) ||
  509. (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
  510. break;
  511. default:
  512. qdf_spin_unlock_bh(&peer->peer_state_lock);
  513. dp_alert("Invalid peer state %u for peer "QDF_MAC_ADDR_FMT,
  514. state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  515. return;
  516. }
  517. peer->peer_state = state;
  518. qdf_spin_unlock_bh(&peer->peer_state_lock);
  519. dp_info("Updating peer state from %u to %u mac "QDF_MAC_ADDR_FMT"\n",
  520. peer_state, state,
  521. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  522. }
  523. void dp_print_ast_stats(struct dp_soc *soc);
  524. QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  525. uint16_t hw_peer_id, uint8_t vdev_id,
  526. uint8_t *peer_mac_addr, uint16_t ast_hash,
  527. uint8_t is_wds);
  528. void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
  529. uint8_t vdev_id, uint8_t *peer_mac_addr,
  530. uint8_t is_wds, uint32_t free_wds_count);
#ifdef DP_RX_UDP_OVER_PEER_ROAM
/**
 * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
 * @soc - dp soc pointer
 * @vdev_id - vdev id
 * @peer_mac_addr - mac address of the peer
 *
 * This function resets the roamed peer auth status and mac address
 * after peer map indication of same peer is received from firmware.
 *
 * Return: None
 */
void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
			      uint8_t *peer_mac_addr);
#else
/* Stub: UDP-over-roam handling compiled out; intentionally a no-op. */
static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
					    uint8_t *peer_mac_addr)
{
}
#endif
  551. #ifdef WLAN_FEATURE_11BE_MLO
  552. /**
  553. * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
  554. * @soc_handle - generic soc handle
  555. * @peer_id - ML peer_id from firmware
  556. * @peer_mac_addr - mac address of the peer
  557. * @mlo_ast_flow_info: MLO AST flow info
  558. * @mlo_link_info - MLO link info
  559. *
  560. * associate the ML peer_id that firmware provided with peer entry
  561. * and update the ast table in the host with the hw_peer_id.
  562. *
  563. * Return: QDF_STATUS code
  564. */
  565. QDF_STATUS
  566. dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  567. uint8_t *peer_mac_addr,
  568. struct dp_mlo_flow_override_info *mlo_flow_info,
  569. struct dp_mlo_link_info *mlo_link_info);
  570. /**
  571. * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
  572. * @soc_handle - generic soc handle
  573. * @peer_id - peer_id from firmware
  574. *
  575. * Return: none
  576. */
  577. void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
  578. #endif
  579. void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
  580. enum cdp_sec_type sec_type, int is_unicast,
  581. u_int32_t *michael_key, u_int32_t *rx_pn);
  582. QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
  583. uint8_t tid, uint16_t win_sz);
  584. uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
  585. uint16_t peer_id, uint8_t *peer_mac);
  586. QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
  587. uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
  588. uint32_t flags);
  589. void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
  590. void dp_peer_ast_unmap_handler(struct dp_soc *soc,
  591. struct dp_ast_entry *ast_entry);
  592. int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
  593. struct dp_ast_entry *ast_entry, uint32_t flags);
  594. struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
  595. uint8_t *ast_mac_addr,
  596. uint8_t pdev_id);
  597. struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
  598. uint8_t *ast_mac_addr,
  599. uint8_t vdev_id);
  600. struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
  601. uint8_t *ast_mac_addr);
  602. uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
  603. struct dp_ast_entry *ast_entry);
  604. uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
  605. struct dp_ast_entry *ast_entry);
  606. void dp_peer_ast_set_type(struct dp_soc *soc,
  607. struct dp_ast_entry *ast_entry,
  608. enum cdp_txrx_ast_entry_type type);
  609. void dp_peer_ast_send_wds_del(struct dp_soc *soc,
  610. struct dp_ast_entry *ast_entry,
  611. struct dp_peer *peer);
  612. #ifdef WLAN_FEATURE_MULTI_AST_DEL
  613. void dp_peer_ast_send_multi_wds_del(
  614. struct dp_soc *soc, uint8_t vdev_id,
  615. struct peer_del_multi_wds_entries *wds_list);
  616. #endif
  617. void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  618. struct cdp_soc *dp_soc,
  619. void *cookie,
  620. enum cdp_ast_free_status status);
  621. void dp_peer_ast_hash_remove(struct dp_soc *soc,
  622. struct dp_ast_entry *ase);
  623. void dp_peer_free_ast_entry(struct dp_soc *soc,
  624. struct dp_ast_entry *ast_entry);
  625. void dp_peer_unlink_ast_entry(struct dp_soc *soc,
  626. struct dp_ast_entry *ast_entry,
  627. struct dp_peer *peer);
  628. /**
  629. * dp_peer_mec_detach_entry() - Detach the MEC entry
  630. * @soc: SoC handle
  631. * @mecentry: MEC entry of the node
  632. * @ptr: pointer to free list
  633. *
  634. * The MEC entry is detached from MEC table and added to free_list
  635. * to free the object outside lock
  636. *
  637. * Return: None
  638. */
  639. void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
  640. void *ptr);
  641. /**
  642. * dp_peer_mec_free_list() - free the MEC entry from free_list
  643. * @soc: SoC handle
  644. * @ptr: pointer to free list
  645. *
  646. * Return: None
  647. */
  648. void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
  649. /**
  650. * dp_peer_mec_add_entry()
  651. * @soc: SoC handle
  652. * @vdev: vdev to which mec node belongs
  653. * @mac_addr: MAC address of mec node
  654. *
  655. * This function allocates and adds MEC entry to MEC table.
  656. * It assumes caller has taken the mec lock to protect the access to these
  657. * tables
  658. *
  659. * Return: QDF_STATUS
  660. */
  661. QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
  662. struct dp_vdev *vdev,
  663. uint8_t *mac_addr);
  664. /**
  665. * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
  666. * within pdev
  667. * @soc: SoC handle
  668. *
  669. * It assumes caller has taken the mec_lock to protect the access to
  670. * MEC hash table
  671. *
  672. * Return: MEC entry
  673. */
  674. struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
  675. uint8_t pdev_id,
  676. uint8_t *mec_mac_addr);
/*
 * DP_AST_ASSERT() - dump AST stats and BUG out when @_condition fails.
 *
 * NOTE: the expansion references a variable literally named 'soc', so the
 * caller must have a struct dp_soc *soc in scope at the use site.
 */
#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc);\
			QDF_BUG(_condition); \
		} \
	} while (0)
  684. /**
  685. * dp_peer_update_inactive_time - Update inactive time for peer
  686. * @pdev: pdev object
  687. * @tag_type: htt_tlv_tag type
  688. * @tag_buf: buf message
  689. */
  690. void
  691. dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
  692. uint32_t *tag_buf);
#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id: set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * return: void
 */
/* Stub: multipass support compiled out; no-op. */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey: set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * return: set success/failure
 */
/* Stub: always reports success so callers need no special-casing. */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init: initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * return: void
 */
/* Stub: no multipass list exists without QCA_MULTIPASS_SUPPORT. */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove: remove peer from special peer list
 * @peer: peer handle
 *
 * return: void
 */
/* Stub: nothing to remove without QCA_MULTIPASS_SUPPORT. */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
/* Real implementations live in the multipass-enabled translation unit. */
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif
#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * No-op stub when peer multi-queue support is compiled out.
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl: generic soc handle
 * @is_wds: flag to indicate if peer is wds
 * @peer_id: peer_id from htt peer map message
 * @peer_mac_addr: mac address of the peer
 * @ast_info: ast flow override information from peer map
 *
 * No-op stub when peer multi-queue support is compiled out.
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);

void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info);
#endif
/**
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (ie., setting valid=0)
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 *
 * Return: None
 */
  794. void dp_rx_tid_delete_cb(struct dp_soc *soc,
  795. void *cb_ctxt,
  796. union hal_reo_status *reo_status);
#ifdef QCA_PEER_EXT_STATS
/**
 * dp_peer_delay_stats_ctx_alloc() - allocate delay stats context for peer
 * @soc: SoC handle
 * @txrx_peer: txrx DP peer handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer);

/**
 * dp_peer_delay_stats_ctx_dealloc() - free delay stats context of peer
 * @soc: SoC handle
 * @txrx_peer: txrx DP peer handle
 *
 * Return: None
 */
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer);

/**
 * dp_peer_delay_stats_ctx_clr() - clear delay stats context of peer
 * @txrx_peer: txrx DP peer handle
 *
 * Return: None
 */
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
/* Stubs when peer extended (delay) stats are compiled out. */
static inline
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif
#ifdef WLAN_PEER_JITTER
/**
 * dp_peer_jitter_stats_ctx_alloc() - allocate jitter stats context for peer
 * @pdev: Datapath pdev handle
 * @txrx_peer: txrx DP peer handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer);

/**
 * dp_peer_jitter_stats_ctx_dealloc() - free jitter stats context of peer
 * @pdev: Datapath pdev handle
 * @txrx_peer: txrx DP peer handle
 *
 * Return: None
 */
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer);

/**
 * dp_peer_jitter_stats_ctx_clr() - clear jitter stats context of peer
 * @txrx_peer: txrx DP peer handle
 *
 * Return: None
 */
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
/* Stubs when WLAN_PEER_JITTER is compiled out. */
static inline
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif
#ifndef CONFIG_SAWF_DEF_QUEUES
/* Stubs when SAWF default-queue support is compiled out. */
static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
					       struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}
#endif
#ifndef CONFIG_SAWF
/* Stubs when SAWF stats support is compiled out. */
static inline
QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
					struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
				       struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}
#endif
  869. struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
  870. struct dp_vdev *vdev,
  871. enum dp_mod_id mod_id);
  872. struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
  873. struct dp_vdev *vdev,
  874. enum dp_mod_id mod_id);
  875. void dp_peer_ast_table_detach(struct dp_soc *soc);
  876. void dp_peer_find_map_detach(struct dp_soc *soc);
  877. void dp_soc_wds_detach(struct dp_soc *soc);
  878. QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
  879. QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
  880. QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
  881. void dp_soc_wds_attach(struct dp_soc *soc);
  882. void dp_peer_mec_hash_detach(struct dp_soc *soc);
  883. void dp_peer_ast_hash_detach(struct dp_soc *soc);
  884. #ifdef FEATURE_AST
/**
 * dp_peer_delete_ast_entries() - Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 *
 * Return: None
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received(for ex in case
	 * of auth disallow due to ACL) in such cases self ast is not added
	 * to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	/* temp_ast_entry suggests a deletion-safe iterate variant —
	 * NOTE(review): confirm DP_PEER_ITERATE_ASE_LIST tolerates
	 * dp_peer_del_ast() removing the current entry.
	 */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}
/**
 * dp_print_peer_ast_entries() - print AST entries of a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 * @arg: argument to iterate function
 */
void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);
#else
/* Stubs when FEATURE_AST is compiled out: no AST entries exist. */
static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
					     struct dp_peer *peer, void *arg)
{
}

static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif
#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);

/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);

/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
/* Stubs when FEATURE_MEC is compiled out. */
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif
#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc: dp_soc handle
 * @peer: peer
 *
 * This function is used to send cache flush cmd to reo and
 * to register the callback to handle the dumping of the reo
 * queue stats from DDR
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(
	struct dp_soc *soc, struct dp_peer *peer);

/**
 * dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl: cdp_soc_t handle
 * @vdev_id: vdev id
 *
 * Handler to get rx tid info from DDR after h/w cache is
 * invalidated first using the cache flush cmd.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);

/**
 * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
 * @soc: dp_soc handle
 * @cb_ctxt: callback context
 * @reo_status: REO command status
 *
 * This is the callback function registered after sending the reo cmd
 * to flush the h/w cache and invalidate it. In the callback the reo
 * queue desc info is dumped from DDR.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(
	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
#else /* DUMP_REO_QUEUE_INFO_IN_DDR */
/* Stub when REO queue dumping from DDR is compiled out. */
static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
  1001. static inline int dp_peer_find_mac_addr_cmp(
  1002. union dp_align_mac_addr *mac_addr1,
  1003. union dp_align_mac_addr *mac_addr2)
  1004. {
  1005. /*
  1006. * Intentionally use & rather than &&.
  1007. * because the operands are binary rather than generic boolean,
  1008. * the functionality is equivalent.
  1009. * Using && has the advantage of short-circuited evaluation,
  1010. * but using & has the advantage of no conditional branching,
  1011. * which is a more significant benefit.
  1012. */
  1013. return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
  1014. & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
  1015. }
  1016. /**
  1017. * dp_peer_delete() - delete DP peer
  1018. *
 * @soc: Datapath soc
  1020. * @peer: Datapath peer
  1021. * @arg: argument to iter function
  1022. *
  1023. * Return: void
  1024. */
  1025. void dp_peer_delete(struct dp_soc *soc,
  1026. struct dp_peer *peer,
  1027. void *arg);
  1028. /**
  1029. * dp_mlo_peer_delete() - delete MLO DP peer
  1030. *
  1031. * @soc: Datapath soc
  1032. * @peer: Datapath peer
  1033. * @arg: argument to iter function
  1034. *
  1035. * Return: void
  1036. */
  1037. void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);
  1038. #ifdef WLAN_FEATURE_11BE_MLO
  1039. /* is MLO connection mld peer */
  1040. #define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)
  1041. /* set peer type */
  1042. #define DP_PEER_SET_TYPE(_peer, _type_val) \
  1043. ((_peer)->peer_type = (_type_val))
  1044. /* is legacy peer */
  1045. #define IS_DP_LEGACY_PEER(_peer) \
  1046. ((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))
  1047. /* is MLO connection link peer */
  1048. #define IS_MLO_DP_LINK_PEER(_peer) \
  1049. ((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
  1050. /* is MLO connection mld peer */
  1051. #define IS_MLO_DP_MLD_PEER(_peer) \
  1052. ((_peer)->peer_type == CDP_MLD_PEER_TYPE)
  1053. /* Get Mld peer from link peer */
  1054. #define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
  1055. ((link_peer)->mld_peer)
#ifdef WLAN_MLO_MULTI_CHIP
/**
 * dp_mlo_get_chip_id() - Get the chip id of this dp_soc
 * @soc: Datapath soc handle
 *
 * Return: chip id
 */
uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);

/**
 * dp_link_peer_hash_find_by_chip_id() - Find link peer by MAC address and
 * chip id
 * @soc: Datapath soc handle
 * @peer_mac_addr: peer MAC address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev id
 * @chip_id: chip id of the soc the link peer belongs to
 * @mod_id: id of module requesting reference
 *
 * Return: link peer on success, NULL on failure
 */
struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id);
#else
/* Single-chip builds: chip id is always 0 and the chip-id lookup
 * collapses to the regular peer hash lookup (chip_id ignored).
 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
#endif
/**
 * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
 * matching mac_address
 * @soc: soc handle
 * @peer_mac_addr: mld peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * Return: peer on success
 *         NULL on failure
 */
  1095. static inline
  1096. struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
  1097. uint8_t *peer_mac_addr,
  1098. int mac_addr_is_aligned,
  1099. uint8_t vdev_id,
  1100. enum dp_mod_id mod_id)
  1101. {
  1102. if (soc->arch_ops.mlo_peer_find_hash_find)
  1103. return soc->arch_ops.mlo_peer_find_hash_find(soc,
  1104. peer_mac_addr,
  1105. mac_addr_is_aligned,
  1106. mod_id, vdev_id);
  1107. return NULL;
  1108. }
  1109. /**
  1110. * dp_peer_hash_find_wrapper() - find link peer or mld per according to
  1111. peer_type
  1112. * @soc: DP SOC handle
  1113. * @peer_info: peer information for hash find
  1114. * @mod_id: ID of module requesting reference
  1115. *
  1116. * Return: peer hanlde
  1117. */
  1118. static inline
  1119. struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
  1120. struct cdp_peer_info *peer_info,
  1121. enum dp_mod_id mod_id)
  1122. {
  1123. struct dp_peer *peer = NULL;
  1124. if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
  1125. peer_info->peer_type == CDP_WILD_PEER_TYPE) {
  1126. peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
  1127. peer_info->mac_addr_is_aligned,
  1128. peer_info->vdev_id,
  1129. mod_id);
  1130. if (peer)
  1131. return peer;
  1132. }
  1133. if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
  1134. peer_info->peer_type == CDP_WILD_PEER_TYPE)
  1135. peer = dp_mld_peer_find_hash_find(
  1136. soc, peer_info->mac_addr,
  1137. peer_info->mac_addr_is_aligned,
  1138. peer_info->vdev_id,
  1139. mod_id);
  1140. return peer;
  1141. }
/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
 * increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
			       struct dp_peer *mld_peer)
{
	/* increase mld_peer ref_cnt; released by
	 * dp_link_peer_del_mld_peer()
	 */
	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = mld_peer;
}
/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
 * decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
	/* drop the reference taken in dp_link_peer_add_mld_peer() */
	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = NULL;
}
  1171. /**
  1172. * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
  1173. * @mld_peer: mld peer pointer
  1174. *
  1175. * Return: None
  1176. */
  1177. static inline
  1178. void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
  1179. {
  1180. int i;
  1181. qdf_spinlock_create(&mld_peer->link_peers_info_lock);
  1182. mld_peer->num_links = 0;
  1183. for (i = 0; i < DP_MAX_MLO_LINKS; i++)
  1184. mld_peer->link_peers[i].is_valid = false;
  1185. }
/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Destroys the lock created by dp_mld_peer_init_link_peers_info().
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}
  1197. /**
  1198. * dp_mld_peer_add_link_peer() - add link peer info to mld peer
  1199. * @mld_peer: mld dp peer pointer
  1200. * @link_peer: link dp peer pointer
  1201. *
  1202. * Return: None
  1203. */
  1204. static inline
  1205. void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
  1206. struct dp_peer *link_peer)
  1207. {
  1208. int i;
  1209. struct dp_peer_link_info *link_peer_info;
  1210. qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
  1211. for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
  1212. link_peer_info = &mld_peer->link_peers[i];
  1213. if (!link_peer_info->is_valid) {
  1214. qdf_mem_copy(link_peer_info->mac_addr.raw,
  1215. link_peer->mac_addr.raw,
  1216. QDF_MAC_ADDR_SIZE);
  1217. link_peer_info->is_valid = true;
  1218. link_peer_info->vdev_id = link_peer->vdev->vdev_id;
  1219. link_peer_info->chip_id =
  1220. dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
  1221. mld_peer->num_links++;
  1222. break;
  1223. }
  1224. }
  1225. qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
  1226. if (i == DP_MAX_MLO_LINKS)
  1227. dp_err("fail to add link peer" QDF_MAC_ADDR_FMT "to mld peer",
  1228. QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
  1229. }
  1230. /**
  1231. * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
  1232. * @mld_peer: MLD dp peer pointer
  1233. * @link_peer: link dp peer pointer
  1234. *
  1235. * Return: number of links left after deletion
  1236. */
  1237. static inline
  1238. uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
  1239. struct dp_peer *link_peer)
  1240. {
  1241. int i;
  1242. struct dp_peer_link_info *link_peer_info;
  1243. uint8_t num_links;
  1244. qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
  1245. for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
  1246. link_peer_info = &mld_peer->link_peers[i];
  1247. if (link_peer_info->is_valid &&
  1248. !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
  1249. &link_peer_info->mac_addr)) {
  1250. link_peer_info->is_valid = false;
  1251. mld_peer->num_links--;
  1252. break;
  1253. }
  1254. }
  1255. num_links = mld_peer->num_links;
  1256. qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
  1257. if (i == DP_MAX_MLO_LINKS)
  1258. dp_err("fail to del link peer" QDF_MAC_ADDR_FMT "to mld peer",
  1259. QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
  1260. return num_links;
  1261. }
/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
 * increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that holds link peers pointer array and number
 * @mod_id: id of module requesting reference
 *
 * Walks @mld_peer->link_peers[] under link_peers_info_lock and looks up
 * each valid link peer by MAC/vdev/chip id, taking a reference for
 * @mod_id on every peer found. Caller must release the references via
 * dp_release_link_peers_ref().
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
		struct dp_soc *soc,
		struct dp_peer *mld_peer,
		struct dp_mld_link_peers *mld_link_peers,
		enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i = 0, j = 0;
	struct dp_peer_link_info *link_peer_info;

	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid) {
			/* 'true' claims mac_addr.raw is aligned —
			 * NOTE(review): confirm alignment of the stored copy
			 */
			peer = dp_link_peer_hash_find_by_chip_id(
					soc,
					link_peer_info->mac_addr.raw,
					true,
					link_peer_info->vdev_id,
					link_peer_info->chip_id,
					mod_id);
			if (peer)
				mld_link_peers->link_peers[j++] = peer;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	/* only peers actually found contribute to num_links */
	mld_link_peers->num_links = j;
}
  1301. /**
  1302. * dp_release_link_peers_ref() - release all link peers reference
  1303. * @mld_link_peers: structure that hold links peers ponter array and number
  1304. * @mod_id: id of module requesting reference
  1305. *
  1306. * Return: None.
  1307. */
  1308. static inline
  1309. void dp_release_link_peers_ref(
  1310. struct dp_mld_link_peers *mld_link_peers,
  1311. enum dp_mod_id mod_id)
  1312. {
  1313. struct dp_peer *peer;
  1314. uint8_t i;
  1315. for (i = 0; i < mld_link_peers->num_links; i++) {
  1316. peer = mld_link_peers->link_peers[i];
  1317. if (peer)
  1318. dp_peer_unref_delete(peer, mod_id);
  1319. mld_link_peers->link_peers[i] = NULL;
  1320. }
  1321. mld_link_peers->num_links = 0;
  1322. }
/**
 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
 * @soc: Datapath soc handle
 * @peer_id: peer id
 * @lmac_id: lmac id to find the link peer on given lmac
 *
 * For an MLD peer id, walks its link peers (with references) and returns
 * the peer id of the link peer whose pdev is on @lmac_id of @soc.
 * For any other peer id, @peer_id is returned unchanged.
 *
 * Return: peer_id of link peer if found
 *         else return HTT_INVALID_PEER
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	uint8_t i;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_soc *link_peer_soc;
	struct dp_mld_link_peers link_peers_info;
	uint16_t link_peer_id = HTT_INVALID_PEER;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);

	if (!peer)
		return HTT_INVALID_PEER;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    DP_MOD_ID_CDP);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			/* match both soc and lmac of the link peer's pdev */
			if ((link_peer_soc == soc) &&
			    (link_peer->vdev->pdev->lmac_id == lmac_id)) {
				link_peer_id = link_peer->peer_id;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		link_peer_id = peer_id;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return link_peer_id;
}
/**
 * dp_peer_get_tgt_peer_hash_find() - get dp_peer handle
 * @soc: soc handle
 * @peer_mac: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * for MLO connection, get corresponding MLD peer,
 * otherwise get link peer for non-MLO case.
 *
 * Return: peer in success
 *         NULL in failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	/* NOTE(review): @mac_addr_is_aligned is ignored; 0 (unaligned) is
	 * always passed to the hash find — confirm this is intentional.
	 */
	struct dp_peer *peer = dp_peer_find_hash_find(soc,
						      peer_mac, 0, vdev_id,
						      mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;

			/* release peer reference that was added by hash find */
			dp_peer_unref_delete(peer, mod_id);
		} else {
			/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	} else {
		dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT,
			    QDF_MAC_ADDR_REF(peer_mac));
	}

	return ta_peer;
}
  1412. /**
  1413. * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
  1414. * @soc : core DP soc context
  1415. * @peer_id : peer id from peer object can be retrieved
  1416. * @mod_id : ID ot module requesting reference
  1417. *
  1418. * for MLO connection, get corresponding MLD peer,
  1419. * otherwise get link peer for non-MLO case.
  1420. *
  1421. * return: peer in success
  1422. * NULL in failure
  1423. */
  1424. static inline
  1425. struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
  1426. uint16_t peer_id,
  1427. enum dp_mod_id mod_id)
  1428. {
  1429. struct dp_peer *ta_peer = NULL;
  1430. struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
  1431. if (peer) {
  1432. /* mlo connection link peer, get mld peer with reference */
  1433. if (IS_MLO_DP_LINK_PEER(peer)) {
  1434. /* increase mld peer ref_cnt */
  1435. if (QDF_STATUS_SUCCESS ==
  1436. dp_peer_get_ref(soc, peer->mld_peer, mod_id))
  1437. ta_peer = peer->mld_peer;
  1438. else
  1439. ta_peer = NULL;
  1440. /* relese peer reference that added by hash find */
  1441. dp_peer_unref_delete(peer, mod_id);
  1442. } else {
  1443. /* mlo MLD peer or non-mlo link peer */
  1444. ta_peer = peer;
  1445. }
  1446. }
  1447. return ta_peer;
  1448. }
/**
 * dp_peer_mlo_delete() - peer MLO related delete operation
 * @peer: DP peer handle
 *
 * For an MLO link peer, removes it from its MLD peer's link list;
 * when the last link is removed, the MLD peer itself is deleted.
 *
 * Return: None
 */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
	struct dp_peer *ml_peer;
	struct dp_soc *soc;

	/* MLO connection link peer */
	if (IS_MLO_DP_LINK_PEER(peer)) {
		ml_peer = peer->mld_peer;
		soc = ml_peer->vdev->pdev->soc;

		/* if last link peer deletion, delete MLD peer */
		if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
			dp_peer_delete(soc, peer->mld_peer, NULL);
	}
}
/**
 * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
 * @soc: Soc handle
 * @peer: DP peer handle
 * @vdev_id: Vdev ID
 * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS
 */
  1474. QDF_STATUS dp_peer_mlo_setup(
  1475. struct dp_soc *soc,
  1476. struct dp_peer *peer,
  1477. uint8_t vdev_id,
  1478. struct cdp_peer_setup_info *setup_info);
  1479. /**
  1480. * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
  1481. * @peer: datapath peer
  1482. *
  1483. * Return: MLD peer in case of MLO Link peer
  1484. * Peer itself in other cases
  1485. */
  1486. static inline
  1487. struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
  1488. {
  1489. return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
  1490. }
/**
 * dp_get_primary_link_peer_by_id() - Get primary link peer from the given
 * peer id
 * @soc: core DP soc context
 * @peer_id: peer id
 * @mod_id: ID of module requesting reference
 *
 * For an MLD peer id, returns its primary link peer with a reference
 * taken for @mod_id; the MLD peer reference is dropped before return.
 * For any other peer id, the looked-up peer itself is returned, still
 * holding the lookup reference.
 *
 * Return: primary link peer for the MLO peer
 *         legacy peer itself in case of legacy peer
 */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	uint8_t i;
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_peer *primary_peer = NULL;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	if (!peer)
		return NULL;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    mod_id);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			if (link_peer->primary_link) {
				primary_peer = link_peer;
				/*
				 * Take additional reference over
				 * primary link peer.
				 */
				dp_peer_get_ref(NULL, primary_peer, mod_id);
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, mod_id);
		dp_peer_unref_delete(peer, mod_id);
	} else {
		primary_peer = peer;
	}

	return primary_peer;
}
  1538. /**
  1539. * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
  1540. * @peer: Datapath peer
  1541. *
  1542. * Return: dp_txrx_peer from MLD peer if peer type is link peer
  1543. * dp_txrx_peer from peer itself for other cases
  1544. */
  1545. static inline
  1546. struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
  1547. {
  1548. return IS_MLO_DP_LINK_PEER(peer) ?
  1549. peer->mld_peer->txrx_peer : peer->txrx_peer;
  1550. }
  1551. /**
  1552. * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
  1553. * @peer: Datapath peer
  1554. *
  1555. * Return: true if peer is primary link peer or legacy peer
  1556. * false otherwise
  1557. */
  1558. static inline
  1559. bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
  1560. {
  1561. if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
  1562. return true;
  1563. else if (IS_DP_LEGACY_PEER(peer))
  1564. return true;
  1565. else
  1566. return false;
  1567. }
/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
 * @soc: core DP soc context
 * @peer_id: peer id from peer object can be retrieved
 * @handle: out param; receives the dp_peer that holds the reference
 *          (cast to dp_txrx_ref_handle) — caller must release it
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object,
 *         NULL if the peer or its txrx context is not found
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_txrx_peer *txrx_peer;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	if (!peer)
		return NULL;

	txrx_peer = dp_get_txrx_peer(peer);
	if (txrx_peer) {
		/* keep the dp_peer reference; hand it back via @handle */
		*handle = (dp_txrx_ref_handle)peer;
		return txrx_peer;
	}

	/* no txrx context: drop the reference taken above */
	dp_peer_unref_delete(peer, mod_id);
	return NULL;
}
  1597. /**
  1598. * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
  1599. *
  1600. * @soc : core DP soc context
  1601. *
  1602. * Return: void
  1603. */
  1604. void dp_print_mlo_ast_stats_be(struct dp_soc *soc);
  1605. #else
  1606. #define IS_MLO_DP_MLD_TXRX_PEER(_peer) false
  1607. #define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
  1608. /* is legacy peer */
  1609. #define IS_DP_LEGACY_PEER(_peer) true
  1610. #define IS_MLO_DP_LINK_PEER(_peer) false
  1611. #define IS_MLO_DP_MLD_PEER(_peer) false
  1612. #define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL
  1613. static inline
  1614. struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
  1615. struct cdp_peer_info *peer_info,
  1616. enum dp_mod_id mod_id)
  1617. {
  1618. return dp_peer_find_hash_find(soc, peer_info->mac_addr,
  1619. peer_info->mac_addr_is_aligned,
  1620. peer_info->vdev_id,
  1621. mod_id);
  1622. }
  1623. static inline
  1624. struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
  1625. uint8_t *peer_mac,
  1626. int mac_addr_is_aligned,
  1627. uint8_t vdev_id,
  1628. enum dp_mod_id mod_id)
  1629. {
  1630. return dp_peer_find_hash_find(soc, peer_mac,
  1631. mac_addr_is_aligned, vdev_id,
  1632. mod_id);
  1633. }
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	/* Non-MLO build: the target peer is the peer for this id itself */
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info)
{
	/* MLO disabled: nothing to set up, always reports success */
	return QDF_STATUS_SUCCESS;
}
/* MLO disabled: MLD/link peer bookkeeping helpers are no-ops */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
}

/* Single-chip (non multi-link) build: the chip id is always 0 */
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}
  1675. static inline struct dp_peer *
  1676. dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
  1677. uint8_t *peer_mac_addr,
  1678. int mac_addr_is_aligned,
  1679. uint8_t vdev_id,
  1680. uint8_t chip_id,
  1681. enum dp_mod_id mod_id)
  1682. {
  1683. return dp_peer_find_hash_find(soc, peer_mac_addr,
  1684. mac_addr_is_aligned,
  1685. vdev_id, mod_id);
  1686. }
static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	/* Non-MLO build: every peer is its own target peer */
	return peer;
}

static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	/* Non-MLO build: the peer itself serves as the primary link peer */
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	/* The txrx peer hangs directly off the control-path peer */
	return peer->txrx_peer;
}

static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	/* Without MLO there is a single link, which is always primary */
	return true;
}
/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for given the peer id
 *
 * @soc : core DP soc context
 * @peer_id : peer id from peer object can be retrieved
 * @handle : reference handle
 * @mod_id : ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)
{
	/* Non-MLO build: the target txrx peer is the txrx peer itself */
	return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	/* Non-MLO build: no per-lmac link peers, the id maps to itself */
	return peer_id;
}

/* MLO AST statistics are not compiled in; nothing to print */
static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
{
}
  1736. #endif /* WLAN_FEATURE_11BE_MLO */
  1737. #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
  1738. /**
  1739. * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
  1740. * @soc: Soc handle
  1741. * @peer: DP peer handle for ML peer
  1742. * @peer_id: peer_id
  1743. * Return: None
  1744. */
  1745. void dp_mlo_partner_chips_map(struct dp_soc *soc,
  1746. struct dp_peer *peer,
  1747. uint16_t peer_id);
  1748. /**
 * dp_mlo_partner_chips_unmap() - Unmap MLO peer from partner SOCs
  1750. * @soc: Soc handle
  1751. * @peer_id: peer_id
  1752. * Return: None
  1753. */
  1754. void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
  1755. uint16_t peer_id);
#else
/* Single-chip build: there are no partner SOCs to map MLO peers onto */
static inline void dp_mlo_partner_chips_map(struct dp_soc *soc,
					    struct dp_peer *peer,
					    uint16_t peer_id)
{
}

static inline void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
					      uint16_t peer_id)
{
}
  1766. #endif
  1767. static inline
  1768. QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
  1769. {
  1770. uint8_t i;
  1771. if (IS_MLO_DP_MLD_PEER(peer)) {
  1772. dp_peer_info("skip for mld peer");
  1773. return QDF_STATUS_SUCCESS;
  1774. }
  1775. if (peer->rx_tid) {
  1776. QDF_BUG(0);
  1777. dp_peer_err("peer rx_tid mem already exist");
  1778. return QDF_STATUS_E_FAILURE;
  1779. }
  1780. peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
  1781. sizeof(struct dp_rx_tid));
  1782. if (!peer->rx_tid) {
  1783. dp_err("fail to alloc tid for peer" QDF_MAC_ADDR_FMT,
  1784. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  1785. return QDF_STATUS_E_NOMEM;
  1786. }
  1787. qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
  1788. for (i = 0; i < DP_MAX_TIDS; i++)
  1789. qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
  1790. return QDF_STATUS_SUCCESS;
  1791. }
  1792. static inline
  1793. void dp_peer_rx_tids_destroy(struct dp_peer *peer)
  1794. {
  1795. uint8_t i;
  1796. if (!IS_MLO_DP_LINK_PEER(peer)) {
  1797. for (i = 0; i < DP_MAX_TIDS; i++)
  1798. qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);
  1799. qdf_mem_free(peer->rx_tid);
  1800. }
  1801. peer->rx_tid = NULL;
  1802. }
  1803. static inline
  1804. void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
  1805. {
  1806. uint8_t i;
  1807. qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
  1808. sizeof(struct dp_rx_tid_defrag));
  1809. for (i = 0; i < DP_MAX_TIDS; i++)
  1810. qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
  1811. }
  1812. static inline
  1813. void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
  1814. {
  1815. uint8_t i;
  1816. for (i = 0; i < DP_MAX_TIDS; i++)
  1817. qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
  1818. }
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_peer_rx_bufq_resources_init() - Set up the cached rx buffer queue
 * @txrx_peer: txrx DP peer context
 *
 * Creates the bufq lock and the cached_bufq list, both sized by
 * DP_RX_CACHED_BUFQ_THRESH.
 *
 * Return: void
 */
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
	qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
	txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
			DP_RX_CACHED_BUFQ_THRESH);
}

/**
 * dp_peer_rx_bufq_resources_deinit() - Tear down the cached rx buffer queue
 * @txrx_peer: txrx DP peer context
 *
 * Return: void
 */
static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
	qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
#else
/* Rx packet caching disabled: bufq init/deinit are no-ops */
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
#endif
#ifdef REO_SHARED_QREF_TABLE_EN
/**
 * dp_peer_rx_reo_shared_qaddr_delete() - Remove this peer's entries from
 *	the REO shared queue-address table (defined elsewhere — presumably
 *	clears the per-peer qref entries; confirm against implementation)
 * @soc: core DP soc context
 * @peer: DP peer handle
 *
 * Return: void
 */
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
					struct dp_peer *peer);
#else
/* REO shared qref table not compiled in: nothing to delete */
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
						      struct dp_peer *peer) {}
#endif
  1851. #endif /* _DP_PEER_H_ */