/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_PEER_H_
#define _DP_PEER_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include "dp_types.h"
#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
#include "hal_reo.h"
#endif

#define DP_INVALID_PEER_ID 0xffff

#define DP_PEER_MAX_MEC_IDX 1024	/* maximum index for MEC table */
#define DP_PEER_MAX_MEC_ENTRY 4096	/* maximum MEC entries in MEC table */

#define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000

#define DP_PEER_HASH_LOAD_MULT  2
#define DP_PEER_HASH_LOAD_SHIFT 0

/* Threshold for peer's cached buf queue beyond which frames are dropped */
#define DP_RX_CACHED_BUFQ_THRESH 64

#define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
#define dp_peer_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
#define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
#ifdef REO_QDESC_HISTORY
enum reo_qdesc_event_type {
	REO_QDESC_UPDATE_CB = 0,
	REO_QDESC_FREE,
};

struct reo_qdesc_event {
	qdf_dma_addr_t qdesc_addr;
	uint64_t ts;
	enum reo_qdesc_event_type type;
	uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
};
#endif

struct ast_del_ctxt {
	bool age;
	int del_count;
};

typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
			       void *arg);

void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);

void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);

struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
				       uint8_t *peer_mac_addr,
				       int mac_addr_is_aligned,
				       uint8_t vdev_id,
				       enum dp_mod_id id);

bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
/**
 * dp_peer_get_ref() - Take a reference on the given peer
 *
 * @soc: core DP soc context
 * @peer: DP peer
 * @mod_id: id of module requesting the reference
 *
 * Return: QDF_STATUS_SUCCESS if reference held successfully
 *	   else QDF_STATUS_E_INVAL
 */
static inline
QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
			   struct dp_peer *peer,
			   enum dp_mod_id mod_id)
{
	if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
		return QDF_STATUS_E_INVAL;

	if (mod_id > DP_MOD_ID_RX)
		qdf_atomic_inc(&peer->mod_refs[mod_id]);

	return QDF_STATUS_SUCCESS;
}
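/*
 * Illustrative usage sketch (the caller below is hypothetical, not part
 * of this header): dp_peer_get_ref() fails once ref_cnt has already
 * dropped to zero, so callers must check the status and pair every
 * successful acquisition with dp_peer_unref_delete() under the same
 * dp_mod_id:
 *
 *	static void example_use_peer(struct dp_soc *soc, struct dp_peer *peer)
 *	{
 *		if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CDP) !=
 *		    QDF_STATUS_SUCCESS)
 *			return;
 *
 *		// ... safely dereference peer here ...
 *
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *	}
 */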
/**
 * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object is to be retrieved
 * @mod_id: module id
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline struct dp_peer *
__dp_peer_get_ref_by_id(struct dp_soc *soc,
			uint16_t peer_id,
			enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peer_id) ? NULL :
				soc->peer_id_to_obj_map[peer_id];
	if (!peer ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}

/**
 * dp_peer_get_ref_by_id() - Returns peer object given the peer id
 *			     if peer state is active
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object is to be retrieved
 * @mod_id: ID of module requesting the reference
 *
 * Return: struct dp_peer*: Pointer to DP peer object
 */
static inline
struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
				      uint16_t peer_id,
				      enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = (peer_id >= soc->max_peer_id) ? NULL :
				soc->peer_id_to_obj_map[peer_id];
	if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
	    (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return NULL;
	}

	qdf_spin_unlock_bh(&soc->peer_map_lock);
	return peer;
}
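/*
 * Illustrative lookup sketch (hypothetical caller): a peer id read from a
 * ring descriptor may already be stale or logically deleted, so the NULL
 * check is mandatory before use:
 *
 *	struct dp_peer *peer =
 *		dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_RX);
 *
 *	if (!peer)
 *		return;	// unmapped, deleted or logically deleted peer
 *	// ... use peer ...
 *	dp_peer_unref_delete(peer, DP_MOD_ID_RX);
 */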
/**
 * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object is to be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting the reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			   uint16_t peer_id,
			   dp_txrx_ref_handle *handle,
			   enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	if (!peer)
		return NULL;

	if (!peer->txrx_peer) {
		dp_peer_unref_delete(peer, mod_id);
		return NULL;
	}

	*handle = (dp_txrx_ref_handle)peer;
	return peer->txrx_peer;
}
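/*
 * Illustrative sketch (hypothetical caller; module id choice is just an
 * example): the returned dp_txrx_peer is only valid while the reference
 * recorded in @handle is held, and it is the handle, not the txrx peer
 * pointer, that gets released:
 *
 *	dp_txrx_ref_handle ref = NULL;
 *	struct dp_txrx_peer *txrx_peer =
 *		dp_txrx_peer_get_ref_by_id(soc, peer_id, &ref,
 *					   DP_MOD_ID_TX_COMP);
 *
 *	if (txrx_peer) {
 *		// ... update per-peer tx/rx state ...
 *		dp_txrx_peer_unref_delete(ref, DP_MOD_ID_TX_COMP);
 *	}
 */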
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: set flag to drop frames
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
#else
static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
}
#endif

static inline void
dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
{
	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);
}
/**
 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
 *
 * @vdev: DP vdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
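/*
 * Illustrative iterator sketch (the callback and counter below are
 * hypothetical): the callback runs with vdev->peer_list_lock held and a
 * reference taken on each peer, so it must not sleep or re-acquire the
 * peer list lock:
 *
 *	static void example_count_peer(struct dp_soc *soc,
 *				       struct dp_peer *peer, void *arg)
 *	{
 *		uint32_t *count = (uint32_t *)arg;
 *
 *		(*count)++;
 *	}
 *
 *	uint32_t count = 0;
 *
 *	dp_vdev_iterate_peer(vdev, example_count_peer, &count,
 *			     DP_MOD_ID_GENERIC_STATS);
 */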
/**
 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
 *
 * @pdev: DP pdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
		     enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev;

	if (!pdev)
		return;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
		dp_vdev_iterate_peer(vdev, func, arg, mod_id);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}

/**
 * dp_soc_iterate_peer() - API to iterate through all peers of soc
 *
 * @soc: DP soc context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
		    enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer(pdev, func, arg, mod_id);
	}
}
/**
 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev peer list
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * Because this API allocates new memory, use it only when the callback
 * cannot run with the peer list lock held.
 *
 * @vdev: DP vdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module id
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_peer **peer_array = NULL;
	int i = 0;
	uint32_t num_peers = 0;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	num_peers = vdev->num_peers;

	soc = vdev->pdev->soc;

	peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
	if (!peer_array)
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (i >= num_peers)
			break;

		if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
			peer_array[i] = peer;
			i = (i + 1);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

	for (i = 0; i < num_peers; i++) {
		peer = peer_array[i];

		if (!peer)
			continue;

		(*func)(soc, peer, arg);
		dp_peer_unref_delete(peer, mod_id);
	}

	qdf_mem_free(peer_array);
}
/**
 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * Because this API allocates new memory, use it only when the callback
 * cannot run with the peer list lock held.
 *
 * @pdev: DP pdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module id
 *
 * Return: void
 */
static inline void
dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
			       dp_peer_iter_func *func,
			       void *arg,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
	int i = 0;
	int j = 0;
	uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};

	if (!pdev || !pdev->soc)
		return;

	soc = pdev->soc;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		num_peers[i] = vdev->num_peers;
		peer_array[i] = qdf_mem_malloc(num_peers[i] *
					       sizeof(struct dp_peer *));
		if (!peer_array[i])
			break;

		qdf_spin_lock_bh(&vdev->peer_list_lock);
		j = 0;
		TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
				   peer_list_elem,
				   tmp_peer) {
			if (j >= num_peers[i])
				break;

			if (dp_peer_get_ref(soc, peer, mod_id) ==
					QDF_STATUS_SUCCESS) {
				peer_array[i][j] = peer;
				j = (j + 1);
			}
		}
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		i = (i + 1);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
		if (!peer_array[i])
			break;

		for (j = 0; j < num_peers[i]; j++) {
			peer = peer_array[i][j];

			if (!peer)
				continue;

			(*func)(soc, peer, arg);
			dp_peer_unref_delete(peer, mod_id);
		}

		qdf_mem_free(peer_array[i]);
	}
}
/**
 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
 *
 * This API caches the peers in locally allocated memory and calls the
 * iterate function outside the lock.
 *
 * Because this API allocates new memory, use it only when the callback
 * cannot run with the peer list lock held.
 *
 * @soc: DP soc context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module id
 *
 * Return: void
 */
static inline void
dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
			      dp_peer_iter_func *func,
			      void *arg,
			      enum dp_mod_id mod_id)
{
	struct dp_pdev *pdev;
	int i;

	if (!soc)
		return;

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
	}
}
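/*
 * Illustrative sketch (hypothetical callback): the _lock_safe variants
 * snapshot referenced peers under the lock and invoke the callback after
 * dropping it, so a callback that may sleep or that itself takes the peer
 * list lock (e.g. to delete peers) should use them:
 *
 *	static void example_flush_peer(struct dp_soc *soc,
 *				       struct dp_peer *peer, void *arg)
 *	{
 *		// may take locks or do work not allowed under
 *		// peer_list_lock
 *	}
 *
 *	dp_soc_iterate_peer_lock_safe(soc, example_flush_peer, NULL,
 *				      DP_MOD_ID_CDP);
 */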
#ifdef DP_PEER_STATE_DEBUG
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
			QDF_ASSERT(0); \
		} \
	} while (0)
#else
#define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
	do { \
		if (!(_condition)) { \
			dp_alert("Invalid state shift from %u to %u peer " \
				 QDF_MAC_ADDR_FMT, \
				 (_peer)->peer_state, (_new_state), \
				 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
		} \
	} while (0)
#endif
/**
 * dp_peer_state_cmp() - compare dp peer state
 *
 * @peer: DP peer
 * @state: state
 *
 * Return: true if state matches with peer state
 *	   false if it does not match
 */
static inline bool
dp_peer_state_cmp(struct dp_peer *peer,
		  enum dp_peer_state state)
{
	bool is_status_equal = false;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	is_status_equal = (peer->peer_state == state);
	qdf_spin_unlock_bh(&peer->peer_state_lock);

	return is_status_equal;
}
/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc: core DP soc context
 * @peer: DP peer
 * @state: new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
		     struct dp_peer *peer,
		     enum dp_peer_state state)
{
	uint8_t peer_state;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	peer_state = peer->peer_state;

	switch (state) {
	case DP_PEER_STATE_INIT:
		DP_PEER_STATE_ASSERT
			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
			 (peer_state != DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_ACTIVE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_LOGICAL_DELETE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_ACTIVE) ||
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_INACTIVE:
		DP_PEER_STATE_ASSERT
			(peer, state,
			 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_FREED:
		if (peer->sta_self_peer)
			DP_PEER_STATE_ASSERT
			(peer, state, (peer_state == DP_PEER_STATE_INIT));
		else
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INACTIVE) ||
				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	default:
		qdf_spin_unlock_bh(&peer->peer_state_lock);
		dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return;
	}
	peer->peer_state = state;
	qdf_spin_unlock_bh(&peer->peer_state_lock);
	dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
		peer_state, state,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}
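/*
 * Illustrative lifecycle sketch (not a code path in this header): the
 * asserts above encode the legal transitions, which for a normal peer
 * run in this order:
 *
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
 *
 * with INIT -> LOGICAL_DELETE and, for an sta self peer, INIT -> FREED
 * as the permitted shortcuts.
 */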
void dp_print_ast_stats(struct dp_soc *soc);

QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
				  uint16_t hw_peer_id, uint8_t vdev_id,
				  uint8_t *peer_mac_addr, uint16_t ast_hash,
				  uint8_t is_wds);

void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			      uint8_t vdev_id, uint8_t *peer_mac_addr,
			      uint8_t is_wds, uint32_t free_wds_count);

#ifdef DP_RX_UDP_OVER_PEER_ROAM
/**
 * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
 * @soc: dp soc pointer
 * @vdev_id: vdev id
 * @peer_mac_addr: mac address of the peer
 *
 * This function resets the roamed peer auth status and mac address
 * after peer map indication of the same peer is received from firmware.
 *
 * Return: None
 */
void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
			      uint8_t *peer_mac_addr);
#else
static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
					    uint8_t *peer_mac_addr)
{
}
#endif
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
 * @soc: generic soc handle
 * @peer_id: ML peer_id from firmware
 * @peer_mac_addr: mac address of the peer
 * @mlo_flow_info: MLO AST flow info
 * @mlo_link_info: MLO link info
 *
 * Associate the ML peer_id that firmware provided with the peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS
dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t *peer_mac_addr,
			   struct dp_mlo_flow_override_info *mlo_flow_info,
			   struct dp_mlo_link_info *mlo_link_info);

/**
 * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
 * @soc: generic soc handle
 * @peer_id: peer_id from firmware
 *
 * Return: none
 */
void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
#endif
void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
			   enum cdp_sec_type sec_type, int is_unicast,
			   u_int32_t *michael_key, u_int32_t *rx_pn);

QDF_STATUS dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
				   uint8_t tid, uint16_t win_sz);

uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
				    uint16_t peer_id, uint8_t *peer_mac);

QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
			   uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
			   uint32_t flags);

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);

void dp_peer_ast_unmap_handler(struct dp_soc *soc,
			       struct dp_ast_entry *ast_entry);

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags);

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id);

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr);

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry);

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry);

void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type);

void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);

#ifdef WLAN_FEATURE_MULTI_AST_DEL
void dp_peer_ast_send_multi_wds_del(
		struct dp_soc *soc, uint8_t vdev_id,
		struct peer_del_multi_wds_entries *wds_list);
#endif

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status);

void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase);

void dp_peer_free_ast_entry(struct dp_soc *soc,
			    struct dp_ast_entry *ast_entry);

void dp_peer_unlink_ast_entry(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry,
			      struct dp_peer *peer);
/**
 * dp_peer_mec_detach_entry() - Detach the MEC entry
 * @soc: SoC handle
 * @mecentry: MEC entry of the node
 * @ptr: pointer to free list
 *
 * The MEC entry is detached from the MEC table and added to free_list
 * to free the object outside the lock
 *
 * Return: None
 */
void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
			      void *ptr);

/**
 * dp_peer_mec_free_list() - free the MEC entries from free_list
 * @soc: SoC handle
 * @ptr: pointer to free list
 *
 * Return: None
 */
void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);

/**
 * dp_peer_mec_add_entry() - allocate and add a MEC entry to the MEC table
 * @soc: SoC handle
 * @vdev: vdev to which mec node belongs
 * @mac_addr: MAC address of mec node
 *
 * This function allocates and adds a MEC entry to the MEC table.
 * It assumes the caller has taken the mec lock to protect the access to
 * these tables.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 uint8_t *mac_addr);

/**
 * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
 *				       within pdev
 * @soc: SoC handle
 * @pdev_id: pdev id
 * @mec_mac_addr: MAC address of the MEC node
 *
 * It assumes the caller has taken the mec_lock to protect the access to
 * the MEC hash table
 *
 * Return: MEC entry
 */
struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t pdev_id,
						     uint8_t *mec_mac_addr);

#define DP_AST_ASSERT(_condition) \
	do { \
		if (!(_condition)) { \
			dp_print_ast_stats(soc);\
			QDF_BUG(_condition); \
		} \
	} while (0)
/**
 * dp_peer_update_inactive_time() - Update inactive time for peer
 * @pdev: pdev object
 * @tag_type: htt_tlv_tag type
 * @tag_buf: buf message
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf);
#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_peer_set_vlan_id() - set vlan_id for this peer
 * @cdp_soc: soc handle
 * @vdev_id: id of vdev object
 * @peer_mac: mac address
 * @vlan_id: vlan id for peer
 *
 * Return: void
 */
static inline
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
}

/**
 * dp_set_vlan_groupkey() - set vlan map for vdev
 * @soc: pointer to soc
 * @vdev_id: id of vdev handle
 * @vlan_id: vlan_id
 * @group_key: group key for vlan
 *
 * Return: set success/failure
 */
static inline
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_multipass_list_init() - initialize multipass peer list
 * @vdev: pointer to vdev
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_init(struct dp_vdev *vdev)
{
}

/**
 * dp_peer_multipass_list_remove() - remove peer from special peer list
 * @peer: peer handle
 *
 * Return: void
 */
static inline
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
}
#else
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id);
QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
				uint16_t vlan_id, uint16_t group_key);
void dp_peer_multipass_list_init(struct dp_vdev *vdev);
void dp_peer_multipass_list_remove(struct dp_peer *peer);
#endif
#ifndef QCA_PEER_MULTIQ_SUPPORT
/**
 * dp_peer_reset_flowq_map() - reset peer flowq map table
 * @peer: dp peer handle
 *
 * Return: none
 */
static inline
void dp_peer_reset_flowq_map(struct dp_peer *peer)
{
}

/**
 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
 * @soc_hdl: generic soc handle
 * @is_wds: flag to indicate if peer is wds
 * @peer_id: peer_id from htt peer map message
 * @peer_mac_addr: mac address of the peer
 * @ast_info: ast flow override information from peer map
 *
 * Return: none
 */
static inline
void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info)
{
}
#else
void dp_peer_reset_flowq_map(struct dp_peer *peer);

void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
		bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
		struct dp_ast_flow_override_info *ast_info);
#endif
/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (i.e., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc,
			 void *cb_ctxt,
			 union hal_reo_status *reo_status);
#ifdef QCA_PEER_EXT_STATS
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer);
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer);
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
static inline
QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
					 struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif

#ifdef WLAN_PEER_JITTER
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer);
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer);
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
#else
static inline
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
}
#endif

#ifndef CONFIG_SAWF_DEF_QUEUES
static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
						struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
					       struct dp_peer *peer)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#ifndef CONFIG_SAWF
static inline
QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
					struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
				       struct dp_txrx_peer *txrx_peer)
{
	return QDF_STATUS_SUCCESS;
}
#endif
struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   enum dp_mod_id mod_id);
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
						struct dp_vdev *vdev,
						enum dp_mod_id mod_id);

void dp_peer_ast_table_detach(struct dp_soc *soc);
void dp_peer_find_map_detach(struct dp_soc *soc);
void dp_soc_wds_detach(struct dp_soc *soc);
QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
void dp_soc_wds_attach(struct dp_soc *soc);
void dp_peer_mec_hash_detach(struct dp_soc *soc);
void dp_peer_ast_hash_detach(struct dp_soc *soc);
#ifdef FEATURE_AST
/*
 * dp_peer_delete_ast_entries() - Delete all AST entries for a peer
 * @soc: datapath soc handle
 * @peer: datapath peer handle
 *
 * Delete the AST entries belonging to a peer
 */
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
	struct dp_ast_entry *ast_entry, *temp_ast_entry;

	dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
	/*
	 * Delete peer self ast entry. This is done to handle scenarios
	 * where peer is freed before peer map is received (for example, in
	 * case of auth disallow due to ACL); in such cases self ast is not
	 * added to peer->ast_list.
	 */
	if (peer->self_ast_entry) {
		dp_peer_del_ast(soc, peer->self_ast_entry);
		peer->self_ast_entry = NULL;
	}

	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
		dp_peer_del_ast(soc, ast_entry);
}
#else
static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
					      struct dp_peer *peer)
{
}
#endif
#ifdef FEATURE_MEC
/**
 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_create(struct dp_soc *soc);

/**
 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
 * @soc: SoC handle
 *
 * Return: none
 */
void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);

/**
 * dp_peer_mec_flush_entries() - Delete all mec entries in table
 * @soc: Datapath SOC
 *
 * Return: None
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc);
#else
static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
}

static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
}
#endif
#ifdef DUMP_REO_QUEUE_INFO_IN_DDR
/**
 * dp_send_cache_flush_for_rx_tid() - Send cache flush cmd to REO per tid
 * @soc: dp_soc handle
 * @peer: peer
 *
 * This function is used to send the cache flush cmd to reo and
 * to register the callback to handle the dumping of the reo
 * queue stats from DDR
 *
 * Return: none
 */
void dp_send_cache_flush_for_rx_tid(
	struct dp_soc *soc, struct dp_peer *peer);

/**
 * dp_get_rx_reo_queue_info() - Handler to get rx tid info
 * @soc_hdl: cdp_soc_t handle
 * @vdev_id: vdev id
 *
 * Handler to get rx tid info from DDR after the h/w cache is
 * invalidated first using the cache flush cmd.
 *
 * Return: none
 */
void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);

/**
 * dp_dump_rx_reo_queue_info() - Callback function to dump reo queue stats
 * @soc: dp_soc handle
 * @cb_ctxt: callback context
 * @reo_status: REO command status
 *
 * This is the callback function registered after sending the reo cmd
 * to flush the h/w cache and invalidate it. In the callback the reo
 * queue desc info is dumped from DDR.
 *
 * Return: none
 */
void dp_dump_rx_reo_queue_info(
	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status);
#else /* DUMP_REO_QUEUE_INFO_IN_DDR */

static inline void dp_get_rx_reo_queue_info(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
{
}
#endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
static inline int dp_peer_find_mac_addr_cmp(
	union dp_align_mac_addr *mac_addr1,
	union dp_align_mac_addr *mac_addr2)
{
	/*
	 * Intentionally use & rather than &&.
	 * Because the operands are binary rather than generic boolean,
	 * the functionality is equivalent.
	 * Using && has the advantage of short-circuited evaluation,
	 * but using & has the advantage of no conditional branching,
	 * which is a more significant benefit.
	 */
	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
		 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
}
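/*
 * Illustrative sketch (hypothetical variables): note the inverted return
 * convention, 0 on match and non-zero on mismatch, like memcmp():
 *
 *	if (!dp_peer_find_mac_addr_cmp(&peer->mac_addr, &candidate_mac))
 *		; // addresses match
 */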
/**
 * dp_peer_delete() - delete DP peer
 *
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: void
 */
void dp_peer_delete(struct dp_soc *soc,
		    struct dp_peer *peer,
		    void *arg);

#ifdef WLAN_FEATURE_11BE_MLO
/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)

/* set peer type */
#define DP_PEER_SET_TYPE(_peer, _type_val) \
	((_peer)->peer_type = (_type_val))

/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))

/* is MLO connection link peer */
#define IS_MLO_DP_LINK_PEER(_peer) \
	((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)

/* is MLO connection mld peer */
#define IS_MLO_DP_MLD_PEER(_peer) \
	((_peer)->peer_type == CDP_MLD_PEER_TYPE)

/* Get Mld peer from link peer */
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
	((link_peer)->mld_peer)
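/*
 * Illustrative classification sketch (hypothetical caller): exactly one
 * of the three predicates below holds for any dp_peer, which is how code
 * paths pick between per-link and per-MLD handling:
 *
 *	if (IS_DP_LEGACY_PEER(peer))
 *		; // non-MLO peer, handled as before
 *	else if (IS_MLO_DP_LINK_PEER(peer))
 *		; // per-link peer; MLD is DP_GET_MLD_PEER_FROM_PEER(peer)
 *	else if (IS_MLO_DP_MLD_PEER(peer))
 *		; // MLD (unified) peer
 */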
#ifdef WLAN_MLO_MULTI_CHIP
uint8_t dp_mlo_get_chip_id(struct dp_soc *soc);

struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id);
#else
static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}
#endif
/**
 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer and
 *				 increase mld peer ref_cnt
 * @link_peer: link peer pointer
 * @mld_peer: mld peer pointer
 *
 * Return: none
 */
static inline
void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
			       struct dp_peer *mld_peer)
{
	/* increase mld_peer ref_cnt */
	dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = mld_peer;
}

/**
 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer and
 *				 decrease mld peer ref_cnt
 * @link_peer: link peer pointer
 *
 * Return: None
 */
static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
	dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
	link_peer->mld_peer = NULL;
}
/**
 * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
	int i;

	qdf_spinlock_create(&mld_peer->link_peers_info_lock);
	mld_peer->num_links = 0;
	for (i = 0; i < DP_MAX_MLO_LINKS; i++)
		mld_peer->link_peers[i].is_valid = false;
}

/**
 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
 * @mld_peer: mld peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
	qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
}
/**
 * dp_mld_peer_add_link_peer() - add link peer info to mld peer
 * @mld_peer: mld dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: None
 */
static inline
void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
			       struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (!link_peer_info->is_valid) {
			qdf_mem_copy(link_peer_info->mac_addr.raw,
				     link_peer->mac_addr.raw,
				     QDF_MAC_ADDR_SIZE);
			link_peer_info->is_valid = true;
			link_peer_info->vdev_id = link_peer->vdev->vdev_id;
			link_peer_info->chip_id =
				dp_mlo_get_chip_id(link_peer->vdev->pdev->soc);
			mld_peer->num_links++;
			break;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	if (i == DP_MAX_MLO_LINKS)
		dp_err("fail to add link peer " QDF_MAC_ADDR_FMT " to mld peer",
		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));
}
/**
 * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
 * @mld_peer: MLD dp peer pointer
 * @link_peer: link dp peer pointer
 *
 * Return: number of links left after deletion
 */
static inline
uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
				  struct dp_peer *link_peer)
{
	int i;
	struct dp_peer_link_info *link_peer_info;
	uint8_t num_links;

	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid &&
		    !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
					       &link_peer_info->mac_addr)) {
			link_peer_info->is_valid = false;
			mld_peer->num_links--;
			break;
		}
	}
	num_links = mld_peer->num_links;
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	if (i == DP_MAX_MLO_LINKS)
		dp_err("fail to del link peer " QDF_MAC_ADDR_FMT " from mld peer",
		       QDF_MAC_ADDR_REF(link_peer->mac_addr.raw));

	return num_links;
}
/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointers and
 *					   increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that holds the link peers pointer array and number
 * @mod_id: id of module requesting the reference
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
		struct dp_soc *soc,
		struct dp_peer *mld_peer,
		struct dp_mld_link_peers *mld_link_peers,
		enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i = 0, j = 0;
	struct dp_peer_link_info *link_peer_info;

	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid) {
			peer = dp_link_peer_hash_find_by_chip_id(
						soc,
						link_peer_info->mac_addr.raw,
						true,
						link_peer_info->vdev_id,
						link_peer_info->chip_id,
						mod_id);
			if (peer)
				mld_link_peers->link_peers[j++] = peer;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	mld_link_peers->num_links = j;
}
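/*
 * Illustrative sketch (hypothetical loop body): walk all link peers of an
 * MLD peer with references held, then release them in one call; this is
 * the same pattern dp_get_link_peer_id_by_lmac_id() below uses:
 *
 *	struct dp_mld_link_peers link_peers_info;
 *	uint8_t i;
 *
 *	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, &link_peers_info,
 *					    DP_MOD_ID_CDP);
 *	for (i = 0; i < link_peers_info.num_links; i++) {
 *		struct dp_peer *link_peer = link_peers_info.link_peers[i];
 *
 *		// ... per-link work ...
 *	}
 *	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
 */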
/**
 * dp_release_link_peers_ref() - release all link peers' references
 * @mld_link_peers: structure that holds the link peers pointer array and number
 * @mod_id: id of module that took the reference
 *
 * Return: None.
 */
static inline
void dp_release_link_peers_ref(
		struct dp_mld_link_peers *mld_link_peers,
		enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i;

	for (i = 0; i < mld_link_peers->num_links; i++) {
		peer = mld_link_peers->link_peers[i];
		if (peer)
			dp_peer_unref_delete(peer, mod_id);
		mld_link_peers->link_peers[i] = NULL;
	}

	mld_link_peers->num_links = 0;
}
/**
 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
 * @soc: Datapath soc handle
 * @peer_id: peer id
 * @lmac_id: lmac id to find the link peer on given lmac
 *
 * Return: peer_id of link peer if found
 *	   else return HTT_INVALID_PEER
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	uint8_t i;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_soc *link_peer_soc;
	struct dp_mld_link_peers link_peers_info;
	uint16_t link_peer_id = HTT_INVALID_PEER;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);

	if (!peer)
		return HTT_INVALID_PEER;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    DP_MOD_ID_CDP);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			if ((link_peer_soc == soc) &&
			    (link_peer->vdev->pdev->lmac_id == lmac_id)) {
				link_peer_id = link_peer->peer_id;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		link_peer_id = peer_id;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return link_peer_id;
}
/**
 * dp_peer_get_tgt_peer_hash_find() - get MLD dp_peer handle for processing
 * @soc: soc handle
 * @peer_mac: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting the reference
 *
 * For an MLO connection, get the corresponding MLD peer;
 * otherwise get the link peer for the non-MLO case.
 *
 * Return: peer on success
 *	   NULL on failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find(soc,
						      peer_mac, 0, vdev_id,
						      mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;

			/* release the peer reference added by hash find */
			dp_peer_unref_delete(peer, mod_id);
		} else {
			/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	}

	return ta_peer;
}
/**
 * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object is to be retrieved
 * @mod_id: ID of module requesting the reference
 *
 * For an MLO connection, get the corresponding MLD peer;
 * otherwise get the link peer for the non-MLO case.
 *
 * Return: peer on success
 *	   NULL on failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;

			/* release the peer reference added by id lookup */
			dp_peer_unref_delete(peer, mod_id);
		} else {
			/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	}

	return ta_peer;
}
/**
 * dp_peer_mlo_delete() - peer MLO related delete operation
 * @peer: DP peer handle
 *
 * Return: None
 */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
	struct dp_peer *ml_peer;
	struct dp_soc *soc;

	/* MLO connection link peer */
	if (IS_MLO_DP_LINK_PEER(peer)) {
		ml_peer = peer->mld_peer;
		soc = ml_peer->vdev->pdev->soc;

		/* if last link peer deletion, delete MLD peer */
		if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
			dp_peer_delete(soc, peer->mld_peer, NULL);
	}
}

/**
 * dp_peer_mlo_setup() - create MLD peer and do MLO related initialization
 * @soc: Soc handle
 * @peer: DP peer handle
 * @vdev_id: Vdev ID
 * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mlo_setup(
		struct dp_soc *soc,
		struct dp_peer *peer,
		uint8_t vdev_id,
		struct cdp_peer_setup_info *setup_info);
  1402. /**
  1403. * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
  1404. * @peer: datapath peer
  1405. *
  1406. * Return: MLD peer in case of MLO Link peer
  1407. * Peer itself in other cases
  1408. */
  1409. static inline
  1410. struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
  1411. {
  1412. return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
  1413. }
  1414. /**
  1415. * dp_get_primary_link_peer_by_id(): Get primary link peer from the given
  1416. * peer id
  1417. * @soc: core DP soc context
  1418. * @peer_id: peer id
  1419. * @mod_id: ID of module requesting reference
  1420. *
  1421. * Return: primary link peer for the MLO peer
  1422. * legacy peer itself in case of legacy peer
  1423. */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	uint8_t i;
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_peer *primary_peer = NULL;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	if (!peer)
		return NULL;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer,
						    &link_peers_info, mod_id);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			if (link_peer->primary_link) {
				primary_peer = link_peer;
				/*
				 * Take additional reference over
				 * primary link peer.
				 */
				dp_peer_get_ref(NULL, primary_peer, mod_id);
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, mod_id);
		dp_peer_unref_delete(peer, mod_id);
	} else {
		primary_peer = peer;
	}

	return primary_peer;
}
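
/*
 * Example usage (a minimal sketch; DP_MOD_ID_CDP stands in for the
 * caller's dp_mod_id). The returned peer carries its own reference
 * whether it is the primary link peer or a legacy peer, so a single
 * dp_peer_unref_delete() releases it in both cases:
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_get_primary_link_peer_by_id(soc, peer_id, DP_MOD_ID_CDP);
 *	if (!peer)
 *		return;
 *
 *	... operate on the primary link (or legacy) peer ...
 *
 *	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */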

/**
 * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
 * @peer: Datapath peer
 *
 * Return: dp_txrx_peer from MLD peer if peer type is link peer,
 *	   dp_txrx_peer from peer itself for other cases
 */
static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return IS_MLO_DP_LINK_PEER(peer) ?
				peer->mld_peer->txrx_peer : peer->txrx_peer;
}

/**
 * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
 * @peer: Datapath peer
 *
 * Return: true if peer is a primary link peer or a legacy peer,
 *	   false otherwise
 */
static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
		return true;
	else if (IS_DP_LEGACY_PEER(peer))
		return true;
	else
		return false;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets the target txrx peer for the
 *	given peer id
 * @soc: core DP soc context
 * @peer_id: peer id of the peer to be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_txrx_peer *txrx_peer;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	if (!peer)
		return NULL;

	txrx_peer = dp_get_txrx_peer(peer);
	if (txrx_peer) {
		*handle = (dp_txrx_ref_handle)peer;
		return txrx_peer;
	}

	dp_peer_unref_delete(peer, mod_id);
	return NULL;
}
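
/*
 * Example usage (a minimal sketch; it assumes the usual pairing with
 * dp_txrx_peer_unref_delete(), which releases the reference recorded
 * in @handle, and DP_MOD_ID_TX stands in for the caller's dp_mod_id):
 *
 *	dp_txrx_ref_handle ref_handle = NULL;
 *	struct dp_txrx_peer *txrx_peer;
 *
 *	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
 *						   &ref_handle,
 *						   DP_MOD_ID_TX);
 *	if (!txrx_peer)
 *		return;
 *
 *	... read or update txrx_peer state ...
 *
 *	dp_txrx_peer_unref_delete(ref_handle, DP_MOD_ID_TX);
 */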

#else
#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false

#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */

/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) true
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL

static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
}

static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}

static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	return peer;
}

static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return peer->txrx_peer;
}

static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	return true;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets the target txrx peer for the
 *	given peer id
 * @soc: core DP soc context
 * @peer_id: peer id of the peer to be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)
{
	return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}

static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	return peer_id;
}
#endif /* WLAN_FEATURE_11BE_MLO */

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/**
 * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
 * @soc: Soc handle
 * @peer: DP peer handle for ML peer
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_mlo_partner_chips_map(struct dp_soc *soc,
			      struct dp_peer *peer,
			      uint16_t peer_id);

/**
 * dp_mlo_partner_chips_unmap() - Unmap MLO peers from partner SOCs
 * @soc: Soc handle
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
				uint16_t peer_id);
#else
static inline void dp_mlo_partner_chips_map(struct dp_soc *soc,
					    struct dp_peer *peer,
					    uint16_t peer_id)
{
}

static inline void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
					      uint16_t peer_id)
{
}
#endif

/**
 * dp_peer_rx_tids_create() - Allocate the rx_tid array for a peer and
 *	initialize the per-TID locks
 * @peer: DP peer handle
 *
 * Return: QDF_STATUS
 */
static inline
QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
{
	uint8_t i;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		dp_peer_info("skip rx_tid alloc for MLD peer");
		return QDF_STATUS_SUCCESS;
	}

	if (peer->rx_tid) {
		QDF_BUG(0);
		dp_peer_err("peer rx_tid mem already exists");
		return QDF_STATUS_E_FAILURE;
	}

	peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
				      sizeof(struct dp_rx_tid));
	if (!peer->rx_tid) {
		dp_err("failed to alloc rx_tid for peer " QDF_MAC_ADDR_FMT,
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return QDF_STATUS_E_NOMEM;
	}

	qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_rx_tids_destroy() - Destroy the per-TID locks and free the
 *	rx_tid array of a peer
 * @peer: DP peer handle
 *
 * Return: None
 */
static inline
void dp_peer_rx_tids_destroy(struct dp_peer *peer)
{
	uint8_t i;

	if (!IS_MLO_DP_LINK_PEER(peer)) {
		for (i = 0; i < DP_MAX_TIDS; i++)
			qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);

		qdf_mem_free(peer->rx_tid);
	}

	peer->rx_tid = NULL;
}
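
/*
 * The create/destroy pair above is expected to be called symmetrically
 * from the peer attach/detach path; a minimal sketch (error handling
 * and the surrounding attach context are omitted):
 *
 *	if (dp_peer_rx_tids_create(peer) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	... peer rx path runs, taking rx_tid[i].tid_lock as needed ...
 *
 *	dp_peer_rx_tids_destroy(peer);
 */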

/**
 * dp_peer_defrag_rx_tids_init() - Initialize the rx defrag TID state and
 *	locks of a txrx peer
 * @txrx_peer: DP txrx peer handle
 *
 * Return: None
 */
static inline
void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
{
	uint8_t i;

	qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
		     sizeof(struct dp_rx_tid_defrag));

	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
}

/**
 * dp_peer_defrag_rx_tids_deinit() - Destroy the rx defrag TID locks of a
 *	txrx peer
 * @txrx_peer: DP txrx peer handle
 *
 * Return: None
 */
static inline
void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
{
	uint8_t i;

	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
}

#ifdef PEER_CACHE_RX_PKTS
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
	qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
	txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
			DP_RX_CACHED_BUFQ_THRESH);
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
	qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}
#else
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
#endif

#ifdef REO_SHARED_QREF_TABLE_EN
/**
 * dp_peer_rx_reo_shared_qaddr_delete() - Delete the REO shared qref table
 *	entries of the given peer
 * @soc: core DP soc context
 * @peer: DP peer handle
 *
 * Return: None
 */
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
					struct dp_peer *peer);
#else
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
						      struct dp_peer *peer) {}
#endif

#endif /* _DP_PEER_H_ */