dp_be.h

/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __DP_BE_H
#define __DP_BE_H

#include <dp_types.h>
#include <hal_be_tx.h>
#ifdef WLAN_MLO_MULTI_CHIP
#include "mlo/dp_mlo.h"
#else
#include <dp_peer.h>
#endif
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
enum CMEM_MEM_CLIENTS {
	COOKIE_CONVERSION,
	FISA_FST,
};

/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* mask for the entry offset within one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)

/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES \
	DP_CC_PPT_MEM_SIZE / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED

/* cookie conversion required CMEM offset from CMEM pool */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size */
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
#define DP_CC_PPT_MEM_SIZE 4096
#else
#define DP_CC_PPT_MEM_SIZE 8192
#endif

/* FST required CMEM offset from CMEM pool */
#define DP_FST_MEM_OFFSET_IN_CMEM \
	(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0
#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF
#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0
#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* higher 11 bits in Desc ID for offset in CMEM of PPT */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19
#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00

/*
 * page 4K unaligned case, single SPT page physical address
 * needs 8 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8

/*
 * page 4K aligned case, single SPT page physical address
 * needs 4 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/* 4K aligned case, number of bits HW appends for one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 3
#else
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 5
#endif

#ifdef WLAN_SUPPORT_PPEDS
#define DP_PPEDS_STAMODE_ASTIDX_MAP_REG_IDX 1

/* The MAX PPE PRI2TID */
#define DP_TX_INT_PRI2TID_MAX 15

/* size of CMEM needed for a ppeds tx desc pool */
#define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_NUM_PPEDS_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* Offset of ppeds tx descriptor pool */
#define DP_TX_PPEDS_DESC_CMEM_OFFSET 0

#define PEER_ROUTING_USE_PPE 1
#define PEER_ROUTING_ENABLED 1

#define DP_PPE_INTR_STRNG_LEN 32
#define DP_PPE_INTR_MAX 3
#else
#define DP_TX_PPEDS_DESC_CMEM_OFFSET 0
#define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE 0

#define DP_PPE_INTR_STRNG_LEN 0
#define DP_PPE_INTR_MAX 0
#endif

/* tx descriptors are programmed at the start of the CMEM region */
#define DP_TX_DESC_CMEM_OFFSET \
	(DP_TX_PPEDS_DESC_CMEM_OFFSET + DP_TX_PPEDS_DESC_POOL_CMEM_SIZE)

/* size of CMEM needed for a tx desc pool */
#define DP_TX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

#ifndef QCA_SUPPORT_DP_GLOBAL_CTX
/* Offset of rx descriptor pool */
#define DP_RX_DESC_CMEM_OFFSET \
	DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE)
#else
/* tx special descriptors are programmed after the tx desc CMEM region */
#define DP_TX_SPCL_DESC_CMEM_OFFSET \
	DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE)

/* size of CMEM needed for a tx special desc pool */
#define DP_TX_SPCL_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_NUM_TX_SPL_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* Offset of rx descriptor pool */
#define DP_RX_DESC_CMEM_OFFSET \
	DP_TX_SPCL_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * \
	 DP_TX_SPCL_DESC_POOL_CMEM_SIZE)
#endif

/* size of CMEM needed for a rx desc pool */
#define DP_RX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* get ppt_id from CMEM_OFFSET */
#define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
	((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
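
/*
 * A rough sketch of the CMEM layout implied by the offsets above (sizes
 * depend on the WLAN_CFG_* maximums, and the special TX region exists only
 * when QCA_SUPPORT_DP_GLOBAL_CTX is defined):
 *
 *   DP_TX_PPEDS_DESC_CMEM_OFFSET -> PPEDS tx desc pool (if enabled)
 *   DP_TX_DESC_CMEM_OFFSET       -> MAX_TXDESC_POOLS regular tx desc pools
 *   DP_TX_SPCL_DESC_CMEM_OFFSET  -> MAX_TXDESC_POOLS special tx desc pools
 *   DP_RX_DESC_CMEM_OFFSET       -> per-chip rx desc pools
 *
 * For example, DP_CMEM_OFFSET_TO_PPT_ID(DP_TX_DESC_CMEM_OFFSET +
 * pool_id * DP_TX_DESC_POOL_CMEM_SIZE) would give the first PPT index
 * used by tx desc pool 'pool_id'.
 */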

/**
 * struct dp_spt_page_desc - secondary page table page descriptors
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in primary page table where this page physical
 *             address is stored
 */
struct dp_spt_page_desc {
	uint8_t *page_v_addr;
	qdf_dma_addr_t page_p_addr;
	uint32_t ppt_index;
};

/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_offset: CMEM offset from base address for primary page table setup
 * @total_page_num: total DDR pages allocated
 * @page_desc_base: page desc buffer base address
 * @page_pool: DDR pages pool
 * @cc_lock: lock for page acquiring/free
 */
struct dp_hw_cookie_conversion_t {
	uint32_t cmem_offset;
	uint32_t total_page_num;
	struct dp_spt_page_desc *page_desc_base;
	struct qdf_mem_multi_page_t page_pool;
	qdf_spinlock_t cc_lock;
};

/**
 * struct dp_spt_page_desc_list - container of SPT page desc list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
	struct dp_spt_page_desc *spt_page_list_head;
	struct dp_spt_page_desc *spt_page_list_tail;
	uint16_t num_spt_pages;
};

/* HW reads 8 bytes for VA */
#define DP_CC_HW_READ_BYTES 8
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
	{ *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
	= (uintptr_t)(_desc_va); }
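
/*
 * A minimal usage sketch for DP_CC_SPT_PAGE_UPDATE_VA (illustrative only;
 * 'page_desc' and 'tx_desc' below are placeholders, not symbols from this
 * header): once a descriptor is assigned a cookie, its virtual address is
 * written into the SPT slot selected by the low bits of that cookie so HW
 * can convert cookie -> VA:
 *
 *   uint16_t spt_idx = desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK;
 *
 *   DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr, spt_idx, tx_desc);
 */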

/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: ref count indicating number of users of the bank
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
	uint8_t is_configured;
	qdf_atomic_t ref_count;
	union hal_tx_bank_config bank_config;
};

#ifdef WLAN_SUPPORT_PPEDS
/**
 * struct dp_ppe_vp_tbl_entry - PPE Virtual table entry
 * @is_configured: Boolean indicating if the entry is configured
 */
struct dp_ppe_vp_tbl_entry {
	bool is_configured;
};

/**
 * struct dp_ppe_vp_search_idx_tbl_entry - PPE Virtual search table entry
 * @is_configured: Boolean indicating if the entry is configured
 */
struct dp_ppe_vp_search_idx_tbl_entry {
	bool is_configured;
};

/**
 * struct dp_ppe_vp_profile - PPE direct switch profile per vdev
 * @is_configured: Boolean indicating if the entry is configured
 * @vp_num: Virtual port number
 * @ppe_vp_num_idx: Index to the PPE VP table entry
 * @search_idx_reg_num: Address search Index register number
 * @drop_prec_enable: Drop precedence enable
 * @to_fw: To FW exception enable/disable
 * @use_ppe_int_pri: Use PPE INT_PRI to TID mapping table
 */
struct dp_ppe_vp_profile {
	bool is_configured;
	uint8_t vp_num;
	uint8_t ppe_vp_num_idx;
	uint8_t search_idx_reg_num;
	uint8_t drop_prec_enable;
	uint8_t to_fw;
	uint8_t use_ppe_int_pri;
};

/**
 * struct dp_ppeds_tx_desc_pool_s - PPEDS Tx Descriptor Pool
 * @elem_size: Size of each descriptor
 * @hot_list_len: Length of hotlist chain
 * @num_allocated: Number of used descriptors
 * @freelist: Chain of free descriptors
 * @hotlist: Chain of descriptors with attached nbufs
 * @desc_pages: multiple page allocation information for actual descriptors
 * @elem_count: Number of descriptors in the pool
 * @num_free: Number of free descriptors
 * @lock: Lock for descriptor allocation/free from/to the pool
 */
struct dp_ppeds_tx_desc_pool_s {
	uint16_t elem_size;
	uint32_t num_allocated;
	uint32_t hot_list_len;
	struct dp_tx_desc_s *freelist;
	struct dp_tx_desc_s *hotlist;
	struct qdf_mem_multi_page_t desc_pages;
	uint16_t elem_count;
	uint32_t num_free;
	qdf_spinlock_t lock;
};
#endif

/**
 * struct dp_ppeds_napi - napi parameters for ppe ds
 * @napi: napi structure to register with napi infra
 * @ndev: net_device structure
 */
struct dp_ppeds_napi {
	struct napi_struct napi;
	struct net_device ndev;
};

/*
 * NB: intentionally not using kernel-doc comment because the kernel-doc
 * script does not handle the TAILQ_HEAD macro
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: lock for @bank_profiles
 * @bank_profiles: bank profiles for various TX banks
 * @page_desc_base:
 * @cc_cmem_base: cmem offset reserved for CC
 * @tx_cc_ctx: Cookie conversion context for tx desc pools
 * @rx_cc_ctx: Cookie conversion context for rx desc pools
 * @ppeds_int_mode_enabled: PPE DS interrupt mode enabled
 * @ppeds_stopped:
 * @reo2ppe_ring: REO2PPE ring
 * @ppe2tcl_ring: PPE2TCL ring
 * @ppeds_wbm_release_ring:
 * @ppe_vp_tbl: PPE VP table
 * @ppe_vp_search_idx_tbl: PPE VP search idx table
 * @ppeds_tx_cc_ctx: Cookie conversion context for ppeds tx desc pool
 * @ppeds_tx_desc: PPEDS tx desc pool
 * @ppeds_napi_ctxt:
 * @ppeds_handle: PPEDS soc instance handle
 * @dp_ppeds_txdesc_hotlist_len: PPEDS tx desc hotlist length
 * @ppe_vp_tbl_lock: PPE VP table lock
 * @num_ppe_vp_entries: Number of PPE VP entries
 * @num_ppe_vp_search_idx_entries: PPEDS VP search idx entries
 * @irq_name: PPEDS VP irq names
 * @ppeds_stats: PPEDS stats
 * @mlo_enabled: Flag to indicate MLO is enabled or not
 * @mlo_chip_id: MLO chip_id
 * @ml_ctxt: pointer to global ml_context
 * @delta_tqm: delta_tqm
 * @mlo_tstamp_offset: mlo timestamp offset
 * @mld_peer_hash_lock: lock to protect mld_peer_hash
 * @mld_peer_hash: peer hash table for ML peers
 * @mlo_dev_list: list of MLO device context
 * @mlo_dev_list_lock: lock to protect MLO device ctxt
 * @ipa_bank_id: TCL bank id used by IPA
 */
struct dp_soc_be {
	struct dp_soc soc;
	uint8_t num_bank_profiles;
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
	qdf_mutex_t tx_bank_lock;
#else
	qdf_spinlock_t tx_bank_lock;
#endif
	struct dp_tx_bank_profile *bank_profiles;
	struct dp_spt_page_desc *page_desc_base;
	uint32_t cc_cmem_base;
	struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
	struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
	uint8_t ppeds_int_mode_enabled:1,
		ppeds_stopped:1;
	struct dp_srng reo2ppe_ring;
	struct dp_srng ppe2tcl_ring;
	struct dp_srng ppeds_wbm_release_ring;
	struct dp_ppe_vp_tbl_entry *ppe_vp_tbl;
	struct dp_ppe_vp_search_idx_tbl_entry *ppe_vp_search_idx_tbl;
	struct dp_ppe_vp_profile *ppe_vp_profile;
	struct dp_hw_cookie_conversion_t ppeds_tx_cc_ctx;
	struct dp_ppeds_tx_desc_pool_s ppeds_tx_desc;
	struct dp_ppeds_napi ppeds_napi_ctxt;
	void *ppeds_handle;
	int dp_ppeds_txdesc_hotlist_len;
	qdf_mutex_t ppe_vp_tbl_lock;
	uint8_t num_ppe_vp_entries;
	uint8_t num_ppe_vp_search_idx_entries;
	uint8_t num_ppe_vp_profiles;
	char irq_name[DP_PPE_INTR_MAX][DP_PPE_INTR_STRNG_LEN];
	struct {
		struct {
			uint64_t desc_alloc_failed;
#ifdef GLOBAL_ASSERT_AVOIDANCE
			uint32_t tx_comp_buf_src;
			uint32_t tx_comp_desc_null;
			uint32_t tx_comp_invalid_flag;
#endif
		} tx;
	} ppeds_stats;
#endif
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_enabled;
	uint8_t mlo_chip_id;
	struct dp_mlo_ctxt *ml_ctxt;
	uint64_t delta_tqm;
	uint64_t mlo_tstamp_offset;
#else
	/* Protect mld peer hash table */
	DP_MUTEX_TYPE mld_peer_hash_lock;
	struct {
		uint32_t mask;
		uint32_t idx_bits;
		TAILQ_HEAD(, dp_peer) *bins;
	} mld_peer_hash;
	/* MLO device ctxt list */
	TAILQ_HEAD(, dp_mlo_dev_ctxt) mlo_dev_list;
	qdf_spinlock_t mlo_dev_list_lock;
#endif
#endif
#ifdef IPA_OFFLOAD
	int8_t ipa_bank_id;
#endif
};

/* convert struct dp_soc_be pointer to struct dp_soc pointer */
#define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)

/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 * @mlo_link_id: MLO link id for PDEV
 * @delta_tsf2: delta_tsf2
 */
struct dp_pdev_be {
	struct dp_pdev pdev;
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_link_id;
	uint64_t delta_tsf2;
#endif
};

/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
 * @mlo_stats: structure to hold stats for mlo unmapped peers
 * @mcast_primary: MLO Mcast primary vdev
 * @mlo_dev_ctxt: MLO device context pointer
 */
struct dp_vdev_be {
	struct dp_vdev vdev;
	int8_t bank_id;
	uint8_t vdev_id_check_en;
#ifdef WLAN_MLO_MULTI_CHIP
	struct cdp_vdev_stats mlo_stats;
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MCAST_MLO
	bool mcast_primary;
#endif
#endif
#endif
#ifdef WLAN_FEATURE_11BE_MLO
	struct dp_mlo_dev_ctxt *mlo_dev_ctxt;
#endif /* WLAN_FEATURE_11BE_MLO */
};

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_DP_MLO_DEV_CTX)
/**
 * struct dp_mlo_dev_ctxt - Datapath MLO device context
 *
 * @ml_dev_list_elem: node in the ML dev list of Global MLO context
 * @mld_mac_addr: MLO device MAC address
 * @vdev_list: list of vdevs associated with this MLO connection
 * @bridge_vdev: list of bridge vdevs associated with this MLO connection
 * @is_bridge_vdev_present: flag to check if bridge vdev is present
 * @vdev_list_lock: lock to protect vdev list
 * @vdev_count: number of elements in the vdev list
 * @seq_num: DP MLO multicast sequence number
 * @ref_cnt: reference count
 * @mod_refs: module reference count
 * @ref_delete_pending: flag to monitor last ref delete
 * @stats: structure to store vdev stats of removed MLO Link
 */
struct dp_mlo_dev_ctxt {
	TAILQ_ENTRY(dp_mlo_dev_ctxt) ml_dev_list_elem;
	union dp_align_mac_addr mld_mac_addr;
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t vdev_list[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC];
	uint8_t bridge_vdev[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC];
	bool is_bridge_vdev_present;
	qdf_spinlock_t vdev_list_lock;
	uint16_t vdev_count;
	uint16_t seq_num;
#endif
	qdf_atomic_t ref_cnt;
	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
	uint8_t ref_delete_pending;
	struct cdp_vdev_stats stats;
};
#endif /* WLAN_FEATURE_11BE_MLO && WLAN_DP_MLO_DEV_CTX */

/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 * @priority_valid:
 */
struct dp_peer_be {
	struct dp_peer peer;
#ifdef WLAN_SUPPORT_PPEDS
	uint8_t priority_valid;
#endif
};

/**
 * dp_get_soc_context_size_be() - get context size for target specific DP soc
 *
 * Return: value in bytes for BE specific soc structure
 */
qdf_size_t dp_get_soc_context_size_be(void);

/**
 * dp_initialize_arch_ops_be() - initialize BE specific arch ops
 * @arch_ops: arch ops pointer
 *
 * Return: none
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);

/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: context type for which the size is needed
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);

/**
 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
 * @soc: dp_soc pointer
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
	return (struct dp_soc_be *)soc;
}

/**
 * dp_mlo_iter_ptnr_soc() - iterate through mlo soc list and call the callback
 * @be_soc: dp_soc_be pointer
 * @func: Function to be called for each soc
 * @arg: context to be passed to the callback
 *
 * Return: true if mlo is enabled, false if mlo is disabled
 */
bool dp_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc, dp_ptnr_soc_iter_func func,
			  void *arg);

#ifdef WLAN_MLO_MULTI_CHIP
typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t;
typedef struct dp_mlo_ctxt *dp_mlo_dev_obj_t;

/**
 * dp_mlo_get_peer_hash_obj() - return the container struct of MLO hash table
 * @soc: soc handle
 *
 * return: MLD peer hash object
 */
static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return be_soc->ml_ctxt;
}

/**
 * dp_get_mlo_dev_list_obj() - return the container struct of MLO Dev list
 * @be_soc: be soc handle
 *
 * return: MLO dev list object
 */
static inline dp_mlo_dev_obj_t
dp_get_mlo_dev_list_obj(struct dp_soc_be *be_soc)
{
	return be_soc->ml_ctxt;
}

#if defined(WLAN_FEATURE_11BE_MLO)
/**
 * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
 * @soc: Soc handle
 * @peer: DP peer handle for ML peer
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_mlo_partner_chips_map(struct dp_soc *soc,
			      struct dp_peer *peer,
			      uint16_t peer_id);

/**
 * dp_mlo_partner_chips_unmap() - Unmap MLO peers to partner SOCs
 * @soc: Soc handle
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
				uint16_t peer_id);

/**
 * dp_soc_initialize_cdp_cmn_mlo_ops() - Initialize common CDP API's
 * @soc: Soc handle
 *
 * Return: None
 */
void dp_soc_initialize_cdp_cmn_mlo_ops(struct dp_soc *soc);

#ifdef WLAN_MLO_MULTI_CHIP
typedef void dp_ptnr_vdev_iter_func(struct dp_vdev_be *be_vdev,
				    struct dp_vdev *ptnr_vdev,
				    void *arg);

/**
 * dp_mlo_iter_ptnr_vdev() - API to iterate through ptnr vdev list
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @func: function to be called for each partner vdev
 * @arg: argument to be passed to func
 * @mod_id: module id
 * @type: iterate type
 * @include_self_vdev: flag to include/exclude self vdev in iteration
 *
 * Return: None
 */
void dp_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
			   struct dp_vdev_be *be_vdev,
			   dp_ptnr_vdev_iter_func func, void *arg,
			   enum dp_mod_id mod_id,
			   uint8_t type,
			   bool include_self_vdev);
#endif

#ifdef WLAN_MCAST_MLO
/**
 * dp_mlo_get_mcast_primary_vdev() - get ref to mcast primary vdev
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @mod_id: module id
 *
 * Return: mcast primary DP VDEV handle on success, NULL on failure
 */
struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
					      struct dp_vdev_be *be_vdev,
					      enum dp_mod_id mod_id);
#endif
#endif

#else
typedef struct dp_soc_be *dp_mld_peer_hash_obj_t;
typedef struct dp_soc_be *dp_mlo_dev_obj_t;

static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	return dp_get_be_soc_from_dp_soc(soc);
}

static inline dp_mlo_dev_obj_t
dp_get_mlo_dev_list_obj(struct dp_soc_be *be_soc)
{
	return be_soc;
}
#endif

#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
static inline
struct dp_hw_cookie_conversion_t *dp_get_tx_cookie_t(struct dp_soc *soc,
						     uint8_t pool_id)
{
	struct dp_global_context *dp_global = NULL;

	dp_global = wlan_objmgr_get_global_ctx();
	return dp_global->tx_cc_ctx[pool_id];
}

static inline
struct dp_hw_cookie_conversion_t *dp_get_spcl_tx_cookie_t(struct dp_soc *soc,
							  uint8_t pool_id)
{
	struct dp_global_context *dp_global = NULL;

	dp_global = wlan_objmgr_get_global_ctx();
	return dp_global->spcl_tx_cc_ctx[pool_id];
}
#else
static inline
struct dp_hw_cookie_conversion_t *dp_get_tx_cookie_t(struct dp_soc *soc,
						     uint8_t pool_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return &be_soc->tx_cc_ctx[pool_id];
}

static inline
struct dp_hw_cookie_conversion_t *dp_get_spcl_tx_cookie_t(struct dp_soc *soc,
							  uint8_t pool_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return &be_soc->tx_cc_ctx[pool_id];
}
#endif

/**
 * dp_mlo_peer_find_hash_attach_be() - API to initialize ML peer hash table
 * @mld_hash_obj: Peer hash object
 * @hash_elems: number of entries in hash table
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_FAILURE otherwise
 */
QDF_STATUS
dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
				int hash_elems);

/**
 * dp_mlo_peer_find_hash_detach_be() - API to de-initialize ML peer hash table
 *
 * @mld_hash_obj: Peer hash object
 *
 * Return: void
 */
void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj);

/**
 * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
 * @pdev: dp_pdev pointer
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
	return (struct dp_pdev_be *)pdev;
}

/**
 * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
 * @vdev: dp_vdev pointer
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
	return (struct dp_vdev_be *)vdev;
}

/**
 * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
 * @peer: dp_peer pointer
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
	return (struct dp_peer_be *)peer;
}

void dp_ppeds_disable_irq(struct dp_soc *soc, struct dp_srng *srng);
void dp_ppeds_enable_irq(struct dp_soc *soc, struct dp_srng *srng);
QDF_STATUS dp_peer_setup_ppeds_be(struct dp_soc *soc, struct dp_peer *peer,
				  struct dp_vdev_be *be_vdev,
				  void *args);

QDF_STATUS
dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx,
			       uint32_t num_descs,
			       enum qdf_dp_desc_type desc_type,
			       uint8_t desc_pool_id);

void dp_reo_shared_qaddr_detach(struct dp_soc *soc);

QDF_STATUS
dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);
QDF_STATUS
dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
			     struct dp_hw_cookie_conversion_t *cc_ctx);
QDF_STATUS
dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);

/**
 * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptors from pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @num_desc: number of TX/RX descs required for SPT pages
 *
 * Return: number of SPT page descs allocated
 */
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
				   struct dp_spt_page_desc **list_head,
				   struct dp_spt_page_desc **list_tail,
				   uint16_t num_desc);

/**
 * dp_cc_spt_page_desc_free() - free SPT DDR page descriptors back to pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @page_nums: number of page descs freed back to pool
 */
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
			      struct dp_spt_page_desc **list_head,
			      struct dp_spt_page_desc **list_tail,
			      uint16_t page_nums);

/**
 * dp_cc_desc_id_generate() - generate SW cookie ID according to
 *                            DDR page 4K aligned or not
 * @ppt_index: offset index in primary page table
 * @spt_index: offset index in secondary DDR page
 *
 * Generate the SW cookie ID in the format HW expects.
 *
 * Return: cookie ID
 */
static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
					      uint16_t spt_index)
{
	/*
	 * For the 4K aligned case, the CMEM entry size is 4 bytes:
	 * the HW index in bit19~bit10 is ppt_index / 2 and the high-32-bits
	 * flag in bit9 is ppt_index % 2, so bit19~bit9 together carry
	 * exactly the original ppt_index value.
	 * For the 4K unaligned case, the CMEM entry size is 8 bytes:
	 * bit19~bit9 is the HW index value, same as ppt_index.
	 */
	return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
		spt_index);
}

/**
 * dp_cc_desc_find() - find TX/RX desc virtual address by ID
 * @soc: be soc handle
 * @desc_id: TX/RX desc ID
 *
 * Return: TX/RX desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
					uint32_t desc_id)
{
	struct dp_soc_be *be_soc;
	uint16_t ppt_page_id, spt_va_id;
	uint8_t *spt_page_va;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;

	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

	/*
	 * The ppt index in CMEM follows the same order as the pages in the
	 * page desc array set up during initialization.
	 * The entry size in a DDR page is 64 bits; on 32-bit systems only
	 * the lower 32-bit VA value is needed.
	 */
	spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;

	return (*((uintptr_t *)(spt_page_va +
				spt_va_id * DP_CC_HW_READ_BYTES)));
}
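
/*
 * Worked example for the cookie round trip (values are illustrative):
 * a descriptor placed on SPT page 3, entry 5, gets
 *
 *   desc_id = dp_cc_desc_id_generate(3, 5);  // (3 << 9) | 5 = 0x605
 *
 * and dp_cc_desc_find(soc, 0x605) recovers ppt_page_id = 3 and
 * spt_va_id = 5 via the masks above, then reads the 8-byte VA slot at
 * page_v_addr + 5 * DP_CC_HW_READ_BYTES.
 */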

/**
 * dp_update_mlo_ctxt_stats() - aggregate stats from mlo ctx
 * @buf: vdev stats buf
 * @mlo_ctxt_stats: mlo ctxt stats
 *
 * Return: void
 */
static inline
void dp_update_mlo_ctxt_stats(void *buf,
			      struct cdp_vdev_stats *mlo_ctxt_stats)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)buf;

	DP_UPDATE_VDEV_STATS(tgt_vdev_stats, mlo_ctxt_stats);
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG Near FULL levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
 *			 of processing the entries in SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
 *			      of processing the entries in SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
 *			     condition and drastic steps need to be taken for
 *			     processing the entries in SRNG
 */
enum dp_srng_near_full_levels {
	DP_SRNG_THRESH_SAFE,
	DP_SRNG_THRESH_NEAR_FULL,
	DP_SRNG_THRESH_CRITICAL,
};

/**
 * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
 *				    its corresponding near-full irq handler
 * @soc: Datapath SoC handle
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: 1, if the srng was marked as near-full
 *	   0, if the srng was not marked as near-full
 */
static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
					       struct dp_srng *dp_srng)
{
	return qdf_atomic_read(&dp_srng->near_full);
}

/**
 * dp_srng_get_near_full_level() - Check the num available entries in the
 *				   consumer srng and return the level of the
 *				   srng near-full state.
 * @soc: Datapath SoC Handle [To be validated by the caller]
 * @dp_srng: SRNG handle
 *
 * Return: near-full level
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
	uint32_t num_valid;

	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
						  dp_srng->hal_srng,
						  true);

	if (num_valid > dp_srng->crit_thresh)
		return DP_SRNG_THRESH_CRITICAL;
	else if (num_valid < dp_srng->safe_thresh)
		return DP_SRNG_THRESH_SAFE;
	else
		return DP_SRNG_THRESH_NEAR_FULL;
}

#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER 2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near full level and update
 *					  the reap_limit and flags to reflect
 *					  the state.
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *		    per the near-full state
 *
 * Return: 1, if the srng is near full
 *	   0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	int ring_near_full = 0, near_full_level;

	if (dp_srng_check_ring_near_full(soc, srng)) {
		near_full_level = dp_srng_get_near_full_level(soc, srng);
		switch (near_full_level) {
		case DP_SRNG_THRESH_CRITICAL:
			/* Currently not doing anything special here */
			fallthrough;
		case DP_SRNG_THRESH_NEAR_FULL:
			ring_near_full = 1;
			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
			break;
		case DP_SRNG_THRESH_SAFE:
			qdf_atomic_set(&srng->near_full, 0);
			ring_near_full = 0;
			break;
		default:
			qdf_assert(0);
			break;
		}
	}

	return ring_near_full;
}
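
/*
 * A minimal caller sketch (illustrative; 'quota' is a placeholder): a ring
 * handler typically seeds max_reap_limit with its normal per-pass budget
 * and lets this helper scale it when the near-full interrupt has fired:
 *
 *   int max_reap_limit = quota;
 *
 *   if (_dp_srng_test_and_update_nf_params(soc, srng, &max_reap_limit)) {
 *           // near full: reap up to the scaled max_reap_limit this pass
 *   }
 */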
#else
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	return 0;
}
#endif

#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
static inline
uint32_t dp_desc_pool_get_spcl_cmem_base(uint8_t desc_pool_id)
{
	return (DP_TX_SPCL_DESC_CMEM_OFFSET +
		(desc_pool_id * DP_TX_SPCL_DESC_POOL_CMEM_SIZE));
}
#else
static inline
uint32_t dp_desc_pool_get_spcl_cmem_base(uint8_t desc_pool_id)
{
	QDF_BUG(0);
	return 0;
}
#endif

static inline
uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
				    enum qdf_dp_desc_type desc_type)
{
	switch (desc_type) {
	case QDF_DP_TX_DESC_TYPE:
		return (DP_TX_DESC_CMEM_OFFSET +
			(desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
	case QDF_DP_TX_SPCL_DESC_TYPE:
		return dp_desc_pool_get_spcl_cmem_base(desc_pool_id);
	case QDF_DP_RX_DESC_BUF_TYPE:
		return (DP_RX_DESC_CMEM_OFFSET +
			((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
			DP_RX_DESC_POOL_CMEM_SIZE);
	case QDF_DP_TX_PPEDS_DESC_TYPE:
		return DP_TX_PPEDS_DESC_CMEM_OFFSET;
	default:
		QDF_BUG(0);
	}
	return 0;
}

#ifndef WLAN_MLO_MULTI_CHIP
static inline
void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
}

static inline
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
}

static inline
void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
}

static inline
void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
}

static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	return 0;
}
#endif

/**
 * dp_mlo_dev_ctxt_list_attach_wrapper() - Wrapper API for MLO dev list Init
 *
 * @mlo_dev_obj: MLO device object
 *
 * Return: void
 */
void dp_mlo_dev_ctxt_list_attach_wrapper(dp_mlo_dev_obj_t mlo_dev_obj);

/**
 * dp_mlo_dev_ctxt_list_detach_wrapper() - Wrapper API for MLO dev list de-Init
 *
 * @mlo_dev_obj: MLO device object
 *
 * Return: void
 */
void dp_mlo_dev_ctxt_list_detach_wrapper(dp_mlo_dev_obj_t mlo_dev_obj);

/**
 * dp_mlo_dev_ctxt_list_attach() - API to initialize MLO device List
 *
 * @mlo_dev_obj: MLO device object
 *
 * Return: void
 */
void dp_mlo_dev_ctxt_list_attach(dp_mlo_dev_obj_t mlo_dev_obj);

/**
 * dp_mlo_dev_ctxt_list_detach() - API to de-initialize MLO device List
 *
 * @mlo_dev_obj: MLO device object
 *
 * Return: void
 */
void dp_mlo_dev_ctxt_list_detach(dp_mlo_dev_obj_t mlo_dev_obj);

/**
 * dp_soc_initialize_cdp_cmn_mlo_ops() - API to initialize common CDP MLO ops
 *
 * @soc: Datapath soc handle
 *
 * Return: void
 */
void dp_soc_initialize_cdp_cmn_mlo_ops(struct dp_soc *soc);

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_DP_MLO_DEV_CTX)
/**
 * dp_mlo_dev_ctxt_unref_delete() - Releasing the ref for MLO device ctxt
 *
 * @mlo_dev_ctxt: MLO device context handle
 * @mod_id: module id which is releasing the reference
 *
 * Return: void
 */
void dp_mlo_dev_ctxt_unref_delete(struct dp_mlo_dev_ctxt *mlo_dev_ctxt,
				  enum dp_mod_id mod_id);

/**
 * dp_mlo_dev_get_ref() - Get the ref for MLO device ctxt
 *
 * @mlo_dev_ctxt: MLO device context handle
 * @mod_id: module id which is requesting the reference
 *
 * Return: SUCCESS on acquiring the ref.
 */
QDF_STATUS
dp_mlo_dev_get_ref(struct dp_mlo_dev_ctxt *mlo_dev_ctxt,
		   enum dp_mod_id mod_id);

/**
 * dp_get_mlo_dev_ctx_by_mld_mac_addr() - Get MLO device ctx based on MLD MAC
 *
 * @be_soc: be soc handle
 * @mldaddr: MLD MAC address
 * @mod_id: module id which is requesting the reference
 *
 * Return: MLO device context Handle on success, NULL on failure
 */
struct dp_mlo_dev_ctxt *
dp_get_mlo_dev_ctx_by_mld_mac_addr(struct dp_soc_be *be_soc,
				   uint8_t *mldaddr, enum dp_mod_id mod_id);
#endif /* WLAN_DP_MLO_DEV_CTX */
#endif