dp_be.h

/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __DP_BE_H
#define __DP_BE_H

#include <dp_types.h>
#include <hal_be_tx.h>
#ifdef WLAN_MLO_MULTI_CHIP
#include "mlo/dp_mlo.h"
#else
#include <dp_peer.h>
#endif
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif

enum CMEM_MEM_CLIENTS {
        COOKIE_CONVERSION,
        FISA_FST,
};
/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* mask for the offset within one page of the secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)

/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES \
        DP_CC_PPT_MEM_SIZE / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED

/* cookie conversion required CMEM offset from CMEM pool */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size 4K */
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
#define DP_CC_PPT_MEM_SIZE 4096
#else
#define DP_CC_PPT_MEM_SIZE 8192
#endif

/* FST required CMEM offset from CMEM pool */
#define DP_FST_MEM_OFFSET_IN_CMEM \
        (DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0
#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF
#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0
#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* higher 11 bits in Desc ID for offset in CMEM of PPT */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19
#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00
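
/*
 * Illustrative decomposition using the masks above: for a 20-bit
 * cookie desc_id = 0x12345, the PPT page offset is
 * (0x12345 & 0xFFE00) >> 9 = 0x91 and the offset within that SPT page
 * is 0x12345 & 0x1FF = 0x145.
 */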
/*
 * page 4K unaligned case, single SPT page physical address
 * needs 8 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8

/*
 * page 4K aligned case, single SPT page physical address
 * needs 4 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/* 4K aligned case, number of bits HW appends for one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 3
#else
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 5
#endif

#ifdef WLAN_SUPPORT_PPEDS
#define DP_PPEDS_STAMODE_ASTIDX_MAP_REG_IDX 1
/* The MAX PPE PRI2TID */
#define DP_TX_INT_PRI2TID_MAX 15

/* size of CMEM needed for a ppeds tx desc pool */
#define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE \
        ((WLAN_CFG_NUM_PPEDS_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
         DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* Offset of ppeds tx descriptor pool */
#define DP_TX_PPEDS_DESC_CMEM_OFFSET 0

#define PEER_ROUTING_USE_PPE 1
#define PEER_ROUTING_ENABLED 1

#define DP_PPE_INTR_STRNG_LEN 32
#define DP_PPE_INTR_MAX 3
#else
#define DP_TX_PPEDS_DESC_CMEM_OFFSET 0
#define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE 0

#define DP_PPE_INTR_STRNG_LEN 0
#define DP_PPE_INTR_MAX 0
#endif

/* tx descriptors are programmed at the start of the CMEM region */
#define DP_TX_DESC_CMEM_OFFSET \
        (DP_TX_PPEDS_DESC_CMEM_OFFSET + DP_TX_PPEDS_DESC_POOL_CMEM_SIZE)

/* size of CMEM needed for a tx desc pool */
#define DP_TX_DESC_POOL_CMEM_SIZE \
        ((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
         DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

#ifndef QCA_SUPPORT_DP_GLOBAL_CTX
/* Offset of rx descriptor pool */
#define DP_RX_DESC_CMEM_OFFSET \
        DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE)
#else
/* tx special descriptors are programmed after the tx desc CMEM region */
#define DP_TX_SPCL_DESC_CMEM_OFFSET \
        DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE)

/* size of CMEM needed for a tx special desc pool */
#define DP_TX_SPCL_DESC_POOL_CMEM_SIZE \
        ((WLAN_CFG_NUM_TX_SPL_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
         DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* Offset of rx descriptor pool */
#define DP_RX_DESC_CMEM_OFFSET \
        DP_TX_SPCL_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * \
        DP_TX_SPCL_DESC_POOL_CMEM_SIZE)
#endif

/* size of CMEM needed for a rx desc pool */
#define DP_RX_DESC_POOL_CMEM_SIZE \
        ((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
         DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* get ppt_id from CMEM_OFFSET */
#define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
        ((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
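
/*
 * Layout of the cookie-conversion CMEM region implied by the offsets
 * above: PPEDS tx desc pools (when WLAN_SUPPORT_PPEDS), then regular
 * tx desc pools, then tx special desc pools (when
 * QCA_SUPPORT_DP_GLOBAL_CTX), then rx desc pools.
 * DP_CMEM_OFFSET_TO_PPT_ID() maps any of these byte offsets to the
 * first primary page table index of the pool, e.g. an offset of
 * 64 bytes maps to ppt_id 16 with 4-byte PPT entries.
 */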
/**
 * struct dp_spt_page_desc - secondary page table page descriptors
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in primary page table where this page physical
 *             address is stored
 */
struct dp_spt_page_desc {
        uint8_t *page_v_addr;
        qdf_dma_addr_t page_p_addr;
        uint32_t ppt_index;
};

/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_offset: CMEM offset from base address for primary page table setup
 * @total_page_num: total DDR pages allocated
 * @page_desc_base: page desc buffer base address
 * @page_pool: DDR pages pool
 * @cc_lock: lock for page acquiring/freeing
 */
struct dp_hw_cookie_conversion_t {
        uint32_t cmem_offset;
        uint32_t total_page_num;
        struct dp_spt_page_desc *page_desc_base;
        struct qdf_mem_multi_page_t page_pool;
        qdf_spinlock_t cc_lock;
};

/**
 * struct dp_spt_page_desc_list - container of SPT page desc list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
        struct dp_spt_page_desc *spt_page_list_head;
        struct dp_spt_page_desc *spt_page_list_tail;
        uint16_t num_spt_pages;
};

/* HW reads 8 bytes for VA */
#define DP_CC_HW_READ_BYTES 8
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
        { *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
        = (uintptr_t)(_desc_va); }
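
/*
 * Typical usage (illustrative, variable names are not from this file):
 * when a tx/rx descriptor is bound to an SPT slot, its virtual address
 * is written into the 8-byte entry so that a HW-provided cookie can be
 * converted back to the descriptor later:
 *
 *      DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr, spt_idx, tx_desc);
 */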
/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: ref count indicating number of users of the bank
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
        uint8_t is_configured;
        qdf_atomic_t ref_count;
        union hal_tx_bank_config bank_config;
};

#ifdef WLAN_SUPPORT_PPEDS
/**
 * struct dp_ppe_vp_tbl_entry - PPE Virtual table entry
 * @is_configured: Boolean indicating that the entry is configured
 */
struct dp_ppe_vp_tbl_entry {
        bool is_configured;
};

/**
 * struct dp_ppe_vp_search_idx_tbl_entry - PPE Virtual search table entry
 * @is_configured: Boolean indicating that the entry is configured
 */
struct dp_ppe_vp_search_idx_tbl_entry {
        bool is_configured;
};

/**
 * struct dp_ppe_vp_profile - PPE direct switch profile per vdev
 * @is_configured: Boolean indicating that the entry is configured
 * @vp_num: Virtual port number
 * @ppe_vp_num_idx: Index to the PPE VP table entry
 * @search_idx_reg_num: Address search index register number
 * @drop_prec_enable: Drop precedence enable
 * @to_fw: To FW exception enable/disable
 * @use_ppe_int_pri: Use PPE INT_PRI to TID mapping table
 * @vdev_id: Vdev ID
 */
struct dp_ppe_vp_profile {
        bool is_configured;
        uint8_t vp_num;
        uint8_t ppe_vp_num_idx;
        uint8_t search_idx_reg_num;
        uint8_t drop_prec_enable;
        uint8_t to_fw;
        uint8_t use_ppe_int_pri;
        uint8_t vdev_id;
};

/**
 * struct dp_ppeds_tx_desc_pool_s - PPEDS Tx Descriptor Pool
 * @elem_size: Size of each descriptor
 * @hot_list_len: Length of hotlist chain
 * @num_allocated: Number of used descriptors
 * @freelist: Chain of free descriptors
 * @hotlist: Chain of descriptors with attached nbufs
 * @desc_pages: multiple page allocation information for actual descriptors
 * @elem_count: Number of descriptors in the pool
 * @num_free: Number of free descriptors
 * @lock: Lock for descriptor allocation/free from/to the pool
 */
struct dp_ppeds_tx_desc_pool_s {
        uint16_t elem_size;
        uint32_t num_allocated;
        uint32_t hot_list_len;
        struct dp_tx_desc_s *freelist;
        struct dp_tx_desc_s *hotlist;
        struct qdf_mem_multi_page_t desc_pages;
        uint16_t elem_count;
        uint32_t num_free;
        qdf_spinlock_t lock;
};
#endif

/**
 * struct dp_ppeds_napi - napi parameters for ppe ds
 * @napi: napi structure to register with napi infra
 * @ndev: net_dev structure
 */
struct dp_ppeds_napi {
        struct napi_struct napi;
        struct net_device ndev;
};
/*
 * NB: intentionally not using kernel-doc comment because the kernel-doc
 * script does not handle the TAILQ_HEAD macro
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: lock for @bank_profiles
 * @bank_profiles: bank profiles for various TX banks
 * @page_desc_base: base of the SPT page descriptor array
 * @cc_cmem_base: cmem offset reserved for CC
 * @tx_cc_ctx: Cookie conversion context for tx desc pools
 * @rx_cc_ctx: Cookie conversion context for rx desc pools
 * @ppeds_int_mode_enabled: PPE DS interrupt mode enabled
 * @ppeds_stopped: flag indicating PPE DS is stopped
 * @reo2ppe_ring: REO2PPE ring
 * @ppe2tcl_ring: PPE2TCL ring
 * @ppeds_wbm_release_ring: PPE DS WBM release ring
 * @ppe_vp_tbl: PPE VP table
 * @ppe_vp_search_idx_tbl: PPE VP search idx table
 * @ppe_vp_profile: PPE VP profiles
 * @ppeds_tx_cc_ctx: Cookie conversion context for ppeds tx desc pool
 * @ppeds_tx_desc: PPEDS tx desc pool
 * @ppeds_napi_ctxt: napi context for PPE DS
 * @ppeds_handle: PPEDS soc instance handle
 * @dp_ppeds_txdesc_hotlist_len: PPEDS tx desc hotlist length
 * @ppe_vp_tbl_lock: PPE VP table lock
 * @num_ppe_vp_entries: Number of PPE VP entries
 * @num_ppe_vp_search_idx_entries: PPEDS VP search idx entries
 * @num_ppe_vp_profiles: Number of PPE VP profiles
 * @irq_name: PPEDS VP irq names
 * @ppeds_stats: PPEDS stats
 * @mlo_enabled: Flag to indicate MLO is enabled or not
 * @mlo_chip_id: MLO chip_id
 * @ml_ctxt: pointer to global ml_context
 * @delta_tqm: delta_tqm
 * @mlo_tstamp_offset: mlo timestamp offset
 * @mld_peer_hash_lock: lock to protect mld_peer_hash
 * @mld_peer_hash: peer hash table for ML peers
 * @mlo_dev_list: list of MLO device contexts
 * @mlo_dev_list_lock: lock to protect MLO device ctxt
 * @ipa_bank_id: TCL bank id used by IPA
 */
struct dp_soc_be {
        struct dp_soc soc;
        uint8_t num_bank_profiles;
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
        qdf_mutex_t tx_bank_lock;
#else
        qdf_spinlock_t tx_bank_lock;
#endif
        struct dp_tx_bank_profile *bank_profiles;
        struct dp_spt_page_desc *page_desc_base;
        uint32_t cc_cmem_base;
        struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
        struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
        uint8_t ppeds_int_mode_enabled:1,
                ppeds_stopped:1;
        struct dp_srng reo2ppe_ring;
        struct dp_srng ppe2tcl_ring;
        struct dp_srng ppeds_wbm_release_ring;
        struct dp_ppe_vp_tbl_entry *ppe_vp_tbl;
        struct dp_ppe_vp_search_idx_tbl_entry *ppe_vp_search_idx_tbl;
        struct dp_ppe_vp_profile *ppe_vp_profile;
        struct dp_hw_cookie_conversion_t ppeds_tx_cc_ctx;
        struct dp_ppeds_tx_desc_pool_s ppeds_tx_desc;
        struct dp_ppeds_napi ppeds_napi_ctxt;
        void *ppeds_handle;
        int dp_ppeds_txdesc_hotlist_len;
        qdf_mutex_t ppe_vp_tbl_lock;
        uint8_t num_ppe_vp_entries;
        uint8_t num_ppe_vp_search_idx_entries;
        uint8_t num_ppe_vp_profiles;
        char irq_name[DP_PPE_INTR_MAX][DP_PPE_INTR_STRNG_LEN];
        struct {
                struct {
                        uint64_t desc_alloc_failed;
#ifdef GLOBAL_ASSERT_AVOIDANCE
                        uint32_t tx_comp_buf_src;
                        uint32_t tx_comp_desc_null;
                        uint32_t tx_comp_invalid_flag;
#endif
                } tx;
        } ppeds_stats;
#endif
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
        uint8_t mlo_enabled;
        uint8_t mlo_chip_id;
        struct dp_mlo_ctxt *ml_ctxt;
        uint64_t delta_tqm;
        uint64_t mlo_tstamp_offset;
#else
        /* Protect mld peer hash table */
        DP_MUTEX_TYPE mld_peer_hash_lock;
        struct {
                uint32_t mask;
                uint32_t idx_bits;
                TAILQ_HEAD(, dp_peer) *bins;
        } mld_peer_hash;

        /* MLO device ctxt list */
        TAILQ_HEAD(, dp_mlo_dev_ctxt) mlo_dev_list;
        qdf_spinlock_t mlo_dev_list_lock;
#endif
#endif
#ifdef IPA_OFFLOAD
        int8_t ipa_bank_id;
#endif
};
/* convert struct dp_soc_be pointer to struct dp_soc pointer */
#define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)

/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 * @mlo_link_id: MLO link id for PDEV
 * @delta_tsf2: delta_tsf2
 */
struct dp_pdev_be {
        struct dp_pdev pdev;
#ifdef WLAN_MLO_MULTI_CHIP
        uint8_t mlo_link_id;
        uint64_t delta_tsf2;
#endif
};

/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
 * @mlo_stats: structure to hold stats for mlo unmapped peers
 * @mcast_primary: MLO Mcast primary vdev
 * @mlo_dev_ctxt: MLO device context pointer
 */
struct dp_vdev_be {
        struct dp_vdev vdev;
        int8_t bank_id;
        uint8_t vdev_id_check_en;
#ifdef WLAN_MLO_MULTI_CHIP
        struct cdp_vdev_stats mlo_stats;
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MCAST_MLO
        bool mcast_primary;
#endif
#endif
#endif
#ifdef WLAN_FEATURE_11BE_MLO
        struct dp_mlo_dev_ctxt *mlo_dev_ctxt;
#endif /* WLAN_FEATURE_11BE_MLO */
};
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_DP_MLO_DEV_CTX)
/**
 * struct dp_mlo_dev_ctxt - Datapath MLO device context
 * @ml_dev_list_elem: node in the ML dev list of Global MLO context
 * @mld_mac_addr: MLO device MAC address
 * @vdev_list: list of vdevs associated with this MLO connection
 * @bridge_vdev: list of bridge vdevs associated with this MLO connection
 * @is_bridge_vdev_present: flag to check if bridge vdev is present
 * @vdev_list_lock: lock to protect vdev list
 * @vdev_count: number of elements in the vdev list
 * @seq_num: DP MLO multicast sequence number
 * @ref_cnt: reference count
 * @mod_refs: module reference count
 * @ref_delete_pending: flag to monitor last ref delete
 * @stats: structure to store vdev stats of removed MLO Link
 */
struct dp_mlo_dev_ctxt {
        TAILQ_ENTRY(dp_mlo_dev_ctxt) ml_dev_list_elem;
        union dp_align_mac_addr mld_mac_addr;
#ifdef WLAN_MLO_MULTI_CHIP
        uint8_t vdev_list[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC];
        uint8_t bridge_vdev[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC];
        bool is_bridge_vdev_present;
        qdf_spinlock_t vdev_list_lock;
        uint16_t vdev_count;
        uint16_t seq_num;
#endif
        qdf_atomic_t ref_cnt;
        qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
        uint8_t ref_delete_pending;
        struct dp_vdev_stats stats;
};
#endif /* WLAN_FEATURE_11BE_MLO && WLAN_DP_MLO_DEV_CTX */

/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 * @priority_valid: flag indicating if the priority is valid
 */
struct dp_peer_be {
        struct dp_peer peer;
#ifdef WLAN_SUPPORT_PPEDS
        uint8_t priority_valid;
#endif
};
/**
 * dp_get_soc_context_size_be() - get context size for target specific DP soc
 *
 * Return: value in bytes for BE specific soc structure
 */
qdf_size_t dp_get_soc_context_size_be(void);

/**
 * dp_initialize_arch_ops_be() - initialize BE specific arch ops
 * @arch_ops: arch ops pointer
 *
 * Return: none
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);

/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: context type for which the size is needed
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);

/**
 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
 * @soc: dp_soc pointer
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
        return (struct dp_soc_be *)soc;
}

/**
 * dp_mlo_iter_ptnr_soc() - iterate through mlo soc list and call the callback
 * @be_soc: dp_soc_be pointer
 * @func: Function to be called for each soc
 * @arg: context to be passed to the callback
 *
 * Return: true if mlo is enabled, false if mlo is disabled
 */
bool dp_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc, dp_ptnr_soc_iter_func func,
                          void *arg);

#ifdef WLAN_MLO_MULTI_CHIP
typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t;
typedef struct dp_mlo_ctxt *dp_mlo_dev_obj_t;

/**
 * dp_mlo_get_peer_hash_obj() - return the container struct of MLO hash table
 * @soc: soc handle
 *
 * Return: MLD peer hash object
 */
static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
        struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

        return be_soc->ml_ctxt;
}

/**
 * dp_get_mlo_dev_list_obj() - return the container struct of MLO Dev list
 * @be_soc: be soc handle
 *
 * Return: MLO dev list object
 */
static inline dp_mlo_dev_obj_t
dp_get_mlo_dev_list_obj(struct dp_soc_be *be_soc)
{
        return be_soc->ml_ctxt;
}

#if defined(WLAN_FEATURE_11BE_MLO)
/**
 * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
 * @soc: Soc handle
 * @peer: DP peer handle for ML peer
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_mlo_partner_chips_map(struct dp_soc *soc,
                              struct dp_peer *peer,
                              uint16_t peer_id);

/**
 * dp_mlo_partner_chips_unmap() - Unmap MLO peers from partner SOCs
 * @soc: Soc handle
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
                                uint16_t peer_id);

/**
 * dp_soc_initialize_cdp_cmn_mlo_ops() - Initialize common CDP APIs
 * @soc: Soc handle
 *
 * Return: None
 */
void dp_soc_initialize_cdp_cmn_mlo_ops(struct dp_soc *soc);

#ifdef WLAN_MLO_MULTI_CHIP
typedef void dp_ptnr_vdev_iter_func(struct dp_vdev_be *be_vdev,
                                    struct dp_vdev *ptnr_vdev,
                                    void *arg);

/**
 * dp_mlo_iter_ptnr_vdev() - API to iterate through ptnr vdev list
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @func: function to be called for each partner vdev
 * @arg: argument to be passed to func
 * @mod_id: module id
 * @type: iterate type
 * @include_self_vdev: flag to include/exclude self vdev in iteration
 *
 * Return: None
 */
void dp_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
                           struct dp_vdev_be *be_vdev,
                           dp_ptnr_vdev_iter_func func, void *arg,
                           enum dp_mod_id mod_id,
                           uint8_t type,
                           bool include_self_vdev);
#endif

#ifdef WLAN_MCAST_MLO
/**
 * dp_mlo_get_mcast_primary_vdev() - get ref to mcast primary vdev
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @mod_id: module id
 *
 * Return: mcast primary DP VDEV handle on success, NULL on failure
 */
struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
                                              struct dp_vdev_be *be_vdev,
                                              enum dp_mod_id mod_id);
#endif
#endif
#else
typedef struct dp_soc_be *dp_mld_peer_hash_obj_t;
typedef struct dp_soc_be *dp_mlo_dev_obj_t;

static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
        return dp_get_be_soc_from_dp_soc(soc);
}

static inline dp_mlo_dev_obj_t
dp_get_mlo_dev_list_obj(struct dp_soc_be *be_soc)
{
        return be_soc;
}
#endif
#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
static inline
struct dp_hw_cookie_conversion_t *dp_get_tx_cookie_t(struct dp_soc *soc,
                                                     uint8_t pool_id)
{
        struct dp_global_context *dp_global = NULL;

        dp_global = wlan_objmgr_get_global_ctx();
        return dp_global->tx_cc_ctx[pool_id];
}

static inline
struct dp_hw_cookie_conversion_t *dp_get_spcl_tx_cookie_t(struct dp_soc *soc,
                                                          uint8_t pool_id)
{
        struct dp_global_context *dp_global = NULL;

        dp_global = wlan_objmgr_get_global_ctx();
        return dp_global->spcl_tx_cc_ctx[pool_id];
}
#else
static inline
struct dp_hw_cookie_conversion_t *dp_get_tx_cookie_t(struct dp_soc *soc,
                                                     uint8_t pool_id)
{
        struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

        return &be_soc->tx_cc_ctx[pool_id];
}

static inline
struct dp_hw_cookie_conversion_t *dp_get_spcl_tx_cookie_t(struct dp_soc *soc,
                                                          uint8_t pool_id)
{
        struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

        return &be_soc->tx_cc_ctx[pool_id];
}
#endif
/**
 * dp_mlo_peer_find_hash_attach_be() - API to initialize ML peer hash table
 * @mld_hash_obj: Peer hash object
 * @hash_elems: number of entries in hash table
 *
 * Return: QDF_STATUS_SUCCESS when attach is success else QDF_STATUS_FAILURE
 */
QDF_STATUS
dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
                                int hash_elems);

/**
 * dp_mlo_peer_find_hash_detach_be() - API to de-initialize ML peer hash table
 * @mld_hash_obj: Peer hash object
 *
 * Return: void
 */
void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj);

/**
 * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
 * @pdev: dp_pdev pointer
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
        return (struct dp_pdev_be *)pdev;
}

/**
 * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
 * @vdev: dp_vdev pointer
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
        return (struct dp_vdev_be *)vdev;
}

/**
 * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
 * @peer: dp_peer pointer
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
        return (struct dp_peer_be *)peer;
}

void dp_ppeds_disable_irq(struct dp_soc *soc, struct dp_srng *srng);

void dp_ppeds_enable_irq(struct dp_soc *soc, struct dp_srng *srng);

QDF_STATUS dp_peer_setup_ppeds_be(struct dp_soc *soc, struct dp_peer *peer,
                                  struct dp_vdev_be *be_vdev,
                                  void *args);

QDF_STATUS
dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
                               struct dp_hw_cookie_conversion_t *cc_ctx,
                               uint32_t num_descs,
                               enum qdf_dp_desc_type desc_type,
                               uint8_t desc_pool_id);

void dp_reo_shared_qaddr_detach(struct dp_soc *soc);

QDF_STATUS
dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
                               struct dp_hw_cookie_conversion_t *cc_ctx);

QDF_STATUS
dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
                             struct dp_hw_cookie_conversion_t *cc_ctx);

QDF_STATUS
dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
                               struct dp_hw_cookie_conversion_t *cc_ctx);
/**
 * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptors from pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @num_desc: number of TX/RX descs required for SPT pages
 *
 * Return: number of SPT page descs allocated
 */
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
                                   struct dp_spt_page_desc **list_head,
                                   struct dp_spt_page_desc **list_tail,
                                   uint16_t num_desc);

/**
 * dp_cc_spt_page_desc_free() - free SPT DDR page descriptors back to pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @page_nums: number of page descs freed back to pool
 */
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
                              struct dp_spt_page_desc **list_head,
                              struct dp_spt_page_desc **list_tail,
                              uint16_t page_nums);

/**
 * dp_cc_desc_id_generate() - generate SW cookie ID according to
 *                            DDR page 4K aligned or not
 * @ppt_index: offset index in primary page table
 * @spt_index: offset index in secondary DDR page
 *
 * Generate SW cookie ID to match what HW expects
 *
 * Return: cookie ID
 */
static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
                                              uint16_t spt_index)
{
        /*
         * For the 4K aligned case, the cmem entry size is 4 bytes:
         * the HW index in bit19~bit10 is ppt_index / 2 and the
         * high-32-bits flag in bit9 is ppt_index % 2, so bit19~bit9
         * carry exactly the original ppt_index value.
         * For the 4K unaligned case, the cmem entry size is 8 bytes:
         * bit19~bit9 hold the HW index value, which equals ppt_index.
         */
        return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
                spt_index);
}
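
/*
 * Worked example: ppt_index = 0x91 and spt_index = 0x145 give
 * (0x91 << 9) | 0x145 = 0x12345; dp_cc_desc_find() below splits this
 * back into the same page index and SPT offset.
 */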
/**
 * dp_cc_desc_find() - find TX/RX desc virtual address by ID
 * @soc: be soc handle
 * @desc_id: TX/RX desc ID
 *
 * Return: TX/RX desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
                                        uint32_t desc_id)
{
        struct dp_soc_be *be_soc;
        uint16_t ppt_page_id, spt_va_id;
        uint8_t *spt_page_va;

        be_soc = dp_get_be_soc_from_dp_soc(soc);
        ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
                        DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;

        spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
                        DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

        /*
         * The ppt index in cmem follows the same order as the pages in
         * the page desc array populated during initialization.
         * The entry size in a DDR page is 64 bits; on a 32-bit system
         * only the lower 32 bits of the VA value are needed.
         */
        spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;

        return (*((uintptr_t *)(spt_page_va +
                                spt_va_id * DP_CC_HW_READ_BYTES)));
}
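
/*
 * Round-trip sketch (illustrative, local names are hypothetical):
 *
 *      uint32_t id = dp_cc_desc_id_generate(page_desc->ppt_index, slot);
 *      struct dp_tx_desc_s *desc =
 *                      (struct dp_tx_desc_s *)dp_cc_desc_find(soc, id);
 */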
/**
 * dp_update_mlo_mld_vdev_ctxt_stats() - aggregate stats from mlo ctxt
 * @buf: vdev stats buf
 * @mlo_ctxt_stats: mlo ctxt stats
 *
 * Return: void
 */
static inline
void dp_update_mlo_mld_vdev_ctxt_stats(void *buf,
                                       struct dp_vdev_stats *mlo_ctxt_stats)
{
        struct dp_vdev_stats *tgt_vdev_stats = (struct dp_vdev_stats *)buf;

        DP_UPDATE_TO_MLD_VDEV_STATS(tgt_vdev_stats, mlo_ctxt_stats,
                                    DP_XMIT_TOTAL);
}

/**
 * dp_update_mlo_link_vdev_ctxt_stats() - aggregate stats from mlo ctxt
 * @buf: vdev stats buf
 * @mlo_ctxt_stats: mlo ctxt stats
 * @xmit_type: xmit type of packet - MLD/Link
 *
 * Return: void
 */
static inline
void dp_update_mlo_link_vdev_ctxt_stats(void *buf,
                                        struct dp_vdev_stats *mlo_ctxt_stats,
                                        enum dp_pkt_xmit_type xmit_type)
{
        struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)buf;

        DP_UPDATE_TO_LINK_VDEV_STATS(tgt_vdev_stats, mlo_ctxt_stats,
                                     xmit_type);
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG near-full levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near-full mode
 *              of processing the entries in SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near-full mode
 *              of processing the entries in SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
 *              condition and drastic steps need to be taken for processing
 *              the entries in SRNG
 */
enum dp_srng_near_full_levels {
        DP_SRNG_THRESH_SAFE,
        DP_SRNG_THRESH_NEAR_FULL,
        DP_SRNG_THRESH_CRITICAL,
};

/**
 * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
 *                                  its corresponding near-full irq handler
 * @soc: Datapath SoC handle
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: 1, if the srng was marked as near-full
 *         0, if the srng was not marked as near-full
 */
static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
                                               struct dp_srng *dp_srng)
{
        return qdf_atomic_read(&dp_srng->near_full);
}

/**
 * dp_srng_get_near_full_level() - Check the number of available entries in
 *                                 the consumer srng and return the level of
 *                                 the srng near-full state
 * @soc: Datapath SoC Handle [To be validated by the caller]
 * @dp_srng: SRNG handle
 *
 * Return: near-full level
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
        uint32_t num_valid;

        num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
                                                  dp_srng->hal_srng,
                                                  true);

        if (num_valid > dp_srng->crit_thresh)
                return DP_SRNG_THRESH_CRITICAL;
        else if (num_valid < dp_srng->safe_thresh)
                return DP_SRNG_THRESH_SAFE;
        else
                return DP_SRNG_THRESH_NEAR_FULL;
}

#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER 2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near-full level and update
 *                                        the reap limit and flags to reflect
 *                                        the state
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *                  per the near-full state
 *
 * Return: 1, if the srng is near full
 *         0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
                                   struct dp_srng *srng,
                                   int *max_reap_limit)
{
        int ring_near_full = 0, near_full_level;

        if (dp_srng_check_ring_near_full(soc, srng)) {
                near_full_level = dp_srng_get_near_full_level(soc, srng);
                switch (near_full_level) {
                case DP_SRNG_THRESH_CRITICAL:
                        /* Currently not doing anything special here */
                        fallthrough;
                case DP_SRNG_THRESH_NEAR_FULL:
                        ring_near_full = 1;
                        *max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
                        break;
                case DP_SRNG_THRESH_SAFE:
                        qdf_atomic_set(&srng->near_full, 0);
                        ring_near_full = 0;
                        break;
                default:
                        qdf_assert(0);
                        break;
                }
        }

        return ring_near_full;
}
#else
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
                                   struct dp_srng *srng,
                                   int *max_reap_limit)
{
        return 0;
}
#endif
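
/*
 * Usage sketch (illustrative): a reap loop passes its normal limit in
 * and, when the ring is flagged near full, gets it scaled up by
 * DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER so more entries are drained in
 * this pass:
 *
 *      int max_reap_limit = quota;
 *      int near_full = _dp_srng_test_and_update_nf_params(soc, srng,
 *                                                         &max_reap_limit);
 */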
#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
static inline
uint32_t dp_desc_pool_get_spcl_cmem_base(uint8_t desc_pool_id)
{
        return (DP_TX_SPCL_DESC_CMEM_OFFSET +
                (desc_pool_id * DP_TX_SPCL_DESC_POOL_CMEM_SIZE));
}
#else
static inline
uint32_t dp_desc_pool_get_spcl_cmem_base(uint8_t desc_pool_id)
{
        QDF_BUG(0);
        return 0;
}
#endif

static inline
uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
                                    enum qdf_dp_desc_type desc_type)
{
        switch (desc_type) {
        case QDF_DP_TX_DESC_TYPE:
                return (DP_TX_DESC_CMEM_OFFSET +
                        (desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
        case QDF_DP_TX_SPCL_DESC_TYPE:
                return dp_desc_pool_get_spcl_cmem_base(desc_pool_id);
        case QDF_DP_RX_DESC_BUF_TYPE:
                return (DP_RX_DESC_CMEM_OFFSET +
                        ((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
                        DP_RX_DESC_POOL_CMEM_SIZE);
        case QDF_DP_TX_PPEDS_DESC_TYPE:
                return DP_TX_PPEDS_DESC_CMEM_OFFSET;
        default:
                QDF_BUG(0);
        }

        return 0;
}
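
/*
 * Example (derived from the pool-size macros above): tx pool N starts
 * at DP_TX_DESC_CMEM_OFFSET + N * DP_TX_DESC_POOL_CMEM_SIZE, while rx
 * pools are additionally strided by chip_id
 * ((chip_id * MAX_RXDESC_POOLS) + pool) so that multi-chip partners do
 * not overlap in CMEM.
 */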
#ifndef WLAN_MLO_MULTI_CHIP
static inline
void dp_soc_mlo_fill_params(struct dp_soc *soc,
                            struct cdp_soc_attach_params *params)
{
}

static inline
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
                             struct cdp_pdev_attach_params *params)
{
}

static inline
void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
}

static inline
void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
}

static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
        return 0;
}
#endif

/**
 * dp_mlo_dev_ctxt_list_attach_wrapper() - Wrapper API for MLO dev list init
 * @mlo_dev_obj: MLO device object
 *
 * Return: void
 */
void dp_mlo_dev_ctxt_list_attach_wrapper(dp_mlo_dev_obj_t mlo_dev_obj);

/**
 * dp_mlo_dev_ctxt_list_detach_wrapper() - Wrapper API for MLO dev list de-init
 * @mlo_dev_obj: MLO device object
 *
 * Return: void
 */
void dp_mlo_dev_ctxt_list_detach_wrapper(dp_mlo_dev_obj_t mlo_dev_obj);

/**
 * dp_mlo_dev_ctxt_list_attach() - API to initialize the MLO device list
 * @mlo_dev_obj: MLO device object
 *
 * Return: void
 */
void dp_mlo_dev_ctxt_list_attach(dp_mlo_dev_obj_t mlo_dev_obj);

/**
 * dp_mlo_dev_ctxt_list_detach() - API to de-initialize the MLO device list
 * @mlo_dev_obj: MLO device object
 *
 * Return: void
 */
void dp_mlo_dev_ctxt_list_detach(dp_mlo_dev_obj_t mlo_dev_obj);

/**
 * dp_soc_initialize_cdp_cmn_mlo_ops() - API to initialize common CDP MLO ops
 * @soc: Datapath soc handle
 *
 * Return: void
 */
void dp_soc_initialize_cdp_cmn_mlo_ops(struct dp_soc *soc);

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_DP_MLO_DEV_CTX)
/**
 * dp_mlo_dev_ctxt_unref_delete() - Release the ref for MLO device ctxt
 * @mlo_dev_ctxt: MLO device context handle
 * @mod_id: module id which is releasing the reference
 *
 * Return: void
 */
void dp_mlo_dev_ctxt_unref_delete(struct dp_mlo_dev_ctxt *mlo_dev_ctxt,
                                  enum dp_mod_id mod_id);

/**
 * dp_mlo_dev_get_ref() - Get the ref for MLO device ctxt
 * @mlo_dev_ctxt: MLO device context handle
 * @mod_id: module id which is requesting the reference
 *
 * Return: SUCCESS on acquiring the ref
 */
QDF_STATUS
dp_mlo_dev_get_ref(struct dp_mlo_dev_ctxt *mlo_dev_ctxt,
                   enum dp_mod_id mod_id);

/**
 * dp_get_mlo_dev_ctx_by_mld_mac_addr() - Get MLO device ctxt based on MLD MAC
 * @be_soc: be soc handle
 * @mldaddr: MLD MAC address
 * @mod_id: module id which is requesting the reference
 *
 * Return: MLO device context handle on success, NULL on failure
 */
struct dp_mlo_dev_ctxt *
dp_get_mlo_dev_ctx_by_mld_mac_addr(struct dp_soc_be *be_soc,
                                   uint8_t *mldaddr, enum dp_mod_id mod_id);
#endif /* WLAN_FEATURE_11BE_MLO && WLAN_DP_MLO_DEV_CTX */
#endif /* __DP_BE_H */