dp_be.h

/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __DP_BE_H
#define __DP_BE_H

#include <dp_types.h>
#include <hal_be_tx.h>
#ifdef WLAN_MLO_MULTI_CHIP
#include "mlo/dp_mlo.h"
#else
#include <dp_peer.h>
#endif
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif

enum CMEM_MEM_CLIENTS {
	COOKIE_CONVERSION,
	FISA_FST,
};
/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* mask for the entry offset within one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)

/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES 1024

/* cookie conversion required CMEM offset from CMEM pool */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size 4K */
#define DP_CC_PPT_MEM_SIZE 4096

/* FST required CMEM offset from CMEM pool */
#define DP_FST_MEM_OFFSET_IN_CMEM \
	(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* CMEM size for FISA FST 16K */
#define DP_CMEM_FST_SIZE 16384

/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0
#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF
#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0
#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* higher 11 bits in Desc ID for offset in CMEM of PPT */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19
#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00
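
/*
 * Illustrative example of the resulting 20-bit cookie layout (the value
 * below is an arbitrary sample, not a real descriptor ID):
 *
 *   desc_id = 0x2A7F3
 *     bits [19:9] = 0x153 -> SPT page selected via the PPT
 *     bits [8:0]  = 0x1F3 -> entry offset within that 512-entry SPT page
 */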
/*
 * page 4K unaligned case, a single SPT page physical address
 * needs 8 bytes in the PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8

/*
 * page 4K aligned case, a single SPT page physical address
 * needs 4 bytes in the PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/* 4K aligned case, number of bits HW appends for one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 3
#else
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 5
#endif

#ifdef WLAN_SUPPORT_PPEDS
/* The MAX PPE PRI2TID */
#define DP_TX_INT_PRI2TID_MAX 15

#define DP_TX_PPEDS_POOL_ID 0

/* size of CMEM needed for a ppeds tx desc pool */
#define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_NUM_PPEDS_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* Offset of ppeds tx descriptor pool */
#define DP_TX_PPEDS_DESC_CMEM_OFFSET 0

#define PEER_ROUTING_USE_PPE 1
#define PEER_ROUTING_ENABLED 1
#else
#define DP_TX_PPEDS_DESC_CMEM_OFFSET 0
#define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE 0
#endif

/* tx descriptors are programmed at the start of the CMEM region */
#define DP_TX_DESC_CMEM_OFFSET \
	(DP_TX_PPEDS_DESC_CMEM_OFFSET + DP_TX_PPEDS_DESC_POOL_CMEM_SIZE)

/* size of CMEM needed for a tx desc pool */
#define DP_TX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* Offset of rx descriptor pool */
#define DP_RX_DESC_CMEM_OFFSET \
	(DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE))

/* size of CMEM needed for a rx desc pool */
#define DP_RX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* get ppt_id from CMEM_OFFSET */
#define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
	((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
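
/*
 * Worked example of the CMEM layout these offsets produce (illustrative
 * only; the descriptor counts below are assumed values, not the actual
 * WLAN_CFG_* maxima):
 *
 *   [PPEDS tx pool][tx pool 0 .. MAX_TXDESC_POOLS-1][rx pool 0 .. ]
 *
 * With 4K-aligned pages each pool needs (num_desc / 512) * 4 bytes of
 * PPT space in CMEM, e.g. an assumed 0x8000-descriptor pool maps to
 * 64 SPT pages and therefore consumes 256 bytes of CMEM.
 */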
/**
 * struct dp_spt_page_desc - secondary page table page descriptor
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in the primary page table where this page's
 *	       physical address is stored
 */
struct dp_spt_page_desc {
	uint8_t *page_v_addr;
	qdf_dma_addr_t page_p_addr;
	uint32_t ppt_index;
};

/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_offset: CMEM offset from base address for primary page table setup
 * @total_page_num: total number of DDR pages allocated
 * @page_desc_base: page descriptor buffer base address
 * @page_pool: DDR page pool
 * @cc_lock: lock protecting page acquisition/free
 */
struct dp_hw_cookie_conversion_t {
	uint32_t cmem_offset;
	uint32_t total_page_num;
	struct dp_spt_page_desc *page_desc_base;
	struct qdf_mem_multi_page_t page_pool;
	qdf_spinlock_t cc_lock;
};

/**
 * struct dp_spt_page_desc_list - container of SPT page desc list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
	struct dp_spt_page_desc *spt_page_list_head;
	struct dp_spt_page_desc *spt_page_list_tail;
	uint16_t num_spt_pages;
};
/* HW reads 8 bytes for the VA */
#define DP_CC_HW_READ_BYTES 8
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
	{ *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
	  = (uintptr_t)(_desc_va); }
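
/*
 * Usage sketch (illustrative only): publish a descriptor VA into its SPT
 * slot so HW cookie conversion can map the cookie back to the VA. The
 * 'page_desc', 'i' and 'tx_desc' names below are hypothetical locals of
 * a pool-init loop, not part of this header:
 *
 *   DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr, i, tx_desc);
 */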
/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: ref count indicating number of users of the bank
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
	uint8_t is_configured;
	qdf_atomic_t ref_count;
	union hal_tx_bank_config bank_config;
};

#ifdef WLAN_SUPPORT_PPEDS
/**
 * struct dp_ppe_vp_tbl_entry - PPE Virtual Port table entry
 * @is_configured: Boolean indicating whether the entry is configured.
 */
struct dp_ppe_vp_tbl_entry {
	bool is_configured;
};

/**
 * struct dp_ppe_vp_profile - PPE direct switch profile per vdev
 * @vp_num: Virtual port number
 * @ppe_vp_num_idx: Index into the PPE VP table entry
 * @search_idx_reg_num: Address search index register number
 * @drop_prec_enable: Drop precedence enable
 * @to_fw: To FW exception enable/disable.
 * @use_ppe_int_pri: Use PPE INT_PRI to TID mapping table
 */
struct dp_ppe_vp_profile {
	uint8_t vp_num;
	uint8_t ppe_vp_num_idx;
	uint8_t search_idx_reg_num;
	uint8_t drop_prec_enable;
	uint8_t to_fw;
	uint8_t use_ppe_int_pri;
};

/**
 * struct dp_ppe_tx_desc_pool_s - PPEDS Tx Descriptor Pool
 * @elem_size: Size of each descriptor
 * @num_allocated: Number of used descriptors
 * @freelist: Chain of free descriptors
 * @desc_pages: multiple page allocation information for actual descriptors
 * @elem_count: Number of descriptors in the pool
 * @num_free: Number of free descriptors
 * @lock: Lock for descriptor allocation/free from/to the pool
 */
struct dp_ppe_tx_desc_pool_s {
	uint16_t elem_size;
	uint32_t num_allocated;
	struct dp_tx_desc_s *freelist;
	struct qdf_mem_multi_page_t desc_pages;
	uint16_t elem_count;
	uint32_t num_free;
	qdf_spinlock_t lock;
};
#endif
/**
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: num TX bank profiles
 * @bank_profiles: bank profiles for various TX banks
 * @cc_cmem_base: cmem offset reserved for CC
 * @tx_cc_ctx: Cookie conversion context for tx desc pools
 * @rx_cc_ctx: Cookie conversion context for rx desc pools
 * @monitor_soc_be: BE specific monitor object
 * @mlo_enabled: Flag to indicate MLO is enabled or not
 * @mlo_chip_id: MLO chip_id
 * @ml_ctxt: pointer to global ml_context
 * @delta_tqm: delta_tqm
 * @mlo_tstamp_offset: mlo timestamp offset
 * @mld_peer_hash: peer hash table for ML peers
 *	(associated peer with this MAC address)
 * @mld_peer_hash_lock: lock to protect mld_peer_hash
 * @reo2ppe_ring: REO2PPE ring
 * @ppe2tcl_ring: PPE2TCL ring
 * @ppe_release_ring: PPE release ring
 * @ppe_vp_tbl: PPE VP table
 * @ppe_vp_tbl_lock: PPE VP table lock
 * @num_ppe_vp_entries: Number of PPE VP entries
 * @ipa_bank_id: TCL bank id used by IPA
 * @ppeds_tx_cc_ctx: Cookie conversion context for ppeds tx desc pool
 * @ppeds_tx_desc: PPEDS tx desc pool
 * @ppeds_handle: PPEDS soc instance handle
 */
struct dp_soc_be {
	struct dp_soc soc;
	uint8_t num_bank_profiles;
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
	qdf_mutex_t tx_bank_lock;
#else
	qdf_spinlock_t tx_bank_lock;
#endif
	struct dp_tx_bank_profile *bank_profiles;
	struct dp_spt_page_desc *page_desc_base;
	uint32_t cc_cmem_base;
	struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
	struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_srng reo2ppe_ring;
	struct dp_srng ppe2tcl_ring;
	struct dp_srng ppe_release_ring;
	struct dp_ppe_vp_tbl_entry *ppe_vp_tbl;
	struct dp_hw_cookie_conversion_t ppeds_tx_cc_ctx;
	struct dp_ppe_tx_desc_pool_s ppeds_tx_desc;
	void *ppeds_handle;
	qdf_mutex_t ppe_vp_tbl_lock;
	uint8_t num_ppe_vp_entries;
#endif
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_enabled;
	uint8_t mlo_chip_id;
	struct dp_mlo_ctxt *ml_ctxt;
	uint64_t delta_tqm;
	uint64_t mlo_tstamp_offset;
#else
	/* Protect mld peer hash table */
	DP_MUTEX_TYPE mld_peer_hash_lock;
	struct {
		uint32_t mask;
		uint32_t idx_bits;
		TAILQ_HEAD(, dp_peer) * bins;
	} mld_peer_hash;
#endif
#endif
#ifdef IPA_OFFLOAD
	int8_t ipa_bank_id;
#endif
};

/* convert struct dp_soc_be pointer to struct dp_soc pointer */
#define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)
/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 * @monitor_pdev_be: BE specific monitor object
 * @mlo_link_id: MLO link id for PDEV
 * @delta_tsf2: delta_tsf2
 */
struct dp_pdev_be {
	struct dp_pdev pdev;
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_link_id;
	uint64_t delta_tsf2;
#endif
};

/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
 * @ppe_vp_enabled: flag to check if PPE VP is enabled for vdev
 * @ppe_vp_profile: PPE VP profile
 */
struct dp_vdev_be {
	struct dp_vdev vdev;
	int8_t bank_id;
	uint8_t vdev_id_check_en;
#ifdef WLAN_MLO_MULTI_CHIP
	/* partner list used for Intra-BSS */
	uint8_t partner_vdev_list[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC];
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MCAST_MLO
	/* DP MLO seq number */
	uint16_t seq_num;
	/* MLO Mcast primary vdev */
	bool mcast_primary;
#endif
#endif
#endif
	unsigned long ppe_vp_enabled;
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_ppe_vp_profile ppe_vp_profile;
#endif
};

/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 * @priority_valid: flag indicating whether PPE priority is valid
 */
struct dp_peer_be {
	struct dp_peer peer;
#ifdef WLAN_SUPPORT_PPEDS
	uint8_t priority_valid;
#endif
};
/**
 * dp_get_soc_context_size_be() - get context size for target specific DP soc
 *
 * Return: value in bytes for BE specific soc structure
 */
qdf_size_t dp_get_soc_context_size_be(void);

/**
 * dp_initialize_arch_ops_be() - initialize BE specific arch ops
 * @arch_ops: arch ops pointer
 *
 * Return: none
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);

/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: context type for which the size is requested
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);

/**
 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
 * @soc: dp_soc pointer
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
	return (struct dp_soc_be *)soc;
}
#ifdef WLAN_MLO_MULTI_CHIP
typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t;

/*
 * dp_mlo_get_peer_hash_obj() - return the container struct of the MLO hash table
 * @soc: soc handle
 *
 * Return: MLD peer hash object
 */
static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return be_soc->ml_ctxt;
}

void dp_clr_mlo_ptnr_list(struct dp_soc *soc, struct dp_vdev *vdev);

#if defined(WLAN_FEATURE_11BE_MLO)
/**
 * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
 * @soc: Soc handle
 * @peer: DP peer handle for ML peer
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_mlo_partner_chips_map(struct dp_soc *soc,
			      struct dp_peer *peer,
			      uint16_t peer_id);

/**
 * dp_mlo_partner_chips_unmap() - Unmap MLO peers from partner SOCs
 * @soc: Soc handle
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
				uint16_t peer_id);

#ifdef WLAN_MCAST_MLO
typedef void dp_ptnr_vdev_iter_func(struct dp_vdev_be *be_vdev,
				    struct dp_vdev *ptnr_vdev,
				    void *arg);
typedef void dp_ptnr_soc_iter_func(struct dp_soc *ptnr_soc,
				   void *arg);

/*
 * dp_mcast_mlo_iter_ptnr_vdev() - API to iterate through the partner vdev list
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @func: function to be called for each partner vdev
 * @arg: argument to be passed to func
 * @mod_id: module id
 *
 * Return: None
 */
void dp_mcast_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
				 struct dp_vdev_be *be_vdev,
				 dp_ptnr_vdev_iter_func func,
				 void *arg,
				 enum dp_mod_id mod_id);

/*
 * dp_mcast_mlo_iter_ptnr_soc() - API to iterate through the partner soc list
 * @be_soc: dp_soc_be pointer
 * @func: function to be called for each partner soc
 * @arg: argument to be passed to func
 *
 * Return: None
 */
void dp_mcast_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc,
				dp_ptnr_soc_iter_func func,
				void *arg);

/*
 * dp_mlo_get_mcast_primary_vdev() - get a reference to the mcast primary vdev
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @mod_id: module id
 *
 * Return: mcast primary DP VDEV handle on success, NULL on failure
 */
struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
					      struct dp_vdev_be *be_vdev,
					      enum dp_mod_id mod_id);
#endif
#endif

#else
typedef struct dp_soc_be *dp_mld_peer_hash_obj_t;

static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	return dp_get_be_soc_from_dp_soc(soc);
}

static inline void dp_clr_mlo_ptnr_list(struct dp_soc *soc,
					struct dp_vdev *vdev)
{
}
#endif
/*
 * dp_mlo_peer_find_hash_attach_be() - API to initialize the ML peer hash table
 * @mld_hash_obj: Peer hash object
 * @hash_elems: number of entries in hash table
 *
 * Return: QDF_STATUS_SUCCESS when attach succeeds, else a failure status
 */
QDF_STATUS
dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
				int hash_elems);

/*
 * dp_mlo_peer_find_hash_detach_be() - API to de-initialize the ML peer hash table
 * @mld_hash_obj: Peer hash object
 *
 * Return: void
 */
void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj);

/**
 * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
 * @pdev: dp_pdev pointer
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
	return (struct dp_pdev_be *)pdev;
}

/**
 * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
 * @vdev: dp_vdev pointer
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
	return (struct dp_vdev_be *)vdev;
}

/**
 * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
 * @peer: dp_peer pointer
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
	return (struct dp_peer_be *)peer;
}

QDF_STATUS
dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx,
			       uint32_t num_descs,
			       enum dp_desc_type desc_type,
			       uint8_t desc_pool_id);

QDF_STATUS
dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);

QDF_STATUS
dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
			     struct dp_hw_cookie_conversion_t *cc_ctx);

QDF_STATUS
dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);
/**
 * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptors from the pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc list head
 * @list_tail: pointer to page desc list tail
 * @num_desc: number of TX/RX Descs required for SPT pages
 *
 * Return: number of SPT page descriptors allocated
 */
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
				   struct dp_spt_page_desc **list_head,
				   struct dp_spt_page_desc **list_tail,
				   uint16_t num_desc);

/**
 * dp_cc_spt_page_desc_free() - free SPT DDR page descriptors back to the pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc list head
 * @list_tail: pointer to page desc list tail
 * @page_nums: number of page descriptors freed back to the pool
 */
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
			      struct dp_spt_page_desc **list_head,
			      struct dp_spt_page_desc **list_tail,
			      uint16_t page_nums);

/**
 * dp_cc_desc_id_generate() - generate SW cookie ID according to whether the
 *			      DDR page is 4K aligned or not
 * @ppt_index: offset index in the primary page table
 * @spt_index: offset index in the secondary DDR page
 *
 * Generate a SW cookie ID that matches what HW expects.
 *
 * Return: cookie ID
 */
static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
					      uint16_t spt_index)
{
	/*
	 * For the 4K aligned case the CMEM entry size is 4 bytes: the HW
	 * index in bit19~bit10 is ppt_index / 2 and the high-32-bits flag
	 * in bit9 is ppt_index % 2, so bit19~bit9 together equal the
	 * original ppt_index value.
	 * For the 4K unaligned case the CMEM entry size is 8 bytes and
	 * bit19~bit9 hold the HW index directly, i.e. the ppt_index value.
	 */
	return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
		spt_index);
}
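
/*
 * Example (illustrative only): cookie generation for the i-th entry of an
 * SPT page during pool init; 'tx_desc', 'page_desc' and 'i' are
 * hypothetical locals of the caller, not part of this header:
 *
 *   tx_desc->id = dp_cc_desc_id_generate(page_desc->ppt_index, i);
 */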
/**
 * dp_cc_desc_find() - find the TX/RX Desc virtual address by cookie ID
 * @soc: dp soc handle
 * @desc_id: TX/RX Desc ID
 *
 * Return: TX/RX Desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
					uint32_t desc_id)
{
	struct dp_soc_be *be_soc;
	uint16_t ppt_page_id, spt_va_id;
	uint8_t *spt_page_va;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;

	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

	/*
	 * The PPT index in CMEM follows the same order as the pages in the
	 * page desc array set up during initialization.
	 * The entry size in a DDR page is 64 bits; on 32-bit systems only
	 * the lower 32 bits of the VA value are needed.
	 */
	spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;

	return (*((uintptr_t *)(spt_page_va +
				spt_va_id * DP_CC_HW_READ_BYTES)));
}
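
/*
 * Example (illustrative only): recover a descriptor VA from a cookie read
 * out of a completion ring entry; 'cookie_id' is a hypothetical local:
 *
 *   struct dp_tx_desc_s *tx_desc =
 *		(struct dp_tx_desc_s *)dp_cc_desc_find(soc, cookie_id);
 */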
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG near-full levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
 *		condition and drastic steps need to be taken for processing
 *		the entries in SRNG
 */
enum dp_srng_near_full_levels {
	DP_SRNG_THRESH_SAFE,
	DP_SRNG_THRESH_NEAR_FULL,
	DP_SRNG_THRESH_CRITICAL,
};

/**
 * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
 *				its corresponding near-full irq handler
 * @soc: Datapath SoC handle
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: 1, if the srng was marked as near-full
 *	   0, if the srng was not marked as near-full
 */
static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
					       struct dp_srng *dp_srng)
{
	return qdf_atomic_read(&dp_srng->near_full);
}

/**
 * dp_srng_get_near_full_level() - Check the number of available entries in
 *			the consumer srng and return its near-full level.
 * @soc: Datapath SoC Handle [To be validated by the caller]
 * @dp_srng: SRNG handle
 *
 * Return: near-full level
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
	uint32_t num_valid;

	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
						  dp_srng->hal_srng,
						  true);

	if (num_valid > dp_srng->crit_thresh)
		return DP_SRNG_THRESH_CRITICAL;
	else if (num_valid < dp_srng->safe_thresh)
		return DP_SRNG_THRESH_SAFE;
	else
		return DP_SRNG_THRESH_NEAR_FULL;
}

#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER 2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near-full level and update
 *			the reap_limit and flags to reflect the state.
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *			per the near-full state
 *
 * Return: 1, if the srng is near full
 *	   0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	int ring_near_full = 0, near_full_level;

	if (dp_srng_check_ring_near_full(soc, srng)) {
		near_full_level = dp_srng_get_near_full_level(soc, srng);
		switch (near_full_level) {
		case DP_SRNG_THRESH_CRITICAL:
			/* Currently not doing anything special here */
			fallthrough;
		case DP_SRNG_THRESH_NEAR_FULL:
			ring_near_full = 1;
			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
			break;
		case DP_SRNG_THRESH_SAFE:
			qdf_atomic_set(&srng->near_full, 0);
			ring_near_full = 0;
			break;
		default:
			qdf_assert(0);
			break;
		}
	}

	return ring_near_full;
}
#else
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	return 0;
}
#endif
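
/*
 * Usage sketch (illustrative only): callers typically query the near-full
 * state before reaping so the per-loop budget can be scaled; the names and
 * the default budget below are hypothetical, not part of this header:
 *
 *   int max_reap_limit = reap_budget;	// hypothetical default budget
 *   int near_full = _dp_srng_test_and_update_nf_params(soc, rx_ring,
 *							 &max_reap_limit);
 *   // reap up to max_reap_limit entries; if near_full, reschedule soon
 */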
static inline
uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
				    enum dp_desc_type desc_type)
{
	switch (desc_type) {
	case DP_TX_DESC_TYPE:
		return (DP_TX_DESC_CMEM_OFFSET +
			(desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
	case DP_RX_DESC_BUF_TYPE:
		return (DP_RX_DESC_CMEM_OFFSET +
			((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
			DP_RX_DESC_POOL_CMEM_SIZE);
	case DP_TX_PPEDS_DESC_TYPE:
		return DP_TX_PPEDS_DESC_CMEM_OFFSET;
	default:
		QDF_BUG(0);
	}
	return 0;
}
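
/*
 * Example (illustrative only): CMEM base for rx desc pool 1 on chip 0,
 * which would then feed cookie-conversion init for that pool; the chip
 * and pool numbers are arbitrary sample values:
 *
 *   uint32_t cmem_base =
 *		dp_desc_pool_get_cmem_base(0, 1, DP_RX_DESC_BUF_TYPE);
 */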
#ifndef WLAN_MLO_MULTI_CHIP
static inline
void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
}

static inline
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
}

static inline
void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
}

static inline
void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
#endif

/*
 * dp_txrx_set_vdev_param_be() - target specific ops for setting vdev params
 * @soc: DP soc handle
 * @vdev: pointer to vdev structure
 * @param: parameter type to set
 * @val: value to set
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_txrx_set_vdev_param_be(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     enum cdp_vdev_param_type param,
				     cdp_config_param_type val);

#endif