dp_be.h
/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __DP_BE_H
#define __DP_BE_H

#include <dp_types.h>
#include <hal_be_tx.h>
#ifdef WLAN_MLO_MULTI_CHIP
#include "mlo/dp_mlo.h"
#else
#include <dp_peer.h>
#endif
/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* mask for the entry offset within one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)

/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES 1024

/* CMEM offset from the CMEM pool required for cookie conversion */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size 4K */
#define DP_CC_PPT_MEM_SIZE 4096

/* CMEM offset from the CMEM pool required for FST */
#define DP_FST_MEM_OFFSET_IN_CMEM \
	(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0
#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF
#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0
#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* higher 11 bits in Desc ID for offset in CMEM of PPT */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19
#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00
/*
 * page 4K unaligned case, a single SPT page physical address
 * needs 8 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8

/*
 * page 4K aligned case, a single SPT page physical address
 * needs 4 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/* 4K aligned case, number of bits HW appends for one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 3
#else
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 5
#endif

/* tx descriptors are programmed at the start of the CMEM region */
#define DP_TX_DESC_CMEM_OFFSET 0

/* size of CMEM needed for a tx desc pool */
#define DP_TX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* offset of the rx descriptor pools */
#define DP_RX_DESC_CMEM_OFFSET \
	(DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE))

/* size of CMEM needed for an rx desc pool */
#define DP_RX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* get ppt_id from CMEM_OFFSET */
#define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
	((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
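
/*
 * Illustrative layout example (not part of the upstream header), assuming a
 * hypothetical WLAN_CFG_NUM_TX_DESC_MAX of 32768:
 *
 *   DP_TX_DESC_POOL_CMEM_SIZE = (32768 / 512) * 4 = 256 bytes per tx pool,
 *   i.e. 64 PPT entries, and DP_CMEM_OFFSET_TO_PPT_ID(256) = 64, which is the
 *   first PPT index of the next pool. The rx pools start right after all
 *   MAX_TXDESC_POOLS tx pools, at DP_RX_DESC_CMEM_OFFSET.
 */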
/**
 * struct dp_spt_page_desc - secondary page table page descriptor
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in the primary page table where this page's
 *	       physical address is stored
 */
struct dp_spt_page_desc {
	uint8_t *page_v_addr;
	qdf_dma_addr_t page_p_addr;
	uint32_t ppt_index;
};
/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_offset: CMEM offset from base address for primary page table setup
 * @total_page_num: total number of DDR pages allocated
 * @page_desc_base: page descriptor buffer base address
 * @page_pool: DDR page pool
 * @cc_lock: lock protecting page acquire/free
 */
struct dp_hw_cookie_conversion_t {
	uint32_t cmem_offset;
	uint32_t total_page_num;
	struct dp_spt_page_desc *page_desc_base;
	struct qdf_mem_multi_page_t page_pool;
	qdf_spinlock_t cc_lock;
};
/**
 * struct dp_spt_page_desc_list - container for SPT page descriptor list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
	struct dp_spt_page_desc *spt_page_list_head;
	struct dp_spt_page_desc *spt_page_list_tail;
	uint16_t num_spt_pages;
};
/* HW reads 8 bytes for each VA */
#define DP_CC_HW_READ_BYTES 8
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
	{ *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
	  = (uintptr_t)(_desc_va); }
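
/*
 * Illustrative usage sketch (hypothetical names, not part of the upstream
 * header): after a SW descriptor is attached to an SPT slot, its virtual
 * address is written into the page so that HW can convert the cookie back:
 *
 *   struct dp_spt_page_desc *pd = ...;   // one SPT page
 *   uint16_t slot = ...;                 // 0 .. DP_CC_SPT_PAGE_MAX_ENTRIES - 1
 *   DP_CC_SPT_PAGE_UPDATE_VA(pd->page_v_addr, slot, tx_desc);
 *   // writes the 8-byte VA of tx_desc at byte offset slot * DP_CC_HW_READ_BYTES
 */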
/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: ref count indicating number of users of the bank
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
	uint8_t is_configured;
	qdf_atomic_t ref_count;
	union hal_tx_bank_config bank_config;
};
/**
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: lock protecting the TX bank profiles
 * @bank_profiles: bank profiles for various TX banks
 * @page_desc_base: base of the SPT page descriptor array
 * @cc_cmem_base: cmem offset reserved for CC
 * @tx_cc_ctx: Cookie conversion context for tx desc pools
 * @rx_cc_ctx: Cookie conversion context for rx desc pools
 * @monitor_soc_be: BE specific monitor object
 * @mlo_enabled: Flag to indicate MLO is enabled or not
 * @mlo_chip_id: MLO chip_id
 * @ml_ctxt: pointer to global ml_context
 * @mld_peer_hash_lock: lock to protect mld_peer_hash
 * @mld_peer_hash: peer hash table for ML peers
 */
struct dp_soc_be {
	struct dp_soc soc;
	uint8_t num_bank_profiles;
	qdf_spinlock_t tx_bank_lock;
	struct dp_tx_bank_profile *bank_profiles;
	struct dp_spt_page_desc *page_desc_base;
	uint32_t cc_cmem_base;
	struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
	struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_srng reo2ppe_ring;
	struct dp_srng ppe2tcl_ring;
	struct dp_srng ppe_release_ring;
#endif
#if !defined(DISABLE_MON_CONFIG)
	struct dp_mon_soc_be *monitor_soc_be;
#endif
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_enabled;
	uint8_t mlo_chip_id;
	struct dp_mlo_ctxt *ml_ctxt;
#else
	/* Protect mld peer hash table */
	DP_MUTEX_TYPE mld_peer_hash_lock;
	struct {
		uint32_t mask;
		uint32_t idx_bits;
		TAILQ_HEAD(, dp_peer) * bins;
	} mld_peer_hash;
#endif
#endif
};

/* convert struct dp_soc_be pointer to struct dp_soc pointer */
#define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)
/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 * @monitor_pdev_be: BE specific monitor object
 * @mlo_link_id: MLO link id for PDEV
 */
struct dp_pdev_be {
	struct dp_pdev pdev;
#if !defined(DISABLE_MON_CONFIG)
	struct dp_mon_pdev_be *monitor_pdev_be;
#endif
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_link_id;
#endif
};
/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag indicating whether HW vdev_id check is enabled
 *		      for the vdev
 */
struct dp_vdev_be {
	struct dp_vdev vdev;
	int8_t bank_id;
	uint8_t vdev_id_check_en;
};

/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 */
struct dp_peer_be {
	struct dp_peer peer;
};
/**
 * dp_get_soc_context_size_be() - get context size for target specific DP soc
 *
 * Return: value in bytes for BE specific soc structure
 */
qdf_size_t dp_get_soc_context_size_be(void);

/**
 * dp_initialize_arch_ops_be() - initialize BE specific arch ops
 * @arch_ops: arch ops pointer
 *
 * Return: none
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);

/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: context type for which the size is required
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);

/**
 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
 * @soc: dp_soc pointer
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
	return (struct dp_soc_be *)soc;
}
#ifdef WLAN_MLO_MULTI_CHIP
typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t;

/**
 * dp_mlo_get_peer_hash_obj() - return the container struct of the MLO hash table
 * @soc: soc handle
 *
 * Return: MLD peer hash object
 */
static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return be_soc->ml_ctxt;
}
#else
typedef struct dp_soc_be *dp_mld_peer_hash_obj_t;

static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	return dp_get_be_soc_from_dp_soc(soc);
}
#endif

/**
 * dp_mlo_peer_find_hash_attach_be() - API to initialize ML peer hash table
 * @mld_hash_obj: peer hash object
 * @hash_elems: number of entries in hash table
 *
 * Return: QDF_STATUS_SUCCESS when attach is successful, else QDF_STATUS_E_FAILURE
 */
QDF_STATUS
dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
				int hash_elems);

/**
 * dp_mlo_peer_find_hash_detach_be() - API to de-initialize ML peer hash table
 * @mld_hash_obj: peer hash object
 *
 * Return: void
 */
void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj);
/**
 * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
 * @pdev: dp_pdev pointer
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
	return (struct dp_pdev_be *)pdev;
}

/**
 * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
 * @vdev: dp_vdev pointer
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
	return (struct dp_vdev_be *)vdev;
}

/**
 * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
 * @peer: dp_peer pointer
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
	return (struct dp_peer_be *)peer;
}
QDF_STATUS
dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx,
			       uint32_t num_descs,
			       enum dp_desc_type desc_type,
			       uint8_t desc_pool_id);

QDF_STATUS
dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);

QDF_STATUS
dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
			     struct dp_hw_cookie_conversion_t *cc_ctx);

QDF_STATUS
dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);
/**
 * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptors from the pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @num_desc: number of TX/RX Descs required for SPT pages
 *
 * Return: number of SPT page Descs allocated
 */
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
				   struct dp_spt_page_desc **list_head,
				   struct dp_spt_page_desc **list_tail,
				   uint16_t num_desc);

/**
 * dp_cc_spt_page_desc_free() - free SPT DDR page descriptors back to the pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @page_nums: number of page descs freed back to the pool
 */
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
			      struct dp_spt_page_desc **list_head,
			      struct dp_spt_page_desc **list_tail,
			      uint16_t page_nums);
/**
 * dp_cc_desc_id_generate() - generate SW cookie ID according to
 *			      whether the DDR page is 4K aligned or not
 * @ppt_index: offset index in primary page table
 * @spt_index: offset index in secondary DDR page
 *
 * Generate the SW cookie ID in the layout that HW expects.
 *
 * Return: cookie ID
 */
static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
					      uint16_t spt_index)
{
	/*
	 * For the 4K aligned case, the cmem entry size is 4 bytes:
	 * the HW index in bit19~bit10 is ppt_index / 2 and the high-32-bits
	 * flag in bit9 is ppt_index % 2, so bit19~bit9 together equal the
	 * original ppt_index value.
	 * For the 4K unaligned case, the cmem entry size is 8 bytes:
	 * bit19~bit9 hold the HW index directly, which is ppt_index.
	 */
	return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
		spt_index);
}
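
/*
 * Worked example (illustrative, not part of the upstream header):
 * ppt_index = 3, spt_index = 5 gives desc_id = (3 << 9) | 5 = 0x605.
 * Decoding with the masks above recovers the inputs:
 * (0x605 & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >> 9 = 3 and
 * 0x605 & DP_CC_DESC_ID_SPT_VA_OS_MASK = 5.
 */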
/**
 * dp_cc_desc_find() - find the TX/RX Desc virtual address by cookie ID
 * @soc: dp soc handle
 * @desc_id: TX/RX Desc ID
 *
 * Return: TX/RX Desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
					 uint32_t desc_id)
{
	struct dp_soc_be *be_soc;
	uint16_t ppt_page_id, spt_va_id;
	uint8_t *spt_page_va;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;

	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

	/*
	 * The ppt index in cmem follows the same order as the pages in the
	 * page desc array populated during initialization.
	 * Each entry in a DDR page is 64 bits; on 32-bit systems only the
	 * lower 32 bits of the VA are meaningful.
	 */
	spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;

	return (*((uintptr_t *)(spt_page_va +
				spt_va_id * DP_CC_HW_READ_BYTES)));
}
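
/*
 * Illustrative round-trip (hypothetical names, not part of the upstream
 * header): if a descriptor VA was stored with
 *   DP_CC_SPT_PAGE_UPDATE_VA(be_soc->page_desc_base[ppt].page_v_addr,
 *			      spt, desc);
 * then
 *   dp_cc_desc_find(soc, dp_cc_desc_id_generate(ppt, spt))
 * returns (uintptr_t)desc, i.e. the cookie ID alone identifies the SW
 * descriptor without any separate lookup table.
 */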
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG near-full levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
 *		condition and drastic steps need to be taken for processing
 *		the entries in SRNG
 */
enum dp_srng_near_full_levels {
	DP_SRNG_THRESH_SAFE,
	DP_SRNG_THRESH_NEAR_FULL,
	DP_SRNG_THRESH_CRITICAL,
};

/**
 * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
 *				its corresponding near-full irq handler
 * @soc: Datapath SoC handle
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: 1, if the srng was marked as near-full
 *	   0, if the srng was not marked as near-full
 */
static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
					       struct dp_srng *dp_srng)
{
	return qdf_atomic_read(&dp_srng->near_full);
}
/**
 * dp_srng_get_near_full_level() - Check the number of valid entries in the
 *			consumer srng and return the level of the srng
 *			near-full state.
 * @soc: Datapath SoC handle [to be validated by the caller]
 * @dp_srng: SRNG handle
 *
 * Return: near-full level
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
	uint32_t num_valid;

	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
						  dp_srng->hal_srng,
						  true);

	if (num_valid > dp_srng->crit_thresh)
		return DP_SRNG_THRESH_CRITICAL;
	else if (num_valid < dp_srng->safe_thresh)
		return DP_SRNG_THRESH_SAFE;
	else
		return DP_SRNG_THRESH_NEAR_FULL;
}
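
/*
 * Illustrative example (hypothetical thresholds, not part of the upstream
 * header): with safe_thresh = 256 and crit_thresh = 1024, a ring holding
 * 512 valid entries reports DP_SRNG_THRESH_NEAR_FULL, 2000 entries reports
 * DP_SRNG_THRESH_CRITICAL, and 100 entries reports DP_SRNG_THRESH_SAFE.
 * This assumes safe_thresh < crit_thresh, as implied by the comparisons above.
 */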
#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER 2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near-full level and update
 *			the reap limit and flags to reflect the state.
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *			per the near-full state
 *
 * Return: 1, if the srng is near full
 *	   0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	int ring_near_full = 0, near_full_level;

	if (dp_srng_check_ring_near_full(soc, srng)) {
		near_full_level = dp_srng_get_near_full_level(soc, srng);
		switch (near_full_level) {
		case DP_SRNG_THRESH_CRITICAL:
			/* Currently not doing anything special here */
			/* fall through */
		case DP_SRNG_THRESH_NEAR_FULL:
			ring_near_full = 1;
			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
			break;
		case DP_SRNG_THRESH_SAFE:
			qdf_atomic_set(&srng->near_full, 0);
			ring_near_full = 0;
			break;
		default:
			qdf_assert(0);
			break;
		}
	}

	return ring_near_full;
}
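
/*
 * Illustrative usage sketch (hypothetical names, not part of the upstream
 * header). The caller must pre-load *max_reap_limit with its normal quota,
 * since the helper only scales it when the ring is near full:
 *
 *   int max_reap_limit = quota;
 *   int near_full = _dp_srng_test_and_update_nf_params(soc, srng,
 *							 &max_reap_limit);
 *   // near_full == 1: reap up to quota * DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER
 *   // near_full == 0: reap up to quota as usual
 */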
#else
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	return 0;
}
#endif
/**
 * dp_desc_pool_get_cmem_base() - get CMEM base offset for a descriptor pool
 * @chip_id: chip id, used to separate the rx pools of different chips
 * @desc_pool_id: descriptor pool id
 * @desc_type: descriptor type (TX or RX buffer)
 *
 * Return: CMEM offset of the pool's cookie conversion table
 */
static inline
uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
				    enum dp_desc_type desc_type)
{
	switch (desc_type) {
	case DP_TX_DESC_TYPE:
		return (DP_TX_DESC_CMEM_OFFSET +
			(desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
	case DP_RX_DESC_BUF_TYPE:
		return (DP_RX_DESC_CMEM_OFFSET +
			((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
			DP_RX_DESC_POOL_CMEM_SIZE);
	default:
		QDF_BUG(0);
	}

	return 0;
}
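
/*
 * Illustrative example (not part of the upstream header): for chip_id 1 and
 * rx desc_pool_id 2, the pool's table starts at
 *   DP_RX_DESC_CMEM_OFFSET +
 *   ((1 * MAX_RXDESC_POOLS) + 2) * DP_RX_DESC_POOL_CMEM_SIZE,
 * i.e. rx pools are laid out per chip, after all tx pools in CMEM.
 */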
#ifndef WLAN_MLO_MULTI_CHIP
static inline
void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
}

static inline
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
}
#endif
/**
 * dp_txrx_set_vdev_param_be() - target specific ops while setting vdev params
 * @soc: DP soc handle
 * @vdev: pointer to vdev structure
 * @param: parameter type to be set
 * @val: value to be set
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_txrx_set_vdev_param_be(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     enum cdp_vdev_param_type param,
				     cdp_config_param_type val);

#endif /* __DP_BE_H */