/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __DP_BE_H
#define __DP_BE_H

#include <dp_types.h>
#include <hal_be_tx.h>
#ifdef WLAN_MLO_MULTI_CHIP
#include "mlo/dp_mlo.h"
#else
#include <dp_peer.h>
#endif
#include <dp_mon.h>
/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* mask covering the entry offset within one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)

/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES 1024

/* cookie conversion required CMEM offset from CMEM pool */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size 4K */
#define DP_CC_PPT_MEM_SIZE 4096

/* FST required CMEM offset from CMEM pool */
#define DP_FST_MEM_OFFSET_IN_CMEM \
	(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0
#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF
#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0
#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* higher 11 bits in Desc ID for offset in CMEM of PPT */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19
#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00
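
/*
 * Illustrative Desc ID layout for cookie conversion (sketch, example
 * value hypothetical):
 *   bits 19..9 : PPT page offset in CMEM (DP_CC_DESC_ID_PPT_PAGE_OS_*)
 *   bits  8..0 : entry offset within one SPT page
 *                (DP_CC_DESC_ID_SPT_VA_OS_*)
 * e.g. a Desc ID of 0x60A refers to SPT page 3, entry 10.
 */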

/*
 * page 4K unaligned case, single SPT page physical address
 * needs 8 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8

/*
 * page 4K aligned case, single SPT page physical address
 * needs 4 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/* 4K aligned case, number of bits HW appends for one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 3
#else
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 5
#endif

/* tx descriptors are programmed at the start of the CMEM region */
#define DP_TX_DESC_CMEM_OFFSET 0

/* size of CMEM needed for a tx desc pool */
#define DP_TX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* offset of rx descriptor pool */
#define DP_RX_DESC_CMEM_OFFSET \
	(DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE))

/* size of CMEM needed for a rx desc pool */
#define DP_RX_DESC_POOL_CMEM_SIZE \
	((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* get ppt_id from CMEM_OFFSET */
#define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
	((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
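
/*
 * Illustrative example (sketch): with 4K-aligned SPT pages each PPT entry
 * occupies DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED (4) bytes in CMEM, so a CMEM
 * offset of 40 bytes from the cookie conversion base maps to ppt_id 10
 * via DP_CMEM_OFFSET_TO_PPT_ID(40).
 */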

/**
 * struct dp_spt_page_desc - secondary page table page descriptor
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in primary page table where this page physical
 *	       address is stored
 */
struct dp_spt_page_desc {
	uint8_t *page_v_addr;
	qdf_dma_addr_t page_p_addr;
	uint32_t ppt_index;
};

/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_offset: CMEM offset from base address for primary page table setup
 * @total_page_num: total DDR pages allocated
 * @page_desc_base: page Desc buffer base address
 * @page_pool: DDR pages pool
 * @cc_lock: lock for page acquiring/free
 */
struct dp_hw_cookie_conversion_t {
	uint32_t cmem_offset;
	uint32_t total_page_num;
	struct dp_spt_page_desc *page_desc_base;
	struct qdf_mem_multi_page_t page_pool;
	qdf_spinlock_t cc_lock;
};

/**
 * struct dp_spt_page_desc_list - container of SPT page desc list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
	struct dp_spt_page_desc *spt_page_list_head;
	struct dp_spt_page_desc *spt_page_list_tail;
	uint16_t num_spt_pages;
};

/* HW reads 8 bytes for VA */
#define DP_CC_HW_READ_BYTES 8
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
	{ *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
	= (uintptr_t)(_desc_va); }
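
/*
 * Illustrative usage (sketch, local names hypothetical): during desc pool
 * init each TX/RX descriptor VA is written into its SPT slot, e.g.
 *
 *	DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr, idx, tx_desc);
 *
 * HW later reads the 8-byte slot (DP_CC_HW_READ_BYTES) to convert the
 * SW cookie back to the descriptor VA.
 */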

/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: ref count indicating number of users of the bank
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
	uint8_t is_configured;
	qdf_atomic_t ref_count;
	union hal_tx_bank_config bank_config;
};

/**
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: lock to protect bank_profiles
 * @bank_profiles: bank profiles for various TX banks
 * @page_desc_base: base of SPT page descriptors
 * @cc_cmem_base: cmem offset reserved for CC
 * @tx_cc_ctx: Cookie conversion context for tx desc pools
 * @rx_cc_ctx: Cookie conversion context for rx desc pools
 * @reo2ppe_ring: REO2PPE ring
 * @ppe2tcl_ring: PPE2TCL ring
 * @ppe_release_ring: PPE release ring
 * @mlo_enabled: Flag to indicate MLO is enabled or not
 * @mlo_chip_id: MLO chip_id
 * @ml_ctxt: pointer to global ml_context
 * @mld_peer_hash_lock: lock to protect mld_peer_hash
 * @mld_peer_hash: peer hash table for ML peers
 */
struct dp_soc_be {
	struct dp_soc soc;
	uint8_t num_bank_profiles;
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
	qdf_mutex_t tx_bank_lock;
#else
	qdf_spinlock_t tx_bank_lock;
#endif
	struct dp_tx_bank_profile *bank_profiles;
	struct dp_spt_page_desc *page_desc_base;
	uint32_t cc_cmem_base;
	struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
	struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_srng reo2ppe_ring;
	struct dp_srng ppe2tcl_ring;
	struct dp_srng ppe_release_ring;
#endif
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_enabled;
	uint8_t mlo_chip_id;
	struct dp_mlo_ctxt *ml_ctxt;
#else
	/* Protect mld peer hash table */
	DP_MUTEX_TYPE mld_peer_hash_lock;
	struct {
		uint32_t mask;
		uint32_t idx_bits;
		TAILQ_HEAD(, dp_peer) * bins;
	} mld_peer_hash;
#endif
#endif
};

/* convert struct dp_soc_be pointer to struct dp_soc pointer */
#define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)(be_soc))

/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 * @mlo_link_id: MLO link id for PDEV
 */
struct dp_pdev_be {
	struct dp_pdev pdev;
#ifdef WLAN_MLO_MULTI_CHIP
	uint8_t mlo_link_id;
#endif
};

/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
 * @partner_vdev_list: partner vdev list used for Intra-BSS
 * @seq_num: DP MLO seq number
 * @mcast_primary: MLO Mcast primary vdev
 */
struct dp_vdev_be {
	struct dp_vdev vdev;
	int8_t bank_id;
	uint8_t vdev_id_check_en;
#ifdef WLAN_MLO_MULTI_CHIP
	/* partner list used for Intra-BSS */
	uint8_t partner_vdev_list[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC];
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MCAST_MLO
	/* DP MLO seq number */
	uint16_t seq_num;
	/* MLO Mcast primary vdev */
	bool mcast_primary;
#endif
#endif
#endif
};

/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 */
struct dp_peer_be {
	struct dp_peer peer;
};

/**
 * dp_get_soc_context_size_be() - get context size for target specific DP soc
 *
 * Return: value in bytes for BE specific soc structure
 */
qdf_size_t dp_get_soc_context_size_be(void);

/**
 * dp_initialize_arch_ops_be() - initialize BE specific arch ops
 * @arch_ops: arch ops pointer
 *
 * Return: none
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);

/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: context type for which the size is needed
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);

/**
 * dp_mon_get_context_size_be() - get BE specific size for mon pdev/soc
 * @context_type: context type for which the size is needed
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_mon_get_context_size_be(enum dp_context_type context_type);

/**
 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
 * @soc: dp_soc pointer
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
	return (struct dp_soc_be *)soc;
}

/**
 * dp_get_be_mon_soc_from_dp_mon_soc() - get dp_mon_soc_be from dp_mon_soc
 * @soc: dp_mon_soc pointer
 *
 * Return: dp_mon_soc_be pointer
 */
static inline
struct dp_mon_soc_be *dp_get_be_mon_soc_from_dp_mon_soc(struct dp_mon_soc *soc)
{
	return (struct dp_mon_soc_be *)soc;
}

#ifdef WLAN_MLO_MULTI_CHIP
typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t;

/*
 * dp_mlo_get_peer_hash_obj() - return the container struct of MLO hash table
 * @soc: soc handle
 *
 * Return: MLD peer hash object
 */
static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return be_soc->ml_ctxt;
}

void dp_clr_mlo_ptnr_list(struct dp_soc *soc, struct dp_vdev *vdev);

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MCAST_MLO)
typedef void dp_ptnr_vdev_iter_func(struct dp_vdev_be *be_vdev,
				    struct dp_vdev *ptnr_vdev,
				    void *arg);

/*
 * dp_mcast_mlo_iter_ptnr_vdev() - API to iterate through ptnr vdev list
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @func: function to be called for each partner vdev
 * @arg: argument to be passed to func
 * @mod_id: module id
 *
 * Return: None
 */
void dp_mcast_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
				 struct dp_vdev_be *be_vdev,
				 dp_ptnr_vdev_iter_func func,
				 void *arg,
				 enum dp_mod_id mod_id);

/*
 * dp_mlo_get_mcast_primary_vdev() - get ref to mcast primary vdev
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @mod_id: module id
 *
 * Return: mcast primary DP VDEV handle on success, NULL on failure
 */
struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
					      struct dp_vdev_be *be_vdev,
					      enum dp_mod_id mod_id);
#endif
#else
typedef struct dp_soc_be *dp_mld_peer_hash_obj_t;

static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
	return dp_get_be_soc_from_dp_soc(soc);
}

static inline void dp_clr_mlo_ptnr_list(struct dp_soc *soc,
					struct dp_vdev *vdev)
{
}
#endif

/*
 * dp_mlo_peer_find_hash_attach_be() - API to initialize ML peer hash table
 * @mld_hash_obj: peer hash object
 * @hash_elems: number of entries in hash table
 *
 * Return: QDF_STATUS_SUCCESS when attach is success else QDF_STATUS_FAILURE
 */
QDF_STATUS
dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
				int hash_elems);

/*
 * dp_mlo_peer_find_hash_detach_be() - API to de-initialize ML peer hash table
 * @mld_hash_obj: peer hash object
 *
 * Return: void
 */
void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj);

/**
 * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
 * @pdev: dp_pdev pointer
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
	return (struct dp_pdev_be *)pdev;
}

#ifdef QCA_MONITOR_2_0_SUPPORT
/**
 * dp_get_be_mon_pdev_from_dp_mon_pdev() - get dp_mon_pdev_be from dp_mon_pdev
 * @mon_pdev: dp_mon_pdev pointer
 *
 * Return: dp_mon_pdev_be pointer
 */
static inline
struct dp_mon_pdev_be *dp_get_be_mon_pdev_from_dp_mon_pdev(struct dp_mon_pdev *mon_pdev)
{
	return (struct dp_mon_pdev_be *)mon_pdev;
}
#endif

/**
 * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
 * @vdev: dp_vdev pointer
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
	return (struct dp_vdev_be *)vdev;
}

/**
 * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
 * @peer: dp_peer pointer
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
	return (struct dp_peer_be *)peer;
}

QDF_STATUS
dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx,
			       uint32_t num_descs,
			       enum dp_desc_type desc_type,
			       uint8_t desc_pool_id);

QDF_STATUS
dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);

QDF_STATUS
dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
			     struct dp_hw_cookie_conversion_t *cc_ctx);

QDF_STATUS
dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
			       struct dp_hw_cookie_conversion_t *cc_ctx);

/**
 * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptors from pool
 * @be_soc: beryllium soc handler
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @num_desc: number of TX/RX Descs required for SPT pages
 *
 * Return: number of SPT page Descs allocated
 */
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
				   struct dp_spt_page_desc **list_head,
				   struct dp_spt_page_desc **list_tail,
				   uint16_t num_desc);

/**
 * dp_cc_spt_page_desc_free() - free SPT DDR page descriptors back to pool
 * @be_soc: beryllium soc handler
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @page_nums: number of page descs freed back to pool
 */
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
			      struct dp_spt_page_desc **list_head,
			      struct dp_spt_page_desc **list_tail,
			      uint16_t page_nums);

/**
 * dp_cc_desc_id_generate() - generate SW cookie ID according to
 *			      DDR page 4K aligned or not
 * @ppt_index: offset index in primary page table
 * @spt_index: offset index in secondary DDR page
 *
 * Generate the SW cookie ID in the format HW expects.
 *
 * Return: cookie ID
 */
static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
					      uint16_t spt_index)
{
	/*
	 * For the 4K-aligned case, the CMEM entry size is 4 bytes: the HW
	 * index in bit19~bit10 is ppt_index / 2 and the high-32-bits flag
	 * in bit9 is ppt_index % 2, so bit19~bit9 together carry exactly
	 * the original ppt_index value.
	 * For the 4K-unaligned case, the CMEM entry size is 8 bytes and
	 * bit19~bit9 hold the HW index directly, i.e. the ppt_index value.
	 */
	return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
		spt_index);
}
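
/*
 * Illustrative round trip (sketch, values hypothetical): a descriptor
 * placed in SPT page 3, entry 10 gets
 *	dp_cc_desc_id_generate(3, 10) == (3 << 9) | 10 == 0x60A
 * and dp_cc_desc_find() below recovers the VA by reading
 *	page_desc_base[3].page_v_addr + 10 * DP_CC_HW_READ_BYTES.
 */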

/**
 * dp_cc_desc_find() - find TX/RX Desc virtual address by cookie ID
 * @soc: dp soc handle
 * @desc_id: TX/RX Desc ID
 *
 * Return: TX/RX Desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
					uint32_t desc_id)
{
	struct dp_soc_be *be_soc;
	uint16_t ppt_page_id, spt_va_id;
	uint8_t *spt_page_va;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;
	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

	/*
	 * The ppt index in CMEM follows the same order as the pages in the
	 * page desc array set up during initialization.
	 * Each entry in a DDR page is 64 bits; on a 32-bit system only the
	 * lower 32 bits of the VA value are needed.
	 */
	spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;

	return (*((uintptr_t *)(spt_page_va +
				spt_va_id * DP_CC_HW_READ_BYTES)));
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG near-full levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
 *		condition and drastic steps need to be taken for processing
 *		the entries in SRNG
 */
enum dp_srng_near_full_levels {
	DP_SRNG_THRESH_SAFE,
	DP_SRNG_THRESH_NEAR_FULL,
	DP_SRNG_THRESH_CRITICAL,
};

/**
 * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
 *				    its corresponding near-full irq handler
 * @soc: Datapath SoC handle
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: 1, if the srng was marked as near-full
 *	   0, if the srng was not marked as near-full
 */
static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
					       struct dp_srng *dp_srng)
{
	return qdf_atomic_read(&dp_srng->near_full);
}

/**
 * dp_srng_get_near_full_level() - Check the num available entries in the
 *				   consumer srng and return the level of the
 *				   srng near-full state
 * @soc: Datapath SoC Handle [To be validated by the caller]
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: near-full level
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
	uint32_t num_valid;

	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
						  dp_srng->hal_srng,
						  true);

	if (num_valid > dp_srng->crit_thresh)
		return DP_SRNG_THRESH_CRITICAL;
	else if (num_valid < dp_srng->safe_thresh)
		return DP_SRNG_THRESH_SAFE;
	else
		return DP_SRNG_THRESH_NEAR_FULL;
}

#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER	2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near-full level and update
 *			the reap_limit and flags to reflect the state
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *			per the near-full state
 *
 * Return: 1, if the srng is near full
 *	   0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	int ring_near_full = 0, near_full_level;

	if (dp_srng_check_ring_near_full(soc, srng)) {
		near_full_level = dp_srng_get_near_full_level(soc, srng);
		switch (near_full_level) {
		case DP_SRNG_THRESH_CRITICAL:
			/* Currently not doing anything special here */
			/* fall through */
		case DP_SRNG_THRESH_NEAR_FULL:
			ring_near_full = 1;
			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
			break;
		case DP_SRNG_THRESH_SAFE:
			qdf_atomic_set(&srng->near_full, 0);
			ring_near_full = 0;
			break;
		default:
			qdf_assert(0);
			break;
		}
	}

	return ring_near_full;
}
#else
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	return 0;
}
#endif
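
/*
 * Illustrative caller pattern (sketch, local names hypothetical): a ring
 * reap loop queries the near-full state before processing, e.g.
 *
 *	int max_reap_limit = default_reap_limit;
 *	int near_full = _dp_srng_test_and_update_nf_params(soc, srng,
 *							    &max_reap_limit);
 *
 * and then reaps at most max_reap_limit entries; the limit is doubled
 * (DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER) while the ring stays near full.
 */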

static inline
uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
				    enum dp_desc_type desc_type)
{
	switch (desc_type) {
	case DP_TX_DESC_TYPE:
		return (DP_TX_DESC_CMEM_OFFSET +
			(desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
	case DP_RX_DESC_BUF_TYPE:
		return (DP_RX_DESC_CMEM_OFFSET +
			((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
			DP_RX_DESC_POOL_CMEM_SIZE);
	default:
		QDF_BUG(0);
	}

	return 0;
}
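
/*
 * Illustrative layout (sketch, values hypothetical): TX desc pools occupy
 * CMEM back to back starting at DP_TX_DESC_CMEM_OFFSET, so TX pool 2 sits
 * at 2 * DP_TX_DESC_POOL_CMEM_SIZE; RX buffer pools for all chips follow
 * from DP_RX_DESC_CMEM_OFFSET, so RX pool 1 on chip 0 sits at
 * DP_RX_DESC_CMEM_OFFSET + 1 * DP_RX_DESC_POOL_CMEM_SIZE.
 */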

#ifndef WLAN_MLO_MULTI_CHIP
static inline
void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
}

static inline
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
}
#endif

/*
 * dp_txrx_set_vdev_param_be() - target specific ops while setting vdev params
 * @soc: DP soc handle
 * @vdev: pointer to vdev structure
 * @param: parameter type to set value for
 * @val: value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_txrx_set_vdev_param_be(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     enum cdp_vdev_param_type param,
				     cdp_config_param_type val);
#endif