/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __DP_BE_H
#define __DP_BE_H

#include <dp_types.h>
#include <hal_be_tx.h>
#ifdef WLAN_MLO_MULTI_CHIP
#include "mlo/dp_mlo.h"
#else
#include <dp_peer.h>
#endif
#include <dp_mon.h>

/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* mask for the entry offset within one SPT page */
#define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)

/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES 1024

/* cookie conversion required CMEM offset from CMEM pool */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size 4K */
#define DP_CC_PPT_MEM_SIZE 4096

/* FST required CMEM offset from CMEM pool */
#define DP_FST_MEM_OFFSET_IN_CMEM \
        (DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0
#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF
#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0
#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* higher 11 bits in Desc ID for offset in CMEM of PPT */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19
#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00
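
/*
 * Illustrative sketch (not part of the API): with the masks above, a
 * 20-bit cookie-converted Desc ID decomposes as
 *
 *      bits [19:9]  PPT page offset (DP_CC_DESC_ID_PPT_PAGE_OS_*)
 *      bits [8:0]   entry offset within the SPT page
 *                   (DP_CC_DESC_ID_SPT_VA_OS_*)
 *
 * so, for example, desc_id 0x1284 refers to entry 0x084 of the SPT page
 * recorded at PPT index 0x9.
 */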

/*
 * page 4K unaligned case: a single SPT page physical address
 * needs 8 bytes in the PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8

/*
 * page 4K aligned case: a single SPT page physical address
 * needs 4 bytes in the PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/* 4K aligned case, number of bits HW appends for one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 3
#else
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 5
#endif

/* tx descriptors are programmed at the start of the CMEM region */
#define DP_TX_DESC_CMEM_OFFSET 0

/* size of CMEM needed for a tx desc pool */
#define DP_TX_DESC_POOL_CMEM_SIZE \
        ((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
         DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* offset of the rx descriptor pool */
#define DP_RX_DESC_CMEM_OFFSET \
        (DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE))

/* size of CMEM needed for a rx desc pool */
#define DP_RX_DESC_POOL_CMEM_SIZE \
        ((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
         DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* get ppt_id from CMEM_OFFSET */
#define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
        ((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
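
/*
 * Illustrative sketch of the layout implied by the macros above (hedged;
 * the exact base handling lives in the BE soc code): within the CMEM area
 * reserved for cookie conversion, the pool regions are, in order,
 *
 *      [DP_TX_DESC_CMEM_OFFSET .. )  MAX_TXDESC_POOLS tx pools,
 *                                    DP_TX_DESC_POOL_CMEM_SIZE bytes each
 *      [DP_RX_DESC_CMEM_OFFSET .. )  rx pools (per chip when multi-chip),
 *                                    DP_RX_DESC_POOL_CMEM_SIZE bytes each
 *
 * and DP_CMEM_OFFSET_TO_PPT_ID() maps any such byte offset back to a
 * primary page table index.
 */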

/* The MAX PPE PRI2TID */
#ifdef WLAN_SUPPORT_PPEDS
#define DP_TX_INT_PRI2TID_MAX 15
#endif

/**
 * struct dp_spt_page_desc - secondary page table page descriptor
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in the primary page table where this page's
 *	       physical address is stored
 */
struct dp_spt_page_desc {
        uint8_t *page_v_addr;
        qdf_dma_addr_t page_p_addr;
        uint32_t ppt_index;
};

/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_offset: CMEM offset from base address for primary page table setup
 * @total_page_num: total number of DDR pages allocated
 * @page_desc_base: page descriptor buffer base address
 * @page_pool: DDR pages pool
 * @cc_lock: lock protecting page acquire/free
 */
struct dp_hw_cookie_conversion_t {
        uint32_t cmem_offset;
        uint32_t total_page_num;
        struct dp_spt_page_desc *page_desc_base;
        struct qdf_mem_multi_page_t page_pool;
        qdf_spinlock_t cc_lock;
};

/**
 * struct dp_spt_page_desc_list - container for SPT page desc list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
        struct dp_spt_page_desc *spt_page_list_head;
        struct dp_spt_page_desc *spt_page_list_tail;
        uint16_t num_spt_pages;
};

/* HW reads 8 bytes for each VA entry */
#define DP_CC_HW_READ_BYTES 8
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
        { *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
        = (uintptr_t)(_desc_va); }
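
/*
 * Illustrative use of the macro above (a sketch with hypothetical local
 * names, not code from this header): after a TX/RX descriptor is attached
 * to SPT entry 'idx' of a page descriptor 'spt_desc', its virtual address
 * is published so HW can translate the cookie back to the VA:
 *
 *      DP_CC_SPT_PAGE_UPDATE_VA(spt_desc->page_v_addr, idx, tx_desc);
 *      desc_id = dp_cc_desc_id_generate(spt_desc->ppt_index, idx);
 */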

/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: ref count indicating number of users of the bank
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
        uint8_t is_configured;
        qdf_atomic_t ref_count;
        union hal_tx_bank_config bank_config;
};

#ifdef WLAN_SUPPORT_PPEDS
/**
 * struct dp_ppe_vp_tbl_entry - PPE Virtual table entry
 * @is_configured: Boolean indicating that the entry is configured.
 */
struct dp_ppe_vp_tbl_entry {
        bool is_configured;
};

/**
 * struct dp_ppe_vp_profile - PPE direct switch profile per vdev
 * @vp_num: Virtual port number
 * @ppe_vp_num_idx: Index to the PPE VP table entry
 * @search_idx_reg_num: Address search Index register number
 * @drop_prec_enable: Drop precedence enable
 * @to_fw: To FW exception enable/disable
 * @use_ppe_int_pri: Use PPE INT_PRI to TID mapping table
 */
struct dp_ppe_vp_profile {
        uint8_t vp_num;
        uint8_t ppe_vp_num_idx;
        uint8_t search_idx_reg_num;
        uint8_t drop_prec_enable;
        uint8_t to_fw;
        uint8_t use_ppe_int_pri;
};
#endif

/**
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: lock protecting the TX bank profiles
 * @bank_profiles: bank profiles for various TX banks
 * @page_desc_base: base of the SPT page descriptor array
 * @cc_cmem_base: cmem offset reserved for CC
 * @tx_cc_ctx: Cookie conversion context for tx desc pools
 * @rx_cc_ctx: Cookie conversion context for rx desc pools
 * @mlo_enabled: Flag to indicate MLO is enabled or not
 * @mlo_chip_id: MLO chip_id
 * @ml_ctxt: pointer to global ml_context
 * @mld_peer_hash: peer hash table for ML peers
 * @mld_peer_hash_lock: lock to protect mld_peer_hash
 * @reo2ppe_ring: REO2PPE ring
 * @ppe2tcl_ring: PPE2TCL ring
 * @ppe_release_ring: PPE release ring
 * @ppe_vp_tbl: PPE VP table
 * @ppe_vp_tbl_lock: PPE VP table lock
 * @num_ppe_vp_entries: Number of PPE VP entries
 */
struct dp_soc_be {
        struct dp_soc soc;
        uint8_t num_bank_profiles;
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
        qdf_mutex_t tx_bank_lock;
#else
        qdf_spinlock_t tx_bank_lock;
#endif
        struct dp_tx_bank_profile *bank_profiles;
        struct dp_spt_page_desc *page_desc_base;
        uint32_t cc_cmem_base;
        struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
        struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
        struct dp_srng reo2ppe_ring;
        struct dp_srng ppe2tcl_ring;
        struct dp_srng ppe_release_ring;
        struct dp_ppe_vp_tbl_entry *ppe_vp_tbl;
        qdf_mutex_t ppe_vp_tbl_lock;
        uint8_t num_ppe_vp_entries;
#endif
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
        uint8_t mlo_enabled;
        uint8_t mlo_chip_id;
        struct dp_mlo_ctxt *ml_ctxt;
#else
        /* Protect mld peer hash table */
        DP_MUTEX_TYPE mld_peer_hash_lock;
        struct {
                uint32_t mask;
                uint32_t idx_bits;
                TAILQ_HEAD(, dp_peer) * bins;
        } mld_peer_hash;
#endif
#endif
};

/* convert struct dp_soc_be pointer to struct dp_soc pointer */
#define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)

/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 * @mlo_link_id: MLO link id for PDEV
 */
struct dp_pdev_be {
        struct dp_pdev pdev;
#ifdef WLAN_MLO_MULTI_CHIP
        uint8_t mlo_link_id;
#endif
};

/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
 * @ppe_vp_enabled: flag to check if PPE VP is enabled for vdev
 * @ppe_vp_profile: PPE VP profile
 */
struct dp_vdev_be {
        struct dp_vdev vdev;
        int8_t bank_id;
        uint8_t vdev_id_check_en;
#ifdef WLAN_MLO_MULTI_CHIP
        /* partner list used for Intra-BSS */
        uint8_t partner_vdev_list[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC];
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MCAST_MLO
        /* DP MLO seq number */
        uint16_t seq_num;
        /* MLO Mcast primary vdev */
        bool mcast_primary;
#endif
#endif
#endif
        unsigned long ppe_vp_enabled;
#ifdef WLAN_SUPPORT_PPEDS
        struct dp_ppe_vp_profile ppe_vp_profile;
#endif
};

/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 */
struct dp_peer_be {
        struct dp_peer peer;
};

/**
 * dp_get_soc_context_size_be() - get context size for target specific DP soc
 *
 * Return: value in bytes for BE specific soc structure
 */
qdf_size_t dp_get_soc_context_size_be(void);

/**
 * dp_initialize_arch_ops_be() - initialize BE specific arch ops
 * @arch_ops: arch ops pointer
 *
 * Return: none
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);

/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: context type for which the size is needed
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);

/**
 * dp_mon_get_context_size_be() - get BE specific size for mon pdev/soc
 * @context_type: context type for which the size is needed
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_mon_get_context_size_be(enum dp_context_type context_type);

/**
 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
 * @soc: dp_soc pointer
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
        return (struct dp_soc_be *)soc;
}

/**
 * dp_get_be_mon_soc_from_dp_mon_soc() - get dp_mon_soc_be from dp_mon_soc
 * @soc: dp_mon_soc pointer
 *
 * Return: dp_mon_soc_be pointer
 */
static inline
struct dp_mon_soc_be *dp_get_be_mon_soc_from_dp_mon_soc(struct dp_mon_soc *soc)
{
        return (struct dp_mon_soc_be *)soc;
}

#ifdef WLAN_MLO_MULTI_CHIP
typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t;

/*
 * dp_mlo_get_peer_hash_obj() - return the container struct of MLO hash table
 * @soc: soc handle
 *
 * return: MLD peer hash object
 */
static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
        struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

        return be_soc->ml_ctxt;
}

void dp_clr_mlo_ptnr_list(struct dp_soc *soc, struct dp_vdev *vdev);

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MCAST_MLO)
typedef void dp_ptnr_vdev_iter_func(struct dp_vdev_be *be_vdev,
                                    struct dp_vdev *ptnr_vdev,
                                    void *arg);
typedef void dp_ptnr_soc_iter_func(struct dp_soc *ptnr_soc,
                                   void *arg);

/*
 * dp_mcast_mlo_iter_ptnr_vdev() - API to iterate through the partner vdev list
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @func: function to be called for each partner vdev
 * @arg: argument to be passed to func
 * @mod_id: module id
 *
 * Return: None
 */
void dp_mcast_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
                                 struct dp_vdev_be *be_vdev,
                                 dp_ptnr_vdev_iter_func func,
                                 void *arg,
                                 enum dp_mod_id mod_id);

/*
 * dp_mcast_mlo_iter_ptnr_soc() - API to iterate through the partner soc list
 * @be_soc: dp_soc_be pointer
 * @func: function to be called for each partner soc
 * @arg: argument to be passed to func
 *
 * Return: None
 */
void dp_mcast_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc,
                                dp_ptnr_soc_iter_func func,
                                void *arg);

/*
 * dp_mlo_get_mcast_primary_vdev() - get ref to mcast primary vdev
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @mod_id: module id
 *
 * Return: mcast primary DP VDEV handle on success, NULL on failure
 */
struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
                                              struct dp_vdev_be *be_vdev,
                                              enum dp_mod_id mod_id);
#endif
#else
typedef struct dp_soc_be *dp_mld_peer_hash_obj_t;

static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
        return dp_get_be_soc_from_dp_soc(soc);
}

static inline void dp_clr_mlo_ptnr_list(struct dp_soc *soc,
                                        struct dp_vdev *vdev)
{
}
#endif

/*
 * dp_mlo_peer_find_hash_attach_be() - API to initialize ML peer hash table
 * @mld_hash_obj: peer hash object
 * @hash_elems: number of entries in hash table
 *
 * return: QDF_STATUS_SUCCESS when attach is success else QDF_STATUS_FAILURE
 */
QDF_STATUS
dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
                                int hash_elems);

/*
 * dp_mlo_peer_find_hash_detach_be() - API to de-initialize ML peer hash table
 * @mld_hash_obj: peer hash object
 *
 * return: void
 */
void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj);

/**
 * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
 * @pdev: dp_pdev pointer
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
        return (struct dp_pdev_be *)pdev;
}

#ifdef QCA_MONITOR_2_0_SUPPORT
/**
 * dp_get_be_mon_pdev_from_dp_mon_pdev() - get dp_mon_pdev_be from dp_mon_pdev
 * @mon_pdev: dp_mon_pdev pointer
 *
 * Return: dp_mon_pdev_be pointer
 */
static inline
struct dp_mon_pdev_be *dp_get_be_mon_pdev_from_dp_mon_pdev(struct dp_mon_pdev *mon_pdev)
{
        return (struct dp_mon_pdev_be *)mon_pdev;
}
#endif

/**
 * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
 * @vdev: dp_vdev pointer
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
        return (struct dp_vdev_be *)vdev;
}

/**
 * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
 * @peer: dp_peer pointer
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
        return (struct dp_peer_be *)peer;
}

QDF_STATUS
dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
                               struct dp_hw_cookie_conversion_t *cc_ctx,
                               uint32_t num_descs,
                               enum dp_desc_type desc_type,
                               uint8_t desc_pool_id);

QDF_STATUS
dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
                               struct dp_hw_cookie_conversion_t *cc_ctx);

QDF_STATUS
dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
                             struct dp_hw_cookie_conversion_t *cc_ctx);

QDF_STATUS
dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
                               struct dp_hw_cookie_conversion_t *cc_ctx);

/**
 * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptor from pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @num_desc: number of TX/RX Descs required for SPT pages
 *
 * Return: number of SPT page Descs allocated
 */
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
                                   struct dp_spt_page_desc **list_head,
                                   struct dp_spt_page_desc **list_tail,
                                   uint16_t num_desc);

/**
 * dp_cc_spt_page_desc_free() - free SPT DDR page descriptor to pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @page_nums: number of page descs freed back to pool
 */
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
                              struct dp_spt_page_desc **list_head,
                              struct dp_spt_page_desc **list_tail,
                              uint16_t page_nums);
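
/*
 * Illustrative pairing of the two APIs above (a sketch with hypothetical
 * local names, not driver code): a descriptor pool that needs 'num_desc'
 * cookie-converted entries grabs enough SPT pages at init and returns the
 * same list on teardown:
 *
 *      struct dp_spt_page_desc *head = NULL, *tail = NULL;
 *      uint16_t pages;
 *
 *      pages = dp_cc_spt_page_desc_alloc(be_soc, &head, &tail, num_desc);
 *      ...
 *      dp_cc_spt_page_desc_free(be_soc, &head, &tail, pages);
 */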

/**
 * dp_cc_desc_id_generate() - generate SW cookie ID according to whether the
 *			      DDR page is 4K aligned or not
 * @ppt_index: offset index in primary page table
 * @spt_index: offset index in secondary DDR page
 *
 * Generate the SW cookie ID to match what HW expects
 *
 * Return: cookie ID
 */
static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
                                              uint16_t spt_index)
{
        /*
         * for the 4K-aligned case, the cmem entry size is 4 bytes:
         * HW index from bit19~bit10 value = ppt_index / 2, high 32 bits flag
         * from bit9 value = ppt_index % 2, so the bit19~bit9 value is
         * exactly the same as the original ppt_index value.
         * for the 4K-unaligned case, the cmem entry size is 8 bytes:
         * bit19~bit9 will be the HW index value, same as ppt_index.
         */
        return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
                spt_index);
}

/**
 * dp_cc_desc_find() - find TX/RX Desc virtual address by ID
 * @soc: dp soc handle
 * @desc_id: TX/RX Desc ID
 *
 * Return: TX/RX Desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
                                        uint32_t desc_id)
{
        struct dp_soc_be *be_soc;
        uint16_t ppt_page_id, spt_va_id;
        uint8_t *spt_page_va;

        be_soc = dp_get_be_soc_from_dp_soc(soc);
        ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
                        DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;

        spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
                        DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

        /*
         * the ppt index in cmem follows the same order in which the pages
         * were placed in the page desc array during initialization.
         * the entry size in a DDR page is 64 bits; on a 32-bit system,
         * only the lower 32 bits of the VA value are needed.
         */
        spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;

        return (*((uintptr_t *)(spt_page_va +
                                spt_va_id * DP_CC_HW_READ_BYTES)));
}
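
/*
 * Illustrative round trip (a sketch, hypothetical locals): an ID produced
 * by dp_cc_desc_id_generate() for an entry previously published with
 * DP_CC_SPT_PAGE_UPDATE_VA() resolves back to the same descriptor VA:
 *
 *      uint32_t id = dp_cc_desc_id_generate(spt_desc->ppt_index, idx);
 *      void *desc_va = (void *)dp_cc_desc_find(soc, id);
 */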

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG Near FULL levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
 *		condition and drastic steps need to be taken for processing
 *		the entries in SRNG
 */
enum dp_srng_near_full_levels {
        DP_SRNG_THRESH_SAFE,
        DP_SRNG_THRESH_NEAR_FULL,
        DP_SRNG_THRESH_CRITICAL,
};

/**
 * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
 *				its corresponding near-full irq handler
 * @soc: Datapath SoC handle
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: 1, if the srng was marked as near-full
 *	   0, if the srng was not marked as near-full
 */
static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
                                               struct dp_srng *dp_srng)
{
        return qdf_atomic_read(&dp_srng->near_full);
}

/**
 * dp_srng_get_near_full_level() - Check the number of available entries in the
 *			consumer srng and return the level of the srng
 *			near-full state.
 * @soc: Datapath SoC Handle [To be validated by the caller]
 * @dp_srng: SRNG handle
 *
 * Return: near-full level
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
        uint32_t num_valid;

        num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
                                                  dp_srng->hal_srng,
                                                  true);

        if (num_valid > dp_srng->crit_thresh)
                return DP_SRNG_THRESH_CRITICAL;
        else if (num_valid < dp_srng->safe_thresh)
                return DP_SRNG_THRESH_SAFE;
        else
                return DP_SRNG_THRESH_NEAR_FULL;
}

#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER 2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near-full level and update
 *			the reap_limit and flags to reflect the state.
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *			per the near-full state
 *
 * Return: 1, if the srng is near full
 *	   0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
                                   struct dp_srng *srng,
                                   int *max_reap_limit)
{
        int ring_near_full = 0, near_full_level;

        if (dp_srng_check_ring_near_full(soc, srng)) {
                near_full_level = dp_srng_get_near_full_level(soc, srng);
                switch (near_full_level) {
                case DP_SRNG_THRESH_CRITICAL:
                        /* Currently not doing anything special here */
                        /* fall through */
                case DP_SRNG_THRESH_NEAR_FULL:
                        ring_near_full = 1;
                        *max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
                        break;
                case DP_SRNG_THRESH_SAFE:
                        qdf_atomic_set(&srng->near_full, 0);
                        ring_near_full = 0;
                        break;
                default:
                        qdf_assert(0);
                        break;
                }
        }

        return ring_near_full;
}
#else
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
                                   struct dp_srng *srng,
                                   int *max_reap_limit)
{
        return 0;
}
#endif
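
/*
 * Illustrative caller pattern (a sketch with hypothetical locals, not code
 * from this header): a reap loop queries the near-full state once per pass
 * and lets the helper scale its reap budget when the ring is near full:
 *
 *      int max_reap_limit = reap_budget;
 *      int near_full;
 *
 *      near_full = _dp_srng_test_and_update_nf_params(soc, srng,
 *                                                     &max_reap_limit);
 *      while (reaped < max_reap_limit && more entries available)
 *              reap one entry;
 */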

static inline
uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
                                    enum dp_desc_type desc_type)
{
        switch (desc_type) {
        case DP_TX_DESC_TYPE:
                return (DP_TX_DESC_CMEM_OFFSET +
                        (desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
        case DP_RX_DESC_BUF_TYPE:
                return (DP_RX_DESC_CMEM_OFFSET +
                        ((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
                        DP_RX_DESC_POOL_CMEM_SIZE);
        default:
                QDF_BUG(0);
        }
        return 0;
}
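
/*
 * Illustrative arithmetic for dp_desc_pool_get_cmem_base() (a sketch; the
 * actual values depend on the configured WLAN_CFG_* pool sizes):
 *
 *      TX pool n          -> DP_TX_DESC_CMEM_OFFSET +
 *                            n * DP_TX_DESC_POOL_CMEM_SIZE
 *      RX pool m, chip c  -> DP_RX_DESC_CMEM_OFFSET +
 *                            (c * MAX_RXDESC_POOLS + m) *
 *                            DP_RX_DESC_POOL_CMEM_SIZE
 *
 * DP_CMEM_OFFSET_TO_PPT_ID() applied to either result gives the first
 * primary-page-table index used by that pool.
 */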

#ifndef WLAN_MLO_MULTI_CHIP
static inline
void dp_soc_mlo_fill_params(struct dp_soc *soc,
                            struct cdp_soc_attach_params *params)
{
}

static inline
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
                             struct cdp_pdev_attach_params *params)
{
}
#endif

/*
 * dp_txrx_set_vdev_param_be() - target specific ops while setting vdev params
 * @soc: DP soc handle
 * @vdev: pointer to vdev structure
 * @param: parameter type whose value is to be set
 * @val: value
 *
 * return: QDF_STATUS
 */
QDF_STATUS dp_txrx_set_vdev_param_be(struct dp_soc *soc,
                                     struct dp_vdev *vdev,
                                     enum cdp_vdev_param_type param,
                                     cdp_config_param_type val);

#endif /* __DP_BE_H */