dp_be.h
/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __DP_BE_H
#define __DP_BE_H

#include <dp_types.h>
#include <hal_be_tx.h>
#ifdef WLAN_MLO_MULTI_CHIP
#include "mlo/dp_mlo.h"
#else
#include <dp_peer.h>
#endif
#include <dp_mon.h>

enum CMEM_MEM_CLIENTS {
        COOKIE_CONVERSION,
        FISA_FST,
};
/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* mask for the entry offset within one SPT page */
#define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)

/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES 1024
/* cookie conversion required CMEM offset from CMEM pool */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size 4K */
#define DP_CC_PPT_MEM_SIZE 4096

/* FST required CMEM offset from CMEM pool */
#define DP_FST_MEM_OFFSET_IN_CMEM \
        (DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* CMEM size for FISA FST 16K */
#define DP_CMEM_FST_SIZE 16384

/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0
#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF
#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0
#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* higher 11 bits in Desc ID for offset in CMEM of PPT */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19
#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00
/*
 * page 4K unaligned case, a single SPT page physical address
 * needs 8 bytes in the PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8

/*
 * page 4K aligned case, a single SPT page physical address
 * needs 4 bytes in the PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/* 4K aligned case, number of bits the HW appends to one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 3
#else
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 5
#endif
/* tx descriptors are programmed at the start of the CMEM region */
#define DP_TX_DESC_CMEM_OFFSET 0

/* size of CMEM needed for a tx desc pool */
#define DP_TX_DESC_POOL_CMEM_SIZE \
        ((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
         DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* Offset of rx descriptor pools */
#define DP_RX_DESC_CMEM_OFFSET \
        DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE)

/* size of CMEM needed for a rx desc pool */
#define DP_RX_DESC_POOL_CMEM_SIZE \
        ((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
         DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
/* get ppt_id from CMEM_OFFSET */
#define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
        ((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
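/*
 * Example (illustrative only, not part of the original header): the CMEM
 * layout arithmetic above works out as follows. Assuming, purely for
 * illustration, that WLAN_CFG_NUM_TX_DESC_MAX were 32768, then
 *
 *        DP_TX_DESC_POOL_CMEM_SIZE = (32768 / 512) * 4 = 256 bytes per TX pool
 *        DP_RX_DESC_CMEM_OFFSET    = 0 + MAX_TXDESC_POOLS * 256
 *        DP_CMEM_OFFSET_TO_PPT_ID(256) = 256 / 4 = 64
 *
 * i.e. each 4-byte PPT entry in CMEM maps one 4K SPT page holding 512
 * descriptor VAs, and a pool's CMEM offset divided by the entry size gives
 * the first PPT index used by that pool.
 */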
/* The MAX PPE PRI2TID */
#ifdef WLAN_SUPPORT_PPEDS
#define DP_TX_INT_PRI2TID_MAX 15
#endif
/**
 * struct dp_spt_page_desc - secondary page table page descriptor
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in the primary page table where this page's
 *             physical address is stored
 */
struct dp_spt_page_desc {
        uint8_t *page_v_addr;
        qdf_dma_addr_t page_p_addr;
        uint32_t ppt_index;
};
/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_offset: CMEM offset from base address for primary page table setup
 * @total_page_num: total DDR pages allocated
 * @page_desc_base: page descriptor buffer base address
 * @page_pool: DDR pages pool
 * @cc_lock: lock for page acquiring/freeing
 */
struct dp_hw_cookie_conversion_t {
        uint32_t cmem_offset;
        uint32_t total_page_num;
        struct dp_spt_page_desc *page_desc_base;
        struct qdf_mem_multi_page_t page_pool;
        qdf_spinlock_t cc_lock;
};
/**
 * struct dp_spt_page_desc_list - container of SPT page desc list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
        struct dp_spt_page_desc *spt_page_list_head;
        struct dp_spt_page_desc *spt_page_list_tail;
        uint16_t num_spt_pages;
};
/* HW reads 8 bytes for the VA */
#define DP_CC_HW_READ_BYTES 8
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
        { *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
        = (uintptr_t)(_desc_va); }
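/*
 * Example (illustrative sketch, not part of the original header): a driver
 * would typically publish a descriptor's VA into the SPT slot that matches
 * the descriptor's cookie. be_soc, ppt_id, spt_idx and tx_desc below are
 * hypothetical locals.
 *
 *        struct dp_spt_page_desc *page_desc = &be_soc->page_desc_base[ppt_id];
 *
 *        DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr, spt_idx, tx_desc);
 *
 * Slot spt_idx of the 4K SPT page now holds the descriptor's virtual address;
 * each slot is DP_CC_HW_READ_BYTES (8) bytes, since the HW reads 8 bytes per VA.
 */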
/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: ref count indicating number of users of the bank
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
        uint8_t is_configured;
        qdf_atomic_t ref_count;
        union hal_tx_bank_config bank_config;
};
#ifdef WLAN_SUPPORT_PPEDS
/**
 * struct dp_ppe_vp_tbl_entry - PPE Virtual Port table entry
 * @is_configured: boolean indicating whether the entry is configured
 */
struct dp_ppe_vp_tbl_entry {
        bool is_configured;
};

/**
 * struct dp_ppe_vp_profile - PPE direct switch profile per vdev
 * @vp_num: Virtual port number
 * @ppe_vp_num_idx: Index to the PPE VP table entry
 * @search_idx_reg_num: Address search index register number
 * @drop_prec_enable: Drop precedence enable
 * @to_fw: To FW exception enable/disable
 * @use_ppe_int_pri: Use PPE INT_PRI to TID mapping table
 */
struct dp_ppe_vp_profile {
        uint8_t vp_num;
        uint8_t ppe_vp_num_idx;
        uint8_t search_idx_reg_num;
        uint8_t drop_prec_enable;
        uint8_t to_fw;
        uint8_t use_ppe_int_pri;
};
#endif
/**
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: lock protecting the TX bank profiles
 * @bank_profiles: bank profiles for various TX banks
 * @page_desc_base: base of the SPT page descriptor array
 * @cc_cmem_base: cmem offset reserved for CC
 * @tx_cc_ctx: Cookie conversion context for tx desc pools
 * @rx_cc_ctx: Cookie conversion context for rx desc pools
 * @mlo_enabled: Flag to indicate MLO is enabled or not
 * @mlo_chip_id: MLO chip_id
 * @ml_ctxt: pointer to global ml_context
 * @delta_tqm: delta_tqm
 * @mlo_tstamp_offset: mlo timestamp offset
 * @mld_peer_hash: peer hash table for ML peers
 * @mld_peer_hash_lock: lock to protect mld_peer_hash
 * @reo2ppe_ring: REO2PPE ring
 * @ppe2tcl_ring: PPE2TCL ring
 * @ppe_release_ring: PPE release ring
 * @ppe_vp_tbl: PPE VP table
 * @ppe_vp_tbl_lock: PPE VP table lock
 * @num_ppe_vp_entries: Number of PPE VP entries
 */
struct dp_soc_be {
        struct dp_soc soc;
        uint8_t num_bank_profiles;
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
        qdf_mutex_t tx_bank_lock;
#else
        qdf_spinlock_t tx_bank_lock;
#endif
        struct dp_tx_bank_profile *bank_profiles;
        struct dp_spt_page_desc *page_desc_base;
        uint32_t cc_cmem_base;
        struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
        struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
        struct dp_srng reo2ppe_ring;
        struct dp_srng ppe2tcl_ring;
        struct dp_srng ppe_release_ring;
        struct dp_ppe_vp_tbl_entry *ppe_vp_tbl;
        qdf_mutex_t ppe_vp_tbl_lock;
        uint8_t num_ppe_vp_entries;
#endif
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
        uint8_t mlo_enabled;
        uint8_t mlo_chip_id;
        struct dp_mlo_ctxt *ml_ctxt;
        uint64_t delta_tqm;
        uint64_t mlo_tstamp_offset;
#else
        /* Protect mld peer hash table */
        DP_MUTEX_TYPE mld_peer_hash_lock;
        struct {
                uint32_t mask;
                uint32_t idx_bits;
                TAILQ_HEAD(, dp_peer) * bins;
        } mld_peer_hash;
#endif
#endif
};
/* convert struct dp_soc_be pointer to struct dp_soc pointer */
#define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)
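/*
 * Example (illustrative sketch, not part of the original header): the BE soc
 * embeds struct dp_soc as its first member, so conversion in either direction
 * is a plain cast. dp_get_be_soc_from_dp_soc() is defined further down in
 * this header.
 *
 *        struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
 *        struct dp_soc *generic_soc = DP_SOC_BE_GET_SOC(be_soc);
 *
 * Both pointers refer to the same allocation; only the static type changes.
 */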
/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 * @mlo_link_id: MLO link id for PDEV
 * @delta_tsf2: delta_tsf2
 */
struct dp_pdev_be {
        struct dp_pdev pdev;
#ifdef WLAN_MLO_MULTI_CHIP
        uint8_t mlo_link_id;
        uint64_t delta_tsf2;
#endif
};
/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
 * @ppe_vp_enabled: flag to check if PPE VP is enabled for vdev
 * @ppe_vp_profile: PPE VP profile
 */
struct dp_vdev_be {
        struct dp_vdev vdev;
        int8_t bank_id;
        uint8_t vdev_id_check_en;
#ifdef WLAN_MLO_MULTI_CHIP
        /* partner list used for Intra-BSS */
        uint8_t partner_vdev_list[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC];
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MCAST_MLO
        /* DP MLO seq number */
        uint16_t seq_num;
        /* MLO Mcast primary vdev */
        bool mcast_primary;
#endif
#endif
#endif
        unsigned long ppe_vp_enabled;
#ifdef WLAN_SUPPORT_PPEDS
        struct dp_ppe_vp_profile ppe_vp_profile;
#endif
};
/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 */
struct dp_peer_be {
        struct dp_peer peer;
};
/**
 * dp_get_soc_context_size_be() - get context size for target specific DP soc
 *
 * Return: value in bytes for BE specific soc structure
 */
qdf_size_t dp_get_soc_context_size_be(void);

/**
 * dp_initialize_arch_ops_be() - initialize BE specific arch ops
 * @arch_ops: arch ops pointer
 *
 * Return: none
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);

/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: context type for which the size is needed
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);

/**
 * dp_mon_get_context_size_be() - get BE specific size for mon pdev/soc
 * @context_type: context type for which the size is needed
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_mon_get_context_size_be(enum dp_context_type context_type);

/**
 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
 * @soc: dp_soc pointer
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
        return (struct dp_soc_be *)soc;
}
/**
 * dp_get_be_mon_soc_from_dp_mon_soc() - get dp_mon_soc_be from dp_mon_soc
 * @soc: dp_mon_soc pointer
 *
 * Return: dp_mon_soc_be pointer
 */
static inline
struct dp_mon_soc_be *dp_get_be_mon_soc_from_dp_mon_soc(struct dp_mon_soc *soc)
{
        return (struct dp_mon_soc_be *)soc;
}
#ifdef WLAN_MLO_MULTI_CHIP
typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t;

/*
 * dp_mlo_get_peer_hash_obj() - return the container struct of the MLO hash table
 * @soc: soc handle
 *
 * Return: MLD peer hash object
 */
static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
        struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

        return be_soc->ml_ctxt;
}

void dp_clr_mlo_ptnr_list(struct dp_soc *soc, struct dp_vdev *vdev);
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MCAST_MLO)
typedef void dp_ptnr_vdev_iter_func(struct dp_vdev_be *be_vdev,
                                    struct dp_vdev *ptnr_vdev,
                                    void *arg);
typedef void dp_ptnr_soc_iter_func(struct dp_soc *ptnr_soc,
                                   void *arg);

/*
 * dp_mcast_mlo_iter_ptnr_vdev() - API to iterate through the partner vdev list
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @func: function to be called for each partner vdev
 * @arg: argument to be passed to func
 * @mod_id: module id
 *
 * Return: None
 */
void dp_mcast_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
                                 struct dp_vdev_be *be_vdev,
                                 dp_ptnr_vdev_iter_func func,
                                 void *arg,
                                 enum dp_mod_id mod_id);

/*
 * dp_mcast_mlo_iter_ptnr_soc() - API to iterate through the partner soc list
 * @be_soc: dp_soc_be pointer
 * @func: function to be called for each partner soc
 * @arg: argument to be passed to func
 *
 * Return: None
 */
void dp_mcast_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc,
                                dp_ptnr_soc_iter_func func,
                                void *arg);

/*
 * dp_mlo_get_mcast_primary_vdev() - get ref to mcast primary vdev
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @mod_id: module id
 *
 * Return: mcast primary DP VDEV handle on success, NULL on failure
 */
struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
                                              struct dp_vdev_be *be_vdev,
                                              enum dp_mod_id mod_id);
#endif
#else
typedef struct dp_soc_be *dp_mld_peer_hash_obj_t;

static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
        return dp_get_be_soc_from_dp_soc(soc);
}

static inline void dp_clr_mlo_ptnr_list(struct dp_soc *soc,
                                        struct dp_vdev *vdev)
{
}
#endif
/*
 * dp_mlo_peer_find_hash_attach_be() - API to initialize the ML peer hash table
 * @mld_hash_obj: Peer hash object
 * @hash_elems: number of entries in hash table
 *
 * Return: QDF_STATUS_SUCCESS on successful attach, error status otherwise
 */
QDF_STATUS
dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
                                int hash_elems);

/*
 * dp_mlo_peer_find_hash_detach_be() - API to de-initialize the ML peer hash table
 * @mld_hash_obj: Peer hash object
 *
 * Return: void
 */
void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj);
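/*
 * Example (illustrative sketch, not part of the original header): a caller
 * would typically fetch the hash object for the soc and pair attach with
 * detach. soc and hash_elems below are hypothetical values.
 *
 *        dp_mld_peer_hash_obj_t hash_obj = dp_mlo_get_peer_hash_obj(soc);
 *
 *        if (QDF_IS_STATUS_ERROR(dp_mlo_peer_find_hash_attach_be(hash_obj,
 *                                                                hash_elems)))
 *                return QDF_STATUS_E_FAILURE;
 *        ...
 *        dp_mlo_peer_find_hash_detach_be(hash_obj);
 */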
/**
 * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
 * @pdev: dp_pdev pointer
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
        return (struct dp_pdev_be *)pdev;
}

#ifdef QCA_MONITOR_2_0_SUPPORT
/**
 * dp_get_be_mon_pdev_from_dp_mon_pdev() - get dp_mon_pdev_be from dp_mon_pdev
 * @mon_pdev: dp_mon_pdev pointer
 *
 * Return: dp_mon_pdev_be pointer
 */
static inline
struct dp_mon_pdev_be *dp_get_be_mon_pdev_from_dp_mon_pdev(struct dp_mon_pdev *mon_pdev)
{
        return (struct dp_mon_pdev_be *)mon_pdev;
}
#endif
/**
 * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
 * @vdev: dp_vdev pointer
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
        return (struct dp_vdev_be *)vdev;
}

/**
 * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
 * @peer: dp_peer pointer
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
        return (struct dp_peer_be *)peer;
}
QDF_STATUS
dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
                               struct dp_hw_cookie_conversion_t *cc_ctx,
                               uint32_t num_descs,
                               enum dp_desc_type desc_type,
                               uint8_t desc_pool_id);

QDF_STATUS
dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
                               struct dp_hw_cookie_conversion_t *cc_ctx);

QDF_STATUS
dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
                             struct dp_hw_cookie_conversion_t *cc_ctx);

QDF_STATUS
dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
                               struct dp_hw_cookie_conversion_t *cc_ctx);
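/*
 * Example (illustrative sketch, not part of the original header): a TX pool's
 * cookie-conversion context is typically driven through the
 * attach -> init -> deinit -> detach sequence, mirroring the pool's own life
 * cycle. pool_id and num_tx_descs below are hypothetical values.
 *
 *        struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->tx_cc_ctx[pool_id];
 *
 *        dp_hw_cookie_conversion_attach(be_soc, cc_ctx, num_tx_descs,
 *                                       DP_TX_DESC_TYPE, pool_id);
 *        dp_hw_cookie_conversion_init(be_soc, cc_ctx);
 *        ...
 *        dp_hw_cookie_conversion_deinit(be_soc, cc_ctx);
 *        dp_hw_cookie_conversion_detach(be_soc, cc_ctx);
 */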
/**
 * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptors from the pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @num_desc: number of TX/RX Descs required for SPT pages
 *
 * Return: number of SPT page descriptors allocated
 */
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
                                   struct dp_spt_page_desc **list_head,
                                   struct dp_spt_page_desc **list_tail,
                                   uint16_t num_desc);
/**
 * dp_cc_spt_page_desc_free() - free SPT DDR page descriptors back to the pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @page_nums: number of page descs freed back to the pool
 */
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
                              struct dp_spt_page_desc **list_head,
                              struct dp_spt_page_desc **list_tail,
                              uint16_t page_nums);
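/*
 * Example (illustrative sketch, not part of the original header): a descriptor
 * pool that needs SPT backing for num_desc cookies could track the pages it
 * received with a struct dp_spt_page_desc_list. num_desc below is a
 * hypothetical input.
 *
 *        struct dp_spt_page_desc_list pages = { NULL, NULL, 0 };
 *
 *        pages.num_spt_pages = dp_cc_spt_page_desc_alloc(be_soc,
 *                                                        &pages.spt_page_list_head,
 *                                                        &pages.spt_page_list_tail,
 *                                                        num_desc);
 *        ...
 *        dp_cc_spt_page_desc_free(be_soc, &pages.spt_page_list_head,
 *                                 &pages.spt_page_list_tail,
 *                                 pages.num_spt_pages);
 */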
/**
 * dp_cc_desc_id_generate() - generate the SW cookie ID according to whether
 *                            the DDR page is 4K aligned or not
 * @ppt_index: offset index in the primary page table
 * @spt_index: offset index in the secondary DDR page
 *
 * Generate the SW cookie ID in the format the HW expects.
 *
 * Return: cookie ID
 */
static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
                                              uint16_t spt_index)
{
        /*
         * For the 4K-aligned case the CMEM entry size is 4 bytes: the HW index
         * in bits 19..10 is ppt_index / 2 and the high-32-bits flag in bit 9
         * is ppt_index % 2, so bits 19..9 together equal the original
         * ppt_index value.
         * For the 4K-unaligned case the CMEM entry size is 8 bytes and
         * bits 19..9 hold the HW index, which is the same as ppt_index.
         */
        return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
                spt_index);
}
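/*
 * Example (illustrative only, not part of the original header): with
 * ppt_index = 3 and spt_index = 10, the generated cookie is
 *
 *        (3 << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) | 10 = (3 << 9) | 10 = 0x60A
 *
 * Decomposing it again with the masks defined above gives back the inputs:
 * (0x60A & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >> 9 = 3 and
 * 0x60A & DP_CC_DESC_ID_SPT_VA_OS_MASK = 10.
 */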
/**
 * dp_cc_desc_find() - find a TX/RX Desc virtual address by cookie ID
 * @soc: dp soc handle
 * @desc_id: TX/RX Desc ID
 *
 * Return: TX/RX Desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
                                        uint32_t desc_id)
{
        struct dp_soc_be *be_soc;
        uint16_t ppt_page_id, spt_va_id;
        uint8_t *spt_page_va;

        be_soc = dp_get_be_soc_from_dp_soc(soc);
        ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
                        DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;
        spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
                        DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

        /*
         * The PPT index in CMEM follows the same order as the pages in the
         * page descriptor array set up during initialization.
         * Each entry in the DDR page is 64 bits; on a 32-bit system only the
         * lower 32 bits of the VA are used.
         */
        spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;

        return (*((uintptr_t *)(spt_page_va +
                                spt_va_id * DP_CC_HW_READ_BYTES)));
}
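/*
 * Example (illustrative sketch, not part of the original header): the lookup
 * is the inverse of dp_cc_desc_id_generate() plus the SPT slot written via
 * DP_CC_SPT_PAGE_UPDATE_VA(). soc, page_desc, spt_idx and tx_desc below are
 * hypothetical locals.
 *
 *        uint32_t id = dp_cc_desc_id_generate(page_desc->ppt_index, spt_idx);
 *
 *        DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr, spt_idx, tx_desc);
 *        QDF_BUG(dp_cc_desc_find(soc, id) == (uintptr_t)tx_desc);
 */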
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG near-full levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
 *              of processing the entries in SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
 *              of processing the entries in SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
 *              condition and drastic steps need to be taken for processing
 *              the entries in SRNG
 */
enum dp_srng_near_full_levels {
        DP_SRNG_THRESH_SAFE,
        DP_SRNG_THRESH_NEAR_FULL,
        DP_SRNG_THRESH_CRITICAL,
};

/**
 * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
 *              its corresponding near-full irq handler
 * @soc: Datapath SoC handle
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: 1, if the srng was marked as near-full
 *         0, if the srng was not marked as near-full
 */
static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
                                               struct dp_srng *dp_srng)
{
        return qdf_atomic_read(&dp_srng->near_full);
}
/**
 * dp_srng_get_near_full_level() - Check the number of available entries in the
 *              consumer srng and return the srng's near-full level
 * @soc: Datapath SoC Handle [To be validated by the caller]
 * @dp_srng: Datapath handle for the srng
 *
 * Return: near-full level
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
        uint32_t num_valid;

        num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
                                                  dp_srng->hal_srng,
                                                  true);

        if (num_valid > dp_srng->crit_thresh)
                return DP_SRNG_THRESH_CRITICAL;
        else if (num_valid < dp_srng->safe_thresh)
                return DP_SRNG_THRESH_SAFE;
        else
                return DP_SRNG_THRESH_NEAR_FULL;
}
#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER 2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near-full level and update
 *              the reap_limit and flags to reflect the state.
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *              per the near-full state
 *
 * Return: 1, if the srng is near full
 *         0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
                                   struct dp_srng *srng,
                                   int *max_reap_limit)
{
        int ring_near_full = 0, near_full_level;

        if (dp_srng_check_ring_near_full(soc, srng)) {
                near_full_level = dp_srng_get_near_full_level(soc, srng);
                switch (near_full_level) {
                case DP_SRNG_THRESH_CRITICAL:
                        /* Currently not doing anything special here */
                        fallthrough;
                case DP_SRNG_THRESH_NEAR_FULL:
                        ring_near_full = 1;
                        *max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
                        break;
                case DP_SRNG_THRESH_SAFE:
                        qdf_atomic_set(&srng->near_full, 0);
                        ring_near_full = 0;
                        break;
                default:
                        qdf_assert(0);
                        break;
                }
        }

        return ring_near_full;
}
#else
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
                                   struct dp_srng *srng,
                                   int *max_reap_limit)
{
        return 0;
}
#endif
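/*
 * Example (illustrative sketch, not part of the original header): a ring-reap
 * path would typically query the near-full state once per loop and scale its
 * reap budget accordingly; when WLAN_FEATURE_NEAR_FULL_IRQ is compiled out,
 * the stub above leaves the budget untouched. soc, srng, quota and
 * reap_up_to() below are hypothetical.
 *
 *        int max_reap_limit = quota;
 *        int ring_near_full;
 *
 *        ring_near_full = _dp_srng_test_and_update_nf_params(soc, srng,
 *                                                            &max_reap_limit);
 *        if (ring_near_full)
 *                reap_up_to(max_reap_limit);
 */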
static inline
uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
                                    enum dp_desc_type desc_type)
{
        switch (desc_type) {
        case DP_TX_DESC_TYPE:
                return (DP_TX_DESC_CMEM_OFFSET +
                        (desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
        case DP_RX_DESC_BUF_TYPE:
                return (DP_RX_DESC_CMEM_OFFSET +
                        ((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
                        DP_RX_DESC_POOL_CMEM_SIZE);
        default:
                QDF_BUG(0);
        }

        return 0;
}
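/*
 * Example (illustrative only, not part of the original header): reusing the
 * assumed 256-byte pool size from the earlier CMEM arithmetic example,
 * TX pool 2 would start at
 *
 *        dp_desc_pool_get_cmem_base(0, 2, DP_TX_DESC_TYPE)
 *                = DP_TX_DESC_CMEM_OFFSET + 2 * 256 = 512
 *
 * while RX pools are laid out after all TX pools, indexed first by chip_id
 * and then by pool id within the chip.
 */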
#ifndef WLAN_MLO_MULTI_CHIP
static inline
void dp_soc_mlo_fill_params(struct dp_soc *soc,
                            struct cdp_soc_attach_params *params)
{
}

static inline
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
                             struct cdp_pdev_attach_params *params)
{
}

static inline
void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
}

static inline
void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
#endif
/*
 * dp_txrx_set_vdev_param_be() - target specific ops for setting vdev params
 * @soc: DP soc handle
 * @vdev: pointer to vdev structure
 * @param: parameter type to set
 * @val: value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_txrx_set_vdev_param_be(struct dp_soc *soc,
                                     struct dp_vdev *vdev,
                                     enum cdp_vdev_param_type param,
                                     cdp_config_param_type val);

#endif