/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __DP_BE_H
#define __DP_BE_H

#include <dp_types.h>
#include <hal_be_tx.h>
#ifdef WLAN_MLO_MULTI_CHIP
#include "mlo/dp_mlo.h"
#else
#include <dp_peer.h>
#endif
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif

enum CMEM_MEM_CLIENTS {
        COOKIE_CONVERSION,
        FISA_FST,
};
/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* mask to obtain the offset within one page of the secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)

/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES \
        (DP_CC_PPT_MEM_SIZE / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* cookie conversion required CMEM offset from CMEM pool */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size: 4KB for single-pdev targets, 8KB otherwise */
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
#define DP_CC_PPT_MEM_SIZE 4096
#else
#define DP_CC_PPT_MEM_SIZE 8192
#endif
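
/*
 * Illustrative arithmetic (editor-added for clarity, not part of the
 * original header): with WLAN_MAX_PDEVS == 1, DP_CC_PPT_MEM_SIZE is 4096
 * bytes and each 4K-aligned PPT entry takes DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED
 * (4) bytes, so DP_CC_PPT_MAX_ENTRIES = 4096 / 4 = 1024. Each PPT entry
 * addresses one SPT page holding DP_CC_SPT_PAGE_MAX_ENTRIES (512)
 * descriptor VAs, i.e. up to 1024 * 512 = 524288 convertible cookies.
 */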
/* FST required CMEM offset from CMEM pool */
#define DP_FST_MEM_OFFSET_IN_CMEM \
        (DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* CMEM size for FISA FST 16K */
#define DP_CMEM_FST_SIZE 16384

/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0
#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF
#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0
#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* higher 11 bits in Desc ID for offset in CMEM of PPT */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19
#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00
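
/*
 * Illustrative example (editor-added for clarity): a 20-bit descriptor ID
 * such as 0xA05 decomposes as
 *   spt_offset = 0xA05 & DP_CC_DESC_ID_SPT_VA_OS_MASK              = 0x5
 *   ppt_page   = (0xA05 & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
 *                DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT                   = 0x5
 * i.e. entry 5 inside SPT page 5 (0xA05 == 5 * 512 + 5).
 */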
/*
 * page 4K unaligned case, single SPT page physical address
 * needs 8 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8

/*
 * page 4K aligned case, single SPT page physical address
 * needs 4 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/* 4K aligned case, number of address bits the HW appends to one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 3
#else
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 5
#endif

#ifdef WLAN_SUPPORT_PPEDS
#define DP_PPEDS_STAMODE_ASTIDX_MAP_REG_IDX 1
/* The MAX PPE PRI2TID */
#define DP_TX_INT_PRI2TID_MAX 15

/* size of CMEM needed for a ppeds tx desc pool */
#define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE \
        ((WLAN_CFG_NUM_PPEDS_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
         DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* Offset of ppeds tx descriptor pool */
#define DP_TX_PPEDS_DESC_CMEM_OFFSET 0

#define PEER_ROUTING_USE_PPE 1
#define PEER_ROUTING_ENABLED 1

#define DP_PPE_INTR_STRNG_LEN 32
#define DP_PPE_INTR_MAX 3
#else
#define DP_TX_PPEDS_DESC_CMEM_OFFSET 0
#define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE 0

#define DP_PPE_INTR_STRNG_LEN 0
#define DP_PPE_INTR_MAX 0
#endif

/* tx descriptors are programmed at the start of the CMEM region */
#define DP_TX_DESC_CMEM_OFFSET \
        (DP_TX_PPEDS_DESC_CMEM_OFFSET + DP_TX_PPEDS_DESC_POOL_CMEM_SIZE)

/* size of CMEM needed for a tx desc pool */
#define DP_TX_DESC_POOL_CMEM_SIZE \
        ((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
         DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* Offset of rx descriptor pool */
#define DP_RX_DESC_CMEM_OFFSET \
        (DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE))

/* size of CMEM needed for a rx desc pool */
#define DP_RX_DESC_POOL_CMEM_SIZE \
        ((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
         DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)

/* get ppt_id from CMEM_OFFSET */
#define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
        ((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
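
/*
 * Illustrative layout arithmetic (editor-added; the exact numbers depend on
 * the WLAN_CFG_* build configuration): if, say, WLAN_CFG_NUM_TX_DESC_MAX is
 * 32768, one tx desc pool needs (32768 / 512) * 4 = 256 bytes of CMEM for
 * its PPT entries, so pool n starts at DP_TX_DESC_CMEM_OFFSET + n * 256.
 * DP_CMEM_OFFSET_TO_PPT_ID() maps such a byte offset in the cookie
 * conversion region back to a PPT entry index by dividing by the 4-byte
 * entry size.
 */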
/**
 * struct dp_spt_page_desc - secondary page table page descriptors
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in primary page table where this page physical
 *             address is stored
 */
struct dp_spt_page_desc {
        uint8_t *page_v_addr;
        qdf_dma_addr_t page_p_addr;
        uint32_t ppt_index;
};

/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_offset: CMEM offset from base address for primary page table setup
 * @total_page_num: total number of DDR pages allocated
 * @page_desc_base: page desc buffer base address
 * @page_pool: DDR pages pool
 * @cc_lock: lock for page acquire/free
 */
struct dp_hw_cookie_conversion_t {
        uint32_t cmem_offset;
        uint32_t total_page_num;
        struct dp_spt_page_desc *page_desc_base;
        struct qdf_mem_multi_page_t page_pool;
        qdf_spinlock_t cc_lock;
};

/**
 * struct dp_spt_page_desc_list - container of SPT page desc list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
        struct dp_spt_page_desc *spt_page_list_head;
        struct dp_spt_page_desc *spt_page_list_tail;
        uint16_t num_spt_pages;
};

/* HW reads 8 bytes for each VA entry */
#define DP_CC_HW_READ_BYTES 8

#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
        { *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
                = (uintptr_t)(_desc_va); }
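
/*
 * Usage sketch (editor-added, illustrative only; variable names are
 * hypothetical): once an SPT page has been mapped and its physical address
 * programmed into the PPT, each software descriptor VA is written into its
 * slot so the HW can convert the 20-bit cookie back to the VA:
 *
 *   struct dp_spt_page_desc *pd = &be_soc->page_desc_base[ppt_index];
 *   DP_CC_SPT_PAGE_UPDATE_VA(pd->page_v_addr, spt_index, tx_desc);
 */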
/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: ref count indicating number of users of the bank
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
        uint8_t is_configured;
        qdf_atomic_t ref_count;
        union hal_tx_bank_config bank_config;
};

#ifdef WLAN_SUPPORT_PPEDS
/**
 * struct dp_ppe_vp_tbl_entry - PPE Virtual table entry
 * @is_configured: Boolean indicating if the entry is configured.
 */
struct dp_ppe_vp_tbl_entry {
        bool is_configured;
};

/**
 * struct dp_ppe_vp_search_idx_tbl_entry - PPE Virtual search table entry
 * @is_configured: Boolean indicating if the entry is configured.
 */
struct dp_ppe_vp_search_idx_tbl_entry {
        bool is_configured;
};

/**
 * struct dp_ppe_vp_profile - PPE direct switch profile per vdev
 * @is_configured: Boolean indicating if the profile is configured.
 * @vp_num: Virtual port number
 * @ppe_vp_num_idx: Index to the PPE VP table entry
 * @search_idx_reg_num: Address search index register number
 * @drop_prec_enable: Drop precedence enable
 * @to_fw: To FW exception enable/disable.
 * @use_ppe_int_pri: Use PPE INT_PRI to TID mapping table
 */
struct dp_ppe_vp_profile {
        bool is_configured;
        uint8_t vp_num;
        uint8_t ppe_vp_num_idx;
        uint8_t search_idx_reg_num;
        uint8_t drop_prec_enable;
        uint8_t to_fw;
        uint8_t use_ppe_int_pri;
};

/**
 * struct dp_ppeds_tx_desc_pool_s - PPEDS Tx descriptor pool
 * @elem_size: Size of each descriptor
 * @hot_list_len: Length of the hotlist chain
 * @num_allocated: Number of used descriptors
 * @freelist: Chain of free descriptors
 * @hotlist: Chain of descriptors with attached nbufs
 * @desc_pages: multiple page allocation information for actual descriptors
 * @elem_count: Number of descriptors in the pool
 * @num_free: Number of free descriptors
 * @lock: Lock for descriptor allocation/free from/to the pool
 */
struct dp_ppeds_tx_desc_pool_s {
        uint16_t elem_size;
        uint32_t num_allocated;
        uint32_t hot_list_len;
        struct dp_tx_desc_s *freelist;
        struct dp_tx_desc_s *hotlist;
        struct qdf_mem_multi_page_t desc_pages;
        uint16_t elem_count;
        uint32_t num_free;
        qdf_spinlock_t lock;
};
#endif

/**
 * struct dp_ppeds_napi - napi parameters for PPE DS
 * @napi: napi structure to register with napi infra
 * @ndev: net_device structure
 */
struct dp_ppeds_napi {
        struct napi_struct napi;
        struct net_device ndev;
};
/*
 * NB: intentionally not using kernel-doc comment because the kernel-doc
 * script does not handle the TAILQ_HEAD macro
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: lock for @bank_profiles
 * @bank_profiles: bank profiles for various TX banks
 * @page_desc_base: base address of the SPT page descriptor array
 * @cc_cmem_base: cmem offset reserved for CC
 * @tx_cc_ctx: Cookie conversion context for tx desc pools
 * @rx_cc_ctx: Cookie conversion context for rx desc pools
 * @ppeds_int_mode_enabled: PPE DS interrupt mode enabled
 * @ppeds_stopped: flag indicating PPE DS is stopped
 * @reo2ppe_ring: REO2PPE ring
 * @ppe2tcl_ring: PPE2TCL ring
 * @ppeds_wbm_release_ring: PPE DS WBM release ring
 * @ppe_vp_tbl: PPE VP table
 * @ppe_vp_search_idx_tbl: PPE VP search idx table
 * @ppe_vp_profile: PPE VP profiles
 * @ppeds_tx_cc_ctx: Cookie conversion context for ppeds tx desc pool
 * @ppeds_tx_desc: PPEDS tx desc pool
 * @ppeds_napi_ctxt: NAPI context for PPE DS
 * @ppeds_handle: PPEDS soc instance handle
 * @dp_ppeds_txdesc_hotlist_len: PPEDS tx desc hotlist length
 * @ppe_vp_tbl_lock: PPE VP table lock
 * @num_ppe_vp_entries: Number of PPE VP entries
 * @num_ppe_vp_search_idx_entries: Number of PPEDS VP search idx entries
 * @num_ppe_vp_profiles: Number of PPE VP profiles
 * @irq_name: PPEDS VP irq names
 * @ppeds_stats: PPEDS stats
 * @mlo_enabled: Flag to indicate MLO is enabled or not
 * @mlo_chip_id: MLO chip_id
 * @ml_ctxt: pointer to global ml_context
 * @delta_tqm: delta_tqm
 * @mlo_tstamp_offset: mlo timestamp offset
 * @mld_peer_hash_lock: lock to protect mld_peer_hash
 * @mld_peer_hash: peer hash table for ML peers
 * @ipa_bank_id: TCL bank id used by IPA
 */
struct dp_soc_be {
        struct dp_soc soc;
        uint8_t num_bank_profiles;
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
        qdf_mutex_t tx_bank_lock;
#else
        qdf_spinlock_t tx_bank_lock;
#endif
        struct dp_tx_bank_profile *bank_profiles;
        struct dp_spt_page_desc *page_desc_base;
        uint32_t cc_cmem_base;
        struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
        struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
        uint8_t ppeds_int_mode_enabled:1,
                ppeds_stopped:1;
        struct dp_srng reo2ppe_ring;
        struct dp_srng ppe2tcl_ring;
        struct dp_srng ppeds_wbm_release_ring;
        struct dp_ppe_vp_tbl_entry *ppe_vp_tbl;
        struct dp_ppe_vp_search_idx_tbl_entry *ppe_vp_search_idx_tbl;
        struct dp_ppe_vp_profile *ppe_vp_profile;
        struct dp_hw_cookie_conversion_t ppeds_tx_cc_ctx;
        struct dp_ppeds_tx_desc_pool_s ppeds_tx_desc;
        struct dp_ppeds_napi ppeds_napi_ctxt;
        void *ppeds_handle;
        int dp_ppeds_txdesc_hotlist_len;
        qdf_mutex_t ppe_vp_tbl_lock;
        uint8_t num_ppe_vp_entries;
        uint8_t num_ppe_vp_search_idx_entries;
        uint8_t num_ppe_vp_profiles;
        char irq_name[DP_PPE_INTR_MAX][DP_PPE_INTR_STRNG_LEN];
        struct {
                struct {
                        uint64_t desc_alloc_failed;
                } tx;
        } ppeds_stats;
#endif
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MLO_MULTI_CHIP
        uint8_t mlo_enabled;
        uint8_t mlo_chip_id;
        struct dp_mlo_ctxt *ml_ctxt;
        uint64_t delta_tqm;
        uint64_t mlo_tstamp_offset;
#else
        /* Protect mld peer hash table */
        DP_MUTEX_TYPE mld_peer_hash_lock;
        struct {
                uint32_t mask;
                uint32_t idx_bits;
                TAILQ_HEAD(, dp_peer) * bins;
        } mld_peer_hash;
#endif
#endif
#ifdef IPA_OFFLOAD
        int8_t ipa_bank_id;
#endif
};

/* convert struct dp_soc_be pointer to struct dp_soc pointer */
#define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)
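
/*
 * Note (editor-added for clarity): DP_SOC_BE_GET_SOC() and the
 * dp_get_be_soc_from_dp_soc() helper declared below rely on @soc being the
 * first member of struct dp_soc_be, so the two pointers are simple casts of
 * one another:
 *
 *   struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
 *   struct dp_soc *base_soc = DP_SOC_BE_GET_SOC(be_soc);
 */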
/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 * @mlo_link_id: MLO link id for PDEV
 * @delta_tsf2: delta_tsf2
 */
struct dp_pdev_be {
        struct dp_pdev pdev;
#ifdef WLAN_MLO_MULTI_CHIP
        uint8_t mlo_link_id;
        uint64_t delta_tsf2;
#endif
};

/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
 * @partner_vdev_list: partner list used for Intra-BSS
 * @mlo_stats: structure to hold stats for mlo unmapped peers
 * @seq_num: DP MLO seq number
 * @mcast_primary: MLO Mcast primary vdev
 */
struct dp_vdev_be {
        struct dp_vdev vdev;
        int8_t bank_id;
        uint8_t vdev_id_check_en;
#ifdef WLAN_MLO_MULTI_CHIP
        uint8_t partner_vdev_list[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC];
        struct cdp_vdev_stats mlo_stats;
#ifdef WLAN_FEATURE_11BE_MLO
#ifdef WLAN_MCAST_MLO
        uint16_t seq_num;
        bool mcast_primary;
#endif
#endif
#endif
};

/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 * @priority_valid: flag indicating whether the peer priority is valid
 */
struct dp_peer_be {
        struct dp_peer peer;
#ifdef WLAN_SUPPORT_PPEDS
        uint8_t priority_valid;
#endif
};
/**
 * dp_get_soc_context_size_be() - get context size for target specific DP soc
 *
 * Return: value in bytes for BE specific soc structure
 */
qdf_size_t dp_get_soc_context_size_be(void);

/**
 * dp_initialize_arch_ops_be() - initialize BE specific arch ops
 * @arch_ops: arch ops pointer
 *
 * Return: none
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);

/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: context type for which the size is needed
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);

/**
 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
 * @soc: dp_soc pointer
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
        return (struct dp_soc_be *)soc;
}

/**
 * dp_mlo_iter_ptnr_soc() - iterate through mlo soc list and call the callback
 * @be_soc: dp_soc_be pointer
 * @func: Function to be called for each soc
 * @arg: context to be passed to the callback
 *
 * Return: true if mlo is enabled, false if mlo is disabled
 */
bool dp_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc, dp_ptnr_soc_iter_func func,
                          void *arg);

#ifdef WLAN_MLO_MULTI_CHIP
typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t;

/**
 * dp_mlo_get_peer_hash_obj() - return the container struct of MLO hash table
 * @soc: soc handle
 *
 * Return: MLD peer hash object
 */
static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
        struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

        return be_soc->ml_ctxt;
}

void dp_clr_mlo_ptnr_list(struct dp_soc *soc, struct dp_vdev *vdev);
#if defined(WLAN_FEATURE_11BE_MLO)
/**
 * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
 * @soc: Soc handle
 * @peer: DP peer handle for ML peer
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_mlo_partner_chips_map(struct dp_soc *soc,
                              struct dp_peer *peer,
                              uint16_t peer_id);

/**
 * dp_mlo_partner_chips_unmap() - Unmap MLO peers from partner SOCs
 * @soc: Soc handle
 * @peer_id: peer_id
 *
 * Return: None
 */
void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
                                uint16_t peer_id);

#ifdef WLAN_MLO_MULTI_CHIP
typedef void dp_ptnr_vdev_iter_func(struct dp_vdev_be *be_vdev,
                                    struct dp_vdev *ptnr_vdev,
                                    void *arg);

/**
 * dp_mlo_iter_ptnr_vdev() - API to iterate through ptnr vdev list
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @func: function to be called for each partner vdev
 * @arg: argument to be passed to @func
 * @mod_id: module id
 *
 * Return: None
 */
void dp_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
                           struct dp_vdev_be *be_vdev,
                           dp_ptnr_vdev_iter_func func, void *arg,
                           enum dp_mod_id mod_id);
#endif

#ifdef WLAN_MCAST_MLO
/**
 * dp_mlo_get_mcast_primary_vdev() - get ref to mcast primary vdev
 * @be_soc: dp_soc_be pointer
 * @be_vdev: dp_vdev_be pointer
 * @mod_id: module id
 *
 * Return: mcast primary DP VDEV handle on success, NULL on failure
 */
struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
                                              struct dp_vdev_be *be_vdev,
                                              enum dp_mod_id mod_id);
#endif
#endif
#else
typedef struct dp_soc_be *dp_mld_peer_hash_obj_t;

static inline dp_mld_peer_hash_obj_t
dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
{
        return dp_get_be_soc_from_dp_soc(soc);
}

static inline void dp_clr_mlo_ptnr_list(struct dp_soc *soc,
                                        struct dp_vdev *vdev)
{
}
#endif
/**
 * dp_mlo_peer_find_hash_attach_be() - API to initialize ML peer hash table
 * @mld_hash_obj: Peer hash object
 * @hash_elems: number of entries in hash table
 *
 * Return: QDF_STATUS_SUCCESS when attach is successful, else failure status
 */
QDF_STATUS
dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
                                int hash_elems);

/**
 * dp_mlo_peer_find_hash_detach_be() - API to de-initialize ML peer hash table
 * @mld_hash_obj: Peer hash object
 *
 * Return: void
 */
void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj);

/**
 * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
 * @pdev: dp_pdev pointer
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
        return (struct dp_pdev_be *)pdev;
}

/**
 * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
 * @vdev: dp_vdev pointer
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
        return (struct dp_vdev_be *)vdev;
}

/**
 * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
 * @peer: dp_peer pointer
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
        return (struct dp_peer_be *)peer;
}
void dp_ppeds_disable_irq(struct dp_soc *soc, struct dp_srng *srng);
void dp_ppeds_enable_irq(struct dp_soc *soc, struct dp_srng *srng);
QDF_STATUS dp_peer_setup_ppeds_be(struct dp_soc *soc, struct dp_peer *peer,
                                  struct dp_vdev_be *be_vdev,
                                  void *args);

QDF_STATUS
dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
                               struct dp_hw_cookie_conversion_t *cc_ctx,
                               uint32_t num_descs,
                               enum qdf_dp_desc_type desc_type,
                               uint8_t desc_pool_id);

void dp_reo_shared_qaddr_detach(struct dp_soc *soc);

QDF_STATUS
dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
                               struct dp_hw_cookie_conversion_t *cc_ctx);

QDF_STATUS
dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
                             struct dp_hw_cookie_conversion_t *cc_ctx);

QDF_STATUS
dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
                               struct dp_hw_cookie_conversion_t *cc_ctx);
/**
 * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptors from pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @num_desc: number of TX/RX descs required for SPT pages
 *
 * Return: number of SPT page descs allocated
 */
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
                                   struct dp_spt_page_desc **list_head,
                                   struct dp_spt_page_desc **list_tail,
                                   uint16_t num_desc);

/**
 * dp_cc_spt_page_desc_free() - free SPT DDR page descriptors back to pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc head
 * @list_tail: pointer to page desc tail
 * @page_nums: number of page descs freed back to pool
 */
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
                              struct dp_spt_page_desc **list_head,
                              struct dp_spt_page_desc **list_tail,
                              uint16_t page_nums);

/**
 * dp_cc_desc_id_generate() - generate SW cookie ID according to whether the
 *                            DDR page is 4K aligned or not
 * @ppt_index: offset index in primary page table
 * @spt_index: offset index in secondary DDR page
 *
 * Generate the SW cookie ID that matches what the HW expects.
 *
 * Return: cookie ID
 */
static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
                                              uint16_t spt_index)
{
        /*
         * For the 4K-aligned case, the CMEM entry size is 4 bytes: the HW
         * index in bits 19..10 is ppt_index / 2 and the high-32-bits flag
         * in bit 9 is ppt_index % 2, so bits 19..9 together carry exactly
         * the original ppt_index value.
         * For the 4K-unaligned case, the CMEM entry size is 8 bytes and
         * bits 19..9 are the HW index, again equal to ppt_index.
         */
        return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
                spt_index);
}
/**
 * dp_cc_desc_find() - find TX/RX desc virtual address by ID
 * @soc: be soc handle
 * @desc_id: TX/RX desc ID
 *
 * Return: TX/RX desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
                                        uint32_t desc_id)
{
        struct dp_soc_be *be_soc;
        uint16_t ppt_page_id, spt_va_id;
        uint8_t *spt_page_va;

        be_soc = dp_get_be_soc_from_dp_soc(soc);
        ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
                        DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;

        spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
                        DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

        /*
         * The PPT index in CMEM follows the same order in which the pages
         * were added to the page desc array during initialization.
         * Each entry in a DDR page is 64 bits; on a 32-bit system only the
         * lower 32 bits of the VA value are needed.
         */
        spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;

        return (*((uintptr_t *)(spt_page_va +
                                spt_va_id * DP_CC_HW_READ_BYTES)));
}
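
/*
 * Round-trip sketch (editor-added, illustrative only; variable names are
 * hypothetical): the cookie written into a HW descriptor and the VA looked
 * up on completion are related as follows,
 *
 *   uint32_t cookie = dp_cc_desc_id_generate(pd->ppt_index, spt_index);
 *   struct dp_tx_desc_s *tx_desc =
 *           (struct dp_tx_desc_s *)dp_cc_desc_find(soc, cookie);
 *
 * provided DP_CC_SPT_PAGE_UPDATE_VA() stored tx_desc at the same
 * (ppt_index, spt_index) slot beforehand.
 */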
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG near-full levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
 *              of processing the entries in SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
 *              of processing the entries in SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
 *              condition and drastic steps need to be taken for processing
 *              the entries in SRNG
 */
enum dp_srng_near_full_levels {
        DP_SRNG_THRESH_SAFE,
        DP_SRNG_THRESH_NEAR_FULL,
        DP_SRNG_THRESH_CRITICAL,
};

/**
 * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
 *                                  its corresponding near-full irq handler
 * @soc: Datapath SoC handle
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: 1, if the srng was marked as near-full
 *         0, if the srng was not marked as near-full
 */
static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
                                               struct dp_srng *dp_srng)
{
        return qdf_atomic_read(&dp_srng->near_full);
}

/**
 * dp_srng_get_near_full_level() - Check the number of available entries in
 *                                 the consumer srng and return the level of
 *                                 the srng near-full state.
 * @soc: Datapath SoC Handle [To be validated by the caller]
 * @dp_srng: SRNG handle
 *
 * Return: near-full level
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
        uint32_t num_valid;

        num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
                                                  dp_srng->hal_srng,
                                                  true);

        if (num_valid > dp_srng->crit_thresh)
                return DP_SRNG_THRESH_CRITICAL;
        else if (num_valid < dp_srng->safe_thresh)
                return DP_SRNG_THRESH_SAFE;
        else
                return DP_SRNG_THRESH_NEAR_FULL;
}
#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER 2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near-full level and update
 *                                        the reap_limit and flags to reflect
 *                                        the state.
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *                  per the near-full state
 *
 * Return: 1, if the srng is near full
 *         0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
                                   struct dp_srng *srng,
                                   int *max_reap_limit)
{
        int ring_near_full = 0, near_full_level;

        if (dp_srng_check_ring_near_full(soc, srng)) {
                near_full_level = dp_srng_get_near_full_level(soc, srng);
                switch (near_full_level) {
                case DP_SRNG_THRESH_CRITICAL:
                        /* Currently not doing anything special here */
                        fallthrough;
                case DP_SRNG_THRESH_NEAR_FULL:
                        ring_near_full = 1;
                        *max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
                        break;
                case DP_SRNG_THRESH_SAFE:
                        qdf_atomic_set(&srng->near_full, 0);
                        ring_near_full = 0;
                        break;
                default:
                        qdf_assert(0);
                        break;
                }
        }

        return ring_near_full;
}
#else
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
                                   struct dp_srng *srng,
                                   int *max_reap_limit)
{
        return 0;
}
#endif
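
/*
 * Usage sketch (editor-added, illustrative only; the limit value shown is
 * hypothetical and would normally come from the wlan_cfg context): a ring
 * reap loop would typically do
 *
 *   int max_reap_limit = 64;
 *   int near_full = _dp_srng_test_and_update_nf_params(soc, srng,
 *                                                       &max_reap_limit);
 *
 * and then reap up to max_reap_limit entries; the quota is doubled while
 * the ring remains in the near-full or critical state, and the near_full
 * atomic is cleared once the ring falls back to the safe level.
 */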
/**
 * dp_desc_pool_get_cmem_base() - get CMEM base offset for a descriptor pool
 * @chip_id: chip id of the (MLO partner) soc
 * @desc_pool_id: descriptor pool id
 * @desc_type: descriptor type
 *
 * Return: CMEM offset of the pool's cookie conversion region
 */
static inline
uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
                                    enum qdf_dp_desc_type desc_type)
{
        switch (desc_type) {
        case QDF_DP_TX_DESC_TYPE:
                return (DP_TX_DESC_CMEM_OFFSET +
                        (desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
        case QDF_DP_RX_DESC_BUF_TYPE:
                return (DP_RX_DESC_CMEM_OFFSET +
                        ((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
                        DP_RX_DESC_POOL_CMEM_SIZE);
        case QDF_DP_TX_PPEDS_DESC_TYPE:
                return DP_TX_PPEDS_DESC_CMEM_OFFSET;
        default:
                QDF_BUG(0);
        }

        return 0;
}
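
/*
 * Worked example (editor-added; the sizes are illustrative and depend on
 * WLAN_CFG_*): if DP_TX_DESC_POOL_CMEM_SIZE evaluates to 256, then
 * dp_desc_pool_get_cmem_base(0, 2, QDF_DP_TX_DESC_TYPE) returns
 * DP_TX_DESC_CMEM_OFFSET + 2 * 256. RX pools are laid out per chip after
 * all MAX_TXDESC_POOLS tx regions, so rx pool 1 of chip 1 starts at
 * DP_RX_DESC_CMEM_OFFSET + (1 * MAX_RXDESC_POOLS + 1) *
 * DP_RX_DESC_POOL_CMEM_SIZE.
 */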
#ifndef WLAN_MLO_MULTI_CHIP
static inline
void dp_soc_mlo_fill_params(struct dp_soc *soc,
                            struct cdp_soc_attach_params *params)
{
}

static inline
void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
                             struct cdp_pdev_attach_params *params)
{
}

static inline
void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
}

static inline
void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
#endif

#endif /* __DP_BE_H */