/* dp_be.h */
  1. /*
  2. * Copyright (c) 2021 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
#ifndef __DP_BE_H
#define __DP_BE_H

#include <dp_types.h>
#include <hal_be_tx.h>

/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES 1024

/* cookie conversion required CMEM offset from CMEM pool */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size 4K */
#define DP_CC_PPT_MEM_SIZE 4096

/* FST required CMEM offset from CMEM pool (placed right after the PPT) */
#define DP_FST_MEM_OFFSET_IN_CMEM \
	(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0
#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF
#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0
#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* higher 11 bits in Desc ID for offset in CMEM of PPT */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19
#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00

/*
 * page 4K unaligned case, single SPT page physical address
 * need 8 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8

/*
 * page 4K aligned case, single SPT page physical address
 * need 4 bytes in PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/*
 * 4K aligned case, number of bits HW appends for one PPT entry value
 * (NOTE: "APEND" spelling is kept in the macro name for compatibility
 * with existing callers)
 */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 3
#else
/* WBM2SW ring id for rx release */
#define WBM2SW_REL_ERR_RING_NUM 5
#endif
/**
 * struct dp_spt_page_desc - secondary page table page descriptor
 * @next: pointer to next linked SPT page Desc
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in primary page table where this page's
 *             physical address is stored
 * @avail_entry_index: index of the next free entry in this page that can
 *                     store a TX/RX Desc VA
 */
struct dp_spt_page_desc {
	struct dp_spt_page_desc *next;
	uint8_t *page_v_addr;
	qdf_dma_addr_t page_p_addr;
	uint16_t ppt_index;
	uint16_t avail_entry_index;
};
/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_base: CMEM base address for primary page table setup
 * @total_page_num: total DDR pages allocated
 * @free_page_num: available DDR page count for TX/RX Desc ID initialization
 * @page_desc_freelist: list of currently available page descriptors
 * @page_desc_base: base address of the page descriptor array (freed as one
 *                  buffer; @page_desc_freelist chains into this array)
 * @page_pool: DDR pages pool
 * @cc_lock: lock protecting page acquire/free operations
 */
struct dp_hw_cookie_conversion_t {
	uint32_t cmem_base;
	uint32_t total_page_num;
	uint32_t free_page_num;
	struct dp_spt_page_desc *page_desc_freelist;
	struct dp_spt_page_desc *page_desc_base;
	struct qdf_mem_multi_page_t page_pool;
	qdf_spinlock_t cc_lock;
};
/**
 * struct dp_spt_page_desc_list - container of SPT page desc list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
	struct dp_spt_page_desc *spt_page_list_head;
	struct dp_spt_page_desc *spt_page_list_tail;
	uint16_t num_spt_pages;
};
  108. /* HW reading 8 bytes for VA */
  109. #define DP_CC_HW_READ_BYTES 8
  110. #define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
  111. { *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
  112. = (uintptr_t)(_desc_va); }
/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: ref count indicating number of users of the bank
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
	uint8_t is_configured;
	qdf_atomic_t ref_count;
	union hal_tx_bank_config bank_config;
};
/**
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure (must be first so dp_soc/dp_soc_be casts work)
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: lock protecting @bank_profiles add/release
 * @bank_profiles: bank profiles for various TX banks
 * @hw_cc_ctx: core context of HW cookie conversion
 * @tx_spt_page_desc: SPT page descs allocated per TX desc pool
 * @rx_spt_page_desc: SPT page descs allocated per RX desc pool
 * @reo2ppe_ring: REO to PPE ring (PPE-DS only)
 * @ppe2tcl_ring: PPE to TCL ring (PPE-DS only)
 * @ppe_release_ring: PPE release ring (PPE-DS only)
 * @monitor_soc_be: BE specific monitor object
 */
struct dp_soc_be {
	struct dp_soc soc;
	uint8_t num_bank_profiles;
	qdf_mutex_t tx_bank_lock;
	struct dp_tx_bank_profile *bank_profiles;
	struct dp_hw_cookie_conversion_t hw_cc_ctx;
	struct dp_spt_page_desc_list tx_spt_page_desc[MAX_TXDESC_POOLS];
	struct dp_spt_page_desc_list rx_spt_page_desc[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_srng reo2ppe_ring;
	struct dp_srng ppe2tcl_ring;
	struct dp_srng ppe_release_ring;
#endif
#if !defined(DISABLE_MON_CONFIG)
	struct dp_mon_soc_be *monitor_soc_be;
#endif
};
  151. /* convert struct dp_soc_be pointer to struct dp_soc pointer */
  152. #define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)
/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure (must be first so dp_pdev/dp_pdev_be casts work)
 * @monitor_pdev_be: BE specific monitor object
 */
struct dp_pdev_be {
	struct dp_pdev pdev;
#if !defined(DISABLE_MON_CONFIG)
	struct dp_mon_pdev_be *monitor_pdev_be;
#endif
};
/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure (must be first so dp_vdev/dp_vdev_be casts work)
 * @bank_id: bank_id to be used for TX (negative when unassigned)
 * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
 */
struct dp_vdev_be {
	struct dp_vdev vdev;
	int8_t bank_id;
	uint8_t vdev_id_check_en;
};
/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure (must be first so dp_peer/dp_peer_be casts work)
 */
struct dp_peer_be {
	struct dp_peer peer;
};
/**
 * dp_get_soc_context_size_be() - get context size for target specific DP soc
 *
 * Return: value in bytes for BE specific soc structure
 */
qdf_size_t dp_get_soc_context_size_be(void);

/**
 * dp_initialize_arch_ops_be() - initialize BE specific arch ops
 * @arch_ops: arch ops pointer
 *
 * Return: none
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);

/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: DP context type for which the size is needed
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);
  202. /**
  203. * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
  204. * @soc: dp_soc pointer
  205. *
  206. * Return: dp_soc_be pointer
  207. */
  208. static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
  209. {
  210. return (struct dp_soc_be *)soc;
  211. }
  212. /**
  213. * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
  214. * @pdev: dp_pdev pointer
  215. *
  216. * Return: dp_pdev_be pointer
  217. */
  218. static inline
  219. struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
  220. {
  221. return (struct dp_pdev_be *)pdev;
  222. }
  223. /**
  224. * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
  225. * @vdev: dp_vdev pointer
  226. *
  227. * Return: dp_vdev_be pointer
  228. */
  229. static inline
  230. struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
  231. {
  232. return (struct dp_vdev_be *)vdev;
  233. }
  234. /**
  235. * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
  236. * @peer: dp_peer pointer
  237. *
  238. * Return: dp_peer_be pointer
  239. */
  240. static inline
  241. struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
  242. {
  243. return (struct dp_peer_be *)peer;
  244. }
/**
 * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptors from pool
 * @be_soc: beryllium soc handle
 * @list_head: [out] head of the allocated page desc list
 * @list_tail: [out] tail of the allocated page desc list
 * @num_desc: number of TX/RX Descs that the SPT pages must hold
 *
 * Return: number of SPT page descriptors actually allocated
 */
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
				   struct dp_spt_page_desc **list_head,
				   struct dp_spt_page_desc **list_tail,
				   uint16_t num_desc);

/**
 * dp_cc_spt_page_desc_free() - free SPT DDR page descriptors back to pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc list head
 * @list_tail: pointer to page desc list tail
 * @page_nums: number of page descs freed back to pool
 */
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
			      struct dp_spt_page_desc **list_head,
			      struct dp_spt_page_desc **list_tail,
			      uint16_t page_nums);
  269. /**
  270. * dp_cc_desc_id_generate() - generate SW cookie ID according to
  271. DDR page 4K aligned or not
  272. * @ppt_index: offset index in primary page table
  273. * @spt_index: offset index in sceondary DDR page
  274. *
  275. * Generate SW cookie ID to match as HW expected
  276. *
  277. * Return: cookie ID
  278. */
  279. static inline uint32_t dp_cc_desc_id_generate(uint16_t ppt_index,
  280. uint16_t spt_index)
  281. {
  282. /*
  283. * for 4k aligned case, cmem entry size is 4 bytes,
  284. * HW index from bit19~bit10 value = ppt_index / 2, high 32bits flag
  285. * from bit9 value = ppt_index % 2, then bit 19 ~ bit9 value is
  286. * exactly same with original ppt_index value.
  287. * for 4k un-aligned case, cmem entry size is 8 bytes.
  288. * bit19 ~ bit9 will be HW index value, same as ppt_index value.
  289. */
  290. return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
  291. spt_index);
  292. }
/**
 * dp_cc_desc_find() - find TX/RX Desc virtual address by cookie ID
 * @soc: dp soc handle (converted internally to the BE soc)
 * @desc_id: TX/RX Desc cookie ID, as built by dp_cc_desc_id_generate()
 *
 * Return: TX/RX Desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
					uint32_t desc_id)
{
	struct dp_soc_be *be_soc;
	struct dp_hw_cookie_conversion_t *cc_ctx;
	uint16_t ppt_page_id, spt_va_id;
	uint8_t *spt_page_va;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->hw_cc_ctx;
	/* bits 19..9 select the SPT page, bits 8..0 the entry in the page */
	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;
	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

	/*
	 * ppt index in cmem is same order where the page in the
	 * page desc array during initialization.
	 * entry size in DDR page is 64 bits, for 32 bits system,
	 * only lower 32 bits VA value is needed.
	 */
	spt_page_va = cc_ctx->page_desc_base[ppt_page_id].page_v_addr;

	return (*((uintptr_t *)(spt_page_va +
				spt_va_id * DP_CC_HW_READ_BYTES)));
}
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG near-full levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
 *		condition and drastic steps need to be taken for processing
 *		the entries in SRNG
 */
enum dp_srng_near_full_levels {
	DP_SRNG_THRESH_SAFE,
	DP_SRNG_THRESH_NEAR_FULL,
	DP_SRNG_THRESH_CRITICAL,
};
  339. /**
  340. * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
  341. * its corresponding near-full irq handler
  342. * @soc: Datapath SoC handle
  343. * @dp_srng: datapath handle for this SRNG
  344. *
  345. * Return: 1, if the srng was marked as near-full
  346. * 0, if the srng was not marked as near-full
  347. */
  348. static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
  349. struct dp_srng *dp_srng)
  350. {
  351. return qdf_atomic_read(&dp_srng->near_full);
  352. }
/**
 * dp_srng_get_near_full_level() - Check the num available entries in the
 *			consumer srng and return the level of the srng
 *			near full state.
 * @soc: Datapath SoC Handle [To be validated by the caller]
 * @dp_srng: SRNG handle
 *
 * Return: near-full level (enum dp_srng_near_full_levels)
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
	uint32_t num_valid;

	/* nolock read: caller context already owns/serializes this ring */
	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
						  dp_srng->hal_srng,
						  true);

	if (num_valid > dp_srng->crit_thresh)
		return DP_SRNG_THRESH_CRITICAL;
	else if (num_valid < dp_srng->safe_thresh)
		return DP_SRNG_THRESH_SAFE;
	else
		return DP_SRNG_THRESH_NEAR_FULL;
}
#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER 2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near full level and update
 *			the reap_limit and flags to reflect the state.
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *			per the near-full state
 *
 * Return: 1, if the srng is near full
 *	   0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	int ring_near_full = 0, near_full_level;

	if (dp_srng_check_ring_near_full(soc, srng)) {
		near_full_level = dp_srng_get_near_full_level(soc, srng);
		switch (near_full_level) {
		case DP_SRNG_THRESH_CRITICAL:
			/* Currently not doing anything special here */
			/* fall through */
		case DP_SRNG_THRESH_NEAR_FULL:
			/* grow the reap budget while the ring is backed up */
			ring_near_full = 1;
			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
			break;
		case DP_SRNG_THRESH_SAFE:
			/* drained back to a safe level: clear the NF flag */
			qdf_atomic_set(&srng->near_full, 0);
			ring_near_full = 0;
			break;
		default:
			qdf_assert(0);
			break;
		}
	}

	return ring_near_full;
}
#else
/*
 * Stub when WLAN_FEATURE_NEAR_FULL_IRQ is compiled out: the ring is never
 * reported as near full and *max_reap_limit is left untouched.
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	return 0;
}
#endif
  424. #endif