/* dp_be.h */
/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
  18. #ifndef __DP_BE_H
  19. #define __DP_BE_H
  20. #include <dp_types.h>
  21. #include <hal_be_tx.h>
  22. /* maximum number of entries in one page of secondary page table */
  23. #define DP_CC_SPT_PAGE_MAX_ENTRIES 512
  24. /* maximum number of entries in primary page table */
  25. #define DP_CC_PPT_MAX_ENTRIES 1024
  26. /* cookie conversion required CMEM offset from CMEM pool */
  27. #define DP_CC_MEM_OFFSET_IN_CMEM 0
  28. /* cookie conversion primary page table size 4K */
  29. #define DP_CC_PPT_MEM_SIZE 4096
  30. /* FST required CMEM offset from CMEM pool */
  31. #define DP_FST_MEM_OFFSET_IN_CMEM \
  32. (DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)
  33. /* lower 9 bits in Desc ID for offset in page of SPT */
  34. #define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0
  35. #define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF
  36. #define DP_CC_DESC_ID_SPT_VA_OS_LSB 0
  37. #define DP_CC_DESC_ID_SPT_VA_OS_MSB 8
  38. /* higher 11 bits in Desc ID for offset in CMEM of PPT */
  39. #define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9
  40. #define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19
  41. #define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9
  42. #define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00
  43. /*
  44. * page 4K unaligned case, single SPT page physical address
  45. * need 8 bytes in PPT
  46. */
  47. #define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8
  48. /*
  49. * page 4K aligned case, single SPT page physical address
  50. * need 4 bytes in PPT
  51. */
  52. #define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4
  53. /* 4K aligned case, number of bits HW append for one PPT entry value */
  54. #define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12
  55. /**
  56. * struct dp_spt_page_desc - secondary page table page descriptors
  57. * @next: pointer to next linked SPT page Desc
  58. * @page_v_addr: page virtual address
  59. * @page_p_addr: page physical address
  60. * @ppt_index: entry index in primary page table where this page physical
  61. address stored
  62. * @avail_entry_index: index for available entry that store TX/RX Desc VA
  63. */
  64. struct dp_spt_page_desc {
  65. struct dp_spt_page_desc *next;
  66. uint8_t *page_v_addr;
  67. qdf_dma_addr_t page_p_addr;
  68. uint16_t ppt_index;
  69. uint16_t avail_entry_index;
  70. };
  71. /**
  72. * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
  73. * @cmem_base: CMEM base address for primary page table setup
  74. * @total_page_num: total DDR page allocated
  75. * @free_page_num: available DDR page number for TX/RX Desc ID initialization
  76. * @page_desc_freelist: available page Desc list
  77. * @page_desc_base: page Desc buffer base address.
  78. * @page_pool: DDR pages pool
  79. * @cc_lock: locks for page acquiring/free
  80. */
  81. struct dp_hw_cookie_conversion_t {
  82. uint32_t cmem_base;
  83. uint32_t total_page_num;
  84. uint32_t free_page_num;
  85. struct dp_spt_page_desc *page_desc_freelist;
  86. struct dp_spt_page_desc *page_desc_base;
  87. struct qdf_mem_multi_page_t page_pool;
  88. qdf_spinlock_t cc_lock;
  89. };
  90. /**
  91. * struct dp_spt_page_desc_list - containor of SPT page desc list info
  92. * @spt_page_list_head: head of SPT page descriptor list
  93. * @spt_page_list_tail: tail of SPT page descriptor list
  94. * @num_spt_pages: number of SPT page descriptor allocated
  95. */
  96. struct dp_spt_page_desc_list {
  97. struct dp_spt_page_desc *spt_page_list_head;
  98. struct dp_spt_page_desc *spt_page_list_tail;
  99. uint16_t num_spt_pages;
  100. };
  101. /* HW reading 8 bytes for VA */
  102. #define DP_CC_HW_READ_BYTES 8
  103. #define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
  104. { *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
  105. = (uintptr_t)(_desc_va); }
  106. /**
  107. * struct dp_tx_bank_profile - DP wrapper for TCL banks
  108. * @is_configured: flag indicating if this bank is configured
  109. * @ref_count: ref count indicating number of users of the bank
  110. * @bank_config: HAL TX bank configuration
  111. */
  112. struct dp_tx_bank_profile {
  113. uint8_t is_configured;
  114. qdf_atomic_t ref_count;
  115. union hal_tx_bank_config bank_config;
  116. };
  117. /**
  118. * struct dp_soc_be - Extended DP soc for BE targets
  119. * @soc: dp soc structure
  120. * @num_bank_profiles: num TX bank profiles
  121. * @bank_profiles: bank profiles for various TX banks
  122. * @hw_cc_ctx: core context of HW cookie conversion
  123. * @tx_spt_page_desc: spt page desc allocated for TX desc pool
  124. * @rx_spt_page_desc: spt page desc allocated for RX desc pool
  125. * @monitor_soc_be: BE specific monitor object
  126. */
  127. struct dp_soc_be {
  128. struct dp_soc soc;
  129. uint8_t num_bank_profiles;
  130. qdf_mutex_t tx_bank_lock;
  131. struct dp_tx_bank_profile *bank_profiles;
  132. struct dp_hw_cookie_conversion_t hw_cc_ctx;
  133. struct dp_spt_page_desc_list tx_spt_page_desc[MAX_TXDESC_POOLS];
  134. struct dp_spt_page_desc_list rx_spt_page_desc[MAX_RXDESC_POOLS];
  135. #ifdef WLAN_SUPPORT_PPEDS
  136. struct dp_srng reo2ppe_ring;
  137. struct dp_srng ppe2tcl_ring;
  138. struct dp_srng ppe_release_ring;
  139. #endif
  140. #if !defined(DISABLE_MON_CONFIG)
  141. struct dp_mon_soc_be *monitor_soc_be;
  142. #endif
  143. };
  144. /* convert struct dp_soc_be pointer to struct dp_soc pointer */
  145. #define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)
  146. /**
  147. * struct dp_pdev_be - Extended DP pdev for BE targets
  148. * @pdev: dp pdev structure
  149. * @monitor_pdev_be: BE specific monitor object
  150. */
  151. struct dp_pdev_be {
  152. struct dp_pdev pdev;
  153. #if !defined(DISABLE_MON_CONFIG)
  154. struct dp_mon_pdev_be *monitor_pdev_be;
  155. #endif
  156. };
  157. /**
  158. * struct dp_vdev_be - Extended DP vdev for BE targets
  159. * @vdev: dp vdev structure
  160. * @bank_id: bank_id to be used for TX
  161. * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
  162. */
  163. struct dp_vdev_be {
  164. struct dp_vdev vdev;
  165. int8_t bank_id;
  166. uint8_t vdev_id_check_en;
  167. };
  168. /**
  169. * struct dp_peer_be - Extended DP peer for BE targets
  170. * @dp_peer: dp peer structure
  171. */
  172. struct dp_peer_be {
  173. struct dp_peer peer;
  174. };
  175. /**
  176. * dp_get_soc_context_size_be() - get context size for target specific DP soc
  177. *
  178. * Return: value in bytes for BE specific soc structure
  179. */
  180. qdf_size_t dp_get_soc_context_size_be(void);
  181. /**
  182. * dp_initialize_arch_ops_be() - initialize BE specific arch ops
  183. * @arch_ops: arch ops pointer
  184. *
  185. * Return: none
  186. */
  187. void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);
  188. /**
  189. * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
  190. * @arch_ops: arch ops pointer
  191. *
  192. * Return: size in bytes for the context_type
  193. */
  194. qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);
  195. /**
  196. * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
  197. * @soc: dp_soc pointer
  198. *
  199. * Return: dp_soc_be pointer
  200. */
  201. static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
  202. {
  203. return (struct dp_soc_be *)soc;
  204. }
  205. /**
  206. * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
  207. * @pdev: dp_pdev pointer
  208. *
  209. * Return: dp_pdev_be pointer
  210. */
  211. static inline
  212. struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
  213. {
  214. return (struct dp_pdev_be *)pdev;
  215. }
  216. /**
  217. * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
  218. * @vdev: dp_vdev pointer
  219. *
  220. * Return: dp_vdev_be pointer
  221. */
  222. static inline
  223. struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
  224. {
  225. return (struct dp_vdev_be *)vdev;
  226. }
  227. /**
  228. * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
  229. * @peer: dp_peer pointer
  230. *
  231. * Return: dp_peer_be pointer
  232. */
  233. static inline
  234. struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
  235. {
  236. return (struct dp_peer_be *)peer;
  237. }
  238. /**
  239. * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptor from pool
  240. * @be_soc: beryllium soc handler
  241. * @list_head: pointer to page desc head
  242. * @list_tail: pointer to page desc tail
  243. * @num_desc: number of TX/RX Descs required for SPT pages
  244. *
  245. * Return: number of SPT page Desc allocated
  246. */
  247. uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
  248. struct dp_spt_page_desc **list_head,
  249. struct dp_spt_page_desc **list_tail,
  250. uint16_t num_desc);
  251. /**
  252. * dp_cc_spt_page_desc_free() - free SPT DDR page descriptor to pool
  253. * @be_soc: beryllium soc handler
  254. * @list_head: pointer to page desc head
  255. * @list_tail: pointer to page desc tail
  256. * @page_nums: number of page desc freed back to pool
  257. */
  258. void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
  259. struct dp_spt_page_desc **list_head,
  260. struct dp_spt_page_desc **list_tail,
  261. uint16_t page_nums);
  262. /**
  263. * dp_cc_desc_id_generate() - generate SW cookie ID according to
  264. DDR page 4K aligned or not
  265. * @ppt_index: offset index in primary page table
  266. * @spt_index: offset index in sceondary DDR page
  267. *
  268. * Generate SW cookie ID to match as HW expected
  269. *
  270. * Return: cookie ID
  271. */
  272. static inline uint32_t dp_cc_desc_id_generate(uint16_t ppt_index,
  273. uint16_t spt_index)
  274. {
  275. /*
  276. * for 4k aligned case, cmem entry size is 4 bytes,
  277. * HW index from bit19~bit10 value = ppt_index / 2, high 32bits flag
  278. * from bit9 value = ppt_index % 2, then bit 19 ~ bit9 value is
  279. * exactly same with original ppt_index value.
  280. * for 4k un-aligned case, cmem entry size is 8 bytes.
  281. * bit19 ~ bit9 will be HW index value, same as ppt_index value.
  282. */
  283. return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
  284. spt_index);
  285. }
  286. /**
  287. * dp_cc_desc_va_find() - find TX/RX Descs virtual address by ID
  288. * @be_soc: be soc handle
  289. * @desc_id: TX/RX Dess ID
  290. *
  291. * Return: TX/RX Desc virtual address
  292. */
  293. static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
  294. uint32_t desc_id)
  295. {
  296. struct dp_soc_be *be_soc;
  297. struct dp_hw_cookie_conversion_t *cc_ctx;
  298. uint16_t ppt_page_id, spt_va_id;
  299. uint8_t *spt_page_va;
  300. be_soc = dp_get_be_soc_from_dp_soc(soc);
  301. cc_ctx = &be_soc->hw_cc_ctx;
  302. ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
  303. DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;
  304. spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
  305. DP_CC_DESC_ID_SPT_VA_OS_SHIFT;
  306. /*
  307. * ppt index in cmem is same order where the page in the
  308. * page desc array during initialization.
  309. * entry size in DDR page is 64 bits, for 32 bits system,
  310. * only lower 32 bits VA value is needed.
  311. */
  312. spt_page_va = cc_ctx->page_desc_base[ppt_page_id].page_v_addr;
  313. return (*((uintptr_t *)(spt_page_va +
  314. spt_va_id * DP_CC_HW_READ_BYTES)));
  315. }
  316. #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
  317. /**
  318. * enum dp_srng_near_full_levels - SRNG Near FULL levels
  319. * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
  320. * of processing the entries in SRNG
  321. * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
  322. * of processing the entries in SRNG
  323. * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
  324. * condition and drastic steps need to be taken for processing
  325. * the entries in SRNG
  326. */
  327. enum dp_srng_near_full_levels {
  328. DP_SRNG_THRESH_SAFE,
  329. DP_SRNG_THRESH_NEAR_FULL,
  330. DP_SRNG_THRESH_CRITICAL,
  331. };
  332. /**
  333. * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
  334. * its corresponding near-full irq handler
  335. * @soc: Datapath SoC handle
  336. * @dp_srng: datapath handle for this SRNG
  337. *
  338. * Return: 1, if the srng was marked as near-full
  339. * 0, if the srng was not marked as near-full
  340. */
  341. static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
  342. struct dp_srng *dp_srng)
  343. {
  344. return qdf_atomic_read(&dp_srng->near_full);
  345. }
  346. /**
  347. * dp_srng_get_near_full_level() - Check the num available entries in the
  348. * consumer srng and return the level of the srng
  349. * near full state.
  350. * @soc: Datapath SoC Handle [To be validated by the caller]
  351. * @hal_ring_hdl: SRNG handle
  352. *
  353. * Return: near-full level
  354. */
  355. static inline int
  356. dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
  357. {
  358. uint32_t num_valid;
  359. num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
  360. dp_srng->hal_srng,
  361. true);
  362. if (num_valid > dp_srng->crit_thresh)
  363. return DP_SRNG_THRESH_CRITICAL;
  364. else if (num_valid < dp_srng->safe_thresh)
  365. return DP_SRNG_THRESH_SAFE;
  366. else
  367. return DP_SRNG_THRESH_NEAR_FULL;
  368. }
  369. #define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER 2
  370. /**
  371. * dp_srng_test_and_update_nf_params() - Test the near full level and update
  372. * the reap_limit and flags to reflect the state.
  373. * @soc: Datapath soc handle
  374. * @srng: Datapath handle for the srng
  375. * @max_reap_limit: [Output Param] Buffer to set the map_reap_limit as
  376. * per the near-full state
  377. *
  378. * Return: 1, if the srng is near full
  379. * 0, if the srng is not near full
  380. */
  381. static inline int
  382. _dp_srng_test_and_update_nf_params(struct dp_soc *soc,
  383. struct dp_srng *srng,
  384. int *max_reap_limit)
  385. {
  386. int ring_near_full = 0, near_full_level;
  387. if (dp_srng_check_ring_near_full(soc, srng)) {
  388. near_full_level = dp_srng_get_near_full_level(soc, srng);
  389. switch (near_full_level) {
  390. case DP_SRNG_THRESH_CRITICAL:
  391. /* Currently not doing anything special here */
  392. /* fall through */
  393. case DP_SRNG_THRESH_NEAR_FULL:
  394. ring_near_full = 1;
  395. *max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
  396. break;
  397. case DP_SRNG_THRESH_SAFE:
  398. qdf_atomic_set(&srng->near_full, 0);
  399. ring_near_full = 0;
  400. break;
  401. default:
  402. qdf_assert(0);
  403. break;
  404. }
  405. }
  406. return ring_near_full;
  407. }
  408. #else
  409. static inline int
  410. _dp_srng_test_and_update_nf_params(struct dp_soc *soc,
  411. struct dp_srng *srng,
  412. int *max_reap_limit)
  413. {
  414. return 0;
  415. }
  416. #endif
  417. #endif