dp_be.h

/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __DP_BE_H
#define __DP_BE_H

#include <dp_types.h>
#include <hal_be_tx.h>

/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES 1024

/* cookie conversion required CMEM offset from CMEM pool */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size 4K */
#define DP_CC_PPT_MEM_SIZE 4096

/* FST required CMEM offset from CMEM pool */
#define DP_FST_MEM_OFFSET_IN_CMEM \
	(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0
#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF
#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0
#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* higher 11 bits in Desc ID for offset in CMEM of PPT */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19
#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00
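
/*
 * Illustrative note (not part of the original header): the two fields above
 * pack into one descriptor ID, with the PPT page offset in bits 19..9 and
 * the offset inside the SPT page in bits 8..0. For example, ppt_index 3
 * with spt_index 10 encodes as (3 << 9) | 10 = 0x60A; masking with
 * DP_CC_DESC_ID_PPT_PAGE_OS_MASK and DP_CC_DESC_ID_SPT_VA_OS_MASK recovers
 * 3 and 10 respectively.
 */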

/*
 * page 4K unaligned case, a single SPT page physical address
 * needs 8 bytes in the PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8

/*
 * page 4K aligned case, a single SPT page physical address
 * needs 4 bytes in the PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/* 4K aligned case, number of bits HW appends for one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12

/**
 * struct dp_spt_page_desc - secondary page table page descriptor
 * @next: pointer to the next linked SPT page desc
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in the primary page table where this page's
 *	       physical address is stored
 * @avail_entry_index: index of the next available entry for storing a
 *		       TX/RX Desc VA
 */
struct dp_spt_page_desc {
	struct dp_spt_page_desc *next;
	uint8_t *page_v_addr;
	qdf_dma_addr_t page_p_addr;
	uint16_t ppt_index;
	uint16_t avail_entry_index;
};

/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_base: CMEM base address for primary page table setup
 * @total_page_num: total number of DDR pages allocated
 * @free_page_num: number of DDR pages available for TX/RX Desc ID
 *		   initialization
 * @page_desc_freelist: list of available page descs
 * @page_desc_base: page desc buffer base address
 * @page_pool: DDR page pool
 * @cc_lock: lock for page acquire/free
 */
struct dp_hw_cookie_conversion_t {
	uint32_t cmem_base;
	uint32_t total_page_num;
	uint32_t free_page_num;
	struct dp_spt_page_desc *page_desc_freelist;
	struct dp_spt_page_desc *page_desc_base;
	struct qdf_mem_multi_page_t page_pool;
	qdf_spinlock_t cc_lock;
};

/**
 * struct dp_spt_page_desc_list - container for SPT page desc list info
 * @spt_page_list_head: head of the SPT page descriptor list
 * @spt_page_list_tail: tail of the SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
	struct dp_spt_page_desc *spt_page_list_head;
	struct dp_spt_page_desc *spt_page_list_tail;
	uint16_t num_spt_pages;
};

/* HW reads 8 bytes for the VA */
#define DP_CC_HW_READ_BYTES 8
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
	{ *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
	= (uintptr_t)(_desc_va); }
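
/*
 * Illustrative usage sketch (not from the original header): a descriptor
 * pool init path could publish a descriptor's virtual address into the SPT
 * page slot reserved for it. The names page_desc, tx_desc and slot below
 * are hypothetical.
 *
 *	struct dp_spt_page_desc *page_desc;	from the SPT page freelist
 *	struct dp_tx_desc_s *tx_desc;		descriptor to publish
 *	uint16_t slot = page_desc->avail_entry_index++;
 *
 *	DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr, slot, tx_desc);
 */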

/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: ref count indicating number of users of the bank
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
	uint8_t is_configured;
	qdf_atomic_t ref_count;
	union hal_tx_bank_config bank_config;
};

/**
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: lock protecting updates to the TX bank profiles
 * @bank_profiles: bank profiles for various TX banks
 * @hw_cc_ctx: core context of HW cookie conversion
 * @tx_spt_page_desc: spt page descs allocated for the TX desc pools
 * @rx_spt_page_desc: spt page descs allocated for the RX desc pools
 * @reo2ppe_ring: REO2PPE ring
 * @ppe2tcl_ring: PPE2TCL ring
 * @ppe_release_ring: PPE release ring
 */
struct dp_soc_be {
	struct dp_soc soc;
	uint8_t num_bank_profiles;
	qdf_mutex_t tx_bank_lock;
	struct dp_tx_bank_profile *bank_profiles;
	struct dp_hw_cookie_conversion_t hw_cc_ctx;
	struct dp_spt_page_desc_list tx_spt_page_desc[MAX_TXDESC_POOLS];
	struct dp_spt_page_desc_list rx_spt_page_desc[MAX_RXDESC_POOLS];
#ifdef WLAN_SUPPORT_PPEDS
	struct dp_srng reo2ppe_ring;
	struct dp_srng ppe2tcl_ring;
	struct dp_srng ppe_release_ring;
#endif
};

/* convert struct dp_soc_be pointer to struct dp_soc pointer */
#define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)

/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 */
struct dp_pdev_be {
	struct dp_pdev pdev;
};

/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag indicating if HW vdev_id check is enabled for vdev
 */
struct dp_vdev_be {
	struct dp_vdev vdev;
	int8_t bank_id;
	uint8_t vdev_id_check_en;
};

/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 */
struct dp_peer_be {
	struct dp_peer peer;
};

/**
 * dp_get_soc_context_size_be() - get context size for target specific DP soc
 *
 * Return: value in bytes for BE specific soc structure
 */
qdf_size_t dp_get_soc_context_size_be(void);

/**
 * dp_initialize_arch_ops_be() - initialize BE specific arch ops
 * @arch_ops: arch ops pointer
 *
 * Return: none
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);

/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: context type for which the size is required
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);

/**
 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
 * @soc: dp_soc pointer
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
	return (struct dp_soc_be *)soc;
}

/**
 * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
 * @pdev: dp_pdev pointer
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
	return (struct dp_pdev_be *)pdev;
}

/**
 * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
 * @vdev: dp_vdev pointer
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
	return (struct dp_vdev_be *)vdev;
}

/**
 * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
 * @peer: dp_peer pointer
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
	return (struct dp_peer_be *)peer;
}

/**
 * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptors from pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc list head
 * @list_tail: pointer to page desc list tail
 * @num_desc: number of TX/RX Descs required for SPT pages
 *
 * Return: number of SPT page descs allocated
 */
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
				   struct dp_spt_page_desc **list_head,
				   struct dp_spt_page_desc **list_tail,
				   uint16_t num_desc);

/**
 * dp_cc_spt_page_desc_free() - free SPT DDR page descriptors back to pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc list head
 * @list_tail: pointer to page desc list tail
 * @page_nums: number of page descs freed back to the pool
 */
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
			      struct dp_spt_page_desc **list_head,
			      struct dp_spt_page_desc **list_tail,
			      uint16_t page_nums);
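
/*
 * Illustrative usage sketch (not part of the original header): a descriptor
 * pool attach path might reserve enough SPT pages for num_elem descriptors
 * and return them on detach. The variable names are hypothetical.
 *
 *	struct dp_spt_page_desc *head = NULL, *tail = NULL;
 *	uint16_t pages;
 *
 *	pages = dp_cc_spt_page_desc_alloc(be_soc, &head, &tail, num_elem);
 *	if (!pages)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	dp_cc_spt_page_desc_free(be_soc, &head, &tail, pages);
 */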

/**
 * dp_cc_desc_id_generate() - generate SW cookie ID according to whether the
 *			      DDR page is 4K aligned or not
 * @ppt_index: offset index in the primary page table
 * @spt_index: offset index in the secondary DDR page
 *
 * Generate a SW cookie ID that matches what HW expects.
 *
 * Return: cookie ID
 */
static inline uint32_t dp_cc_desc_id_generate(uint16_t ppt_index,
					      uint16_t spt_index)
{
	/*
	 * For the 4K aligned case, the CMEM entry size is 4 bytes:
	 * the HW index in bit19~bit10 = ppt_index / 2 and the high-32-bits
	 * flag in bit9 = ppt_index % 2, so bit19~bit9 hold exactly the
	 * original ppt_index value.
	 * For the 4K unaligned case, the CMEM entry size is 8 bytes:
	 * bit19~bit9 hold the HW index value, the same as ppt_index.
	 */
	return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
		spt_index);
}
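
/*
 * Worked example (illustrative, not from the original header): for
 * ppt_index = 5 and spt_index = 7 the cookie is (5 << 9) | 7 = 0xA07.
 * In the 4K-aligned case HW reads bits 19..10 (value 2 = 5 / 2) as the
 * CMEM entry index and bit 9 (value 1 = 5 % 2) as the upper/lower-half
 * selector, which together reproduce ppt_index = 5; in the unaligned case
 * bits 19..9 are used directly as ppt_index.
 */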

/**
 * dp_cc_desc_find() - find a TX/RX Desc virtual address by cookie ID
 * @soc: dp soc handle
 * @desc_id: TX/RX Desc ID
 *
 * Return: TX/RX Desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
					uint32_t desc_id)
{
	struct dp_soc_be *be_soc;
	struct dp_hw_cookie_conversion_t *cc_ctx;
	uint16_t ppt_page_id, spt_va_id;
	uint8_t *spt_page_va;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->hw_cc_ctx;
	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;
	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

	/*
	 * The PPT index in CMEM follows the same order as the pages in the
	 * page desc array set up during initialization.
	 * The entry size in a DDR page is 64 bits; on a 32-bit system only
	 * the lower 32 bits of the VA value are needed.
	 */
	spt_page_va = cc_ctx->page_desc_base[ppt_page_id].page_v_addr;

	return (*((uintptr_t *)(spt_page_va +
				spt_va_id * DP_CC_HW_READ_BYTES)));
}
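
/*
 * Illustrative round trip (not part of the original header), assuming the
 * descriptor VA was first published with DP_CC_SPT_PAGE_UPDATE_VA; the
 * names page_desc, slot and tx_desc are hypothetical:
 *
 *	uint32_t id = dp_cc_desc_id_generate(page_desc->ppt_index, slot);
 *	struct dp_tx_desc_s *tx_desc =
 *		(struct dp_tx_desc_s *)dp_cc_desc_find(soc, id);
 *
 * dp_cc_desc_find() returns the same VA that was written into the SPT slot,
 * since the ID carries both the PPT page index and the in-page slot index.
 */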

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG near-full levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near-full mode
 *		of processing the entries in the SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level at which it enters the near-full
 *		mode of processing the entries in the SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level at which it enters the critical
 *		level of the full condition and drastic steps need to be
 *		taken for processing the entries in the SRNG
 */
enum dp_srng_near_full_levels {
	DP_SRNG_THRESH_SAFE,
	DP_SRNG_THRESH_NEAR_FULL,
	DP_SRNG_THRESH_CRITICAL,
};

/**
 * dp_srng_check_ring_near_full() - Check if an SRNG is marked as near-full
 *				    by its corresponding near-full irq handler
 * @soc: Datapath SoC handle
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: 1, if the srng was marked as near-full
 *	   0, if the srng was not marked as near-full
 */
static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
					       struct dp_srng *dp_srng)
{
	return qdf_atomic_read(&dp_srng->near_full);
}

/**
 * dp_srng_get_near_full_level() - Check the number of available entries in
 *				   the consumer SRNG and return its near-full
 *				   level
 * @soc: Datapath SoC handle [to be validated by the caller]
 * @dp_srng: SRNG handle
 *
 * Return: near-full level
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
	uint32_t num_valid;

	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
						  dp_srng->hal_srng,
						  true);

	if (num_valid > dp_srng->crit_thresh)
		return DP_SRNG_THRESH_CRITICAL;
	else if (num_valid < dp_srng->safe_thresh)
		return DP_SRNG_THRESH_SAFE;
	else
		return DP_SRNG_THRESH_NEAR_FULL;
}

#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER 2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near-full level and update
 *			the reap limit and flags to reflect the state
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *			per the near-full state
 *
 * Return: 1, if the srng is near full
 *	   0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	int ring_near_full = 0, near_full_level;

	if (dp_srng_check_ring_near_full(soc, srng)) {
		near_full_level = dp_srng_get_near_full_level(soc, srng);
		switch (near_full_level) {
		case DP_SRNG_THRESH_CRITICAL:
			/* Currently not doing anything special here */
			/* fall through */
		case DP_SRNG_THRESH_NEAR_FULL:
			ring_near_full = 1;
			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
			break;
		case DP_SRNG_THRESH_SAFE:
			qdf_atomic_set(&srng->near_full, 0);
			ring_near_full = 0;
			break;
		default:
			qdf_assert(0);
			break;
		}
	}

	return ring_near_full;
}
#else
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	return 0;
}
#endif
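
/*
 * Illustrative usage sketch (not part of the original header): a ring reap
 * loop can query the near-full state once per pass and let the helper scale
 * its reap quota. The initial max_reap_limit of 64 below is a hypothetical
 * per-loop quota, not a value taken from this driver.
 *
 *	int max_reap_limit = 64;
 *	int ring_near_full;
 *
 *	ring_near_full = _dp_srng_test_and_update_nf_params(soc, srng,
 *							    &max_reap_limit);
 *
 * When the ring is near full, the helper multiplies max_reap_limit by
 * DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER so the caller reaps more entries in
 * this pass before yielding.
 */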
#endif /* __DP_BE_H */