dp_be.h
/*
 * Copyright (c) 2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __DP_BE_H
#define __DP_BE_H

#include <dp_types.h>
#include <hal_be_tx.h>

/* maximum number of entries in one page of secondary page table */
#define DP_CC_SPT_PAGE_MAX_ENTRIES 512

/* maximum number of entries in primary page table */
#define DP_CC_PPT_MAX_ENTRIES 1024

/* cookie conversion required CMEM offset from CMEM pool */
#define DP_CC_MEM_OFFSET_IN_CMEM 0

/* cookie conversion primary page table size 4K */
#define DP_CC_PPT_MEM_SIZE 4096

/* FST required CMEM offset from CMEM pool */
#define DP_FST_MEM_OFFSET_IN_CMEM \
	(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)

/* lower 9 bits in Desc ID for offset in page of SPT */
#define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0
#define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF
#define DP_CC_DESC_ID_SPT_VA_OS_LSB 0
#define DP_CC_DESC_ID_SPT_VA_OS_MSB 8

/* higher 11 bits in Desc ID for offset in CMEM of PPT */
#define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19
#define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9
#define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00
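/*
 * Worked example of the ID layout above (illustrative only): a cookie ID of
 * 0xA1F decomposes as
 *	PPT page offset  = (0xA1F & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >> 9 = 5
 *	SPT entry offset = 0xA1F & DP_CC_DESC_ID_SPT_VA_OS_MASK = 31
 * i.e. entry 31 of the SPT page whose physical address sits in PPT slot 5.
 */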
/*
 * page 4K unaligned case, a single SPT page physical address
 * needs 8 bytes in the PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8

/*
 * page 4K aligned case, a single SPT page physical address
 * needs 4 bytes in the PPT
 */
#define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4

/* 4K aligned case, number of bits HW appends for one PPT entry value */
#define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12
/**
 * struct dp_spt_page_desc - secondary page table page descriptor
 * @next: pointer to next linked SPT page desc
 * @page_v_addr: page virtual address
 * @page_p_addr: page physical address
 * @ppt_index: entry index in primary page table where this page's
 *	       physical address is stored
 * @avail_entry_index: index of the next available entry to store a TX/RX
 *		       desc VA
 */
struct dp_spt_page_desc {
	struct dp_spt_page_desc *next;
	uint8_t *page_v_addr;
	qdf_dma_addr_t page_p_addr;
	uint16_t ppt_index;
	uint16_t avail_entry_index;
};

/**
 * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
 * @cmem_base: CMEM base address for primary page table setup
 * @total_page_num: total number of DDR pages allocated
 * @free_page_num: number of DDR pages available for TX/RX desc ID
 *		   initialization
 * @page_desc_freelist: list of available page descs
 * @page_desc_base: page desc buffer base address
 * @page_pool: DDR page pool
 * @cc_lock: lock for page acquire/free
 */
struct dp_hw_cookie_conversion_t {
	uint32_t cmem_base;
	uint32_t total_page_num;
	uint32_t free_page_num;
	struct dp_spt_page_desc *page_desc_freelist;
	struct dp_spt_page_desc *page_desc_base;
	struct qdf_mem_multi_page_t page_pool;
	qdf_spinlock_t cc_lock;
};

/**
 * struct dp_spt_page_desc_list - container of SPT page desc list info
 * @spt_page_list_head: head of SPT page descriptor list
 * @spt_page_list_tail: tail of SPT page descriptor list
 * @num_spt_pages: number of SPT page descriptors allocated
 */
struct dp_spt_page_desc_list {
	struct dp_spt_page_desc *spt_page_list_head;
	struct dp_spt_page_desc *spt_page_list_tail;
	uint16_t num_spt_pages;
};
/* HW reading 8 bytes for VA */
#define DP_CC_HW_READ_BYTES 8
#define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
	{ *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
	  = (uintptr_t)(_desc_va); }
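/*
 * Illustrative sketch (the local names "pg", "idx" and "tx_desc" are
 * hypothetical, not part of this header): a desc pool init path publishes
 * each SW descriptor VA into its SPT slot via
 *
 *	DP_CC_SPT_PAGE_UPDATE_VA(pg->page_v_addr, idx, tx_desc);
 *
 * so that HW cookie conversion can later hand back the 64-bit VA directly.
 */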
/**
 * struct dp_tx_bank_profile - DP wrapper for TCL banks
 * @is_configured: flag indicating if this bank is configured
 * @ref_count: ref count indicating number of users of the bank
 * @bank_config: HAL TX bank configuration
 */
struct dp_tx_bank_profile {
	uint8_t is_configured;
	qdf_atomic_t ref_count;
	union hal_tx_bank_config bank_config;
};

/**
 * struct dp_soc_be - Extended DP soc for BE targets
 * @soc: dp soc structure
 * @num_bank_profiles: num TX bank profiles
 * @tx_bank_lock: lock protecting the TX bank profile table
 * @bank_profiles: bank profiles for various TX banks
 * @hw_cc_ctx: core context of HW cookie conversion
 * @tx_spt_page_desc: SPT page descs allocated for TX desc pools
 * @rx_spt_page_desc: SPT page descs allocated for RX desc pools
 */
struct dp_soc_be {
	struct dp_soc soc;
	uint8_t num_bank_profiles;
	qdf_mutex_t tx_bank_lock;
	struct dp_tx_bank_profile *bank_profiles;
	struct dp_hw_cookie_conversion_t hw_cc_ctx;
	struct dp_spt_page_desc_list tx_spt_page_desc[MAX_TXDESC_POOLS];
	struct dp_spt_page_desc_list rx_spt_page_desc[MAX_RXDESC_POOLS];
};
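/*
 * Note: the BE containers embed the common structure as their first member,
 * so the cast helpers further below, e.g.
 *
 *	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
 *
 * are valid only because &be_soc->soc == (struct dp_soc *)be_soc, which is
 * also why dp_get_soc_context_size_be() reports the BE structure size.
 */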
/* convert struct dp_soc_be pointer to struct dp_soc pointer */
#define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)

/**
 * struct dp_pdev_be - Extended DP pdev for BE targets
 * @pdev: dp pdev structure
 */
struct dp_pdev_be {
	struct dp_pdev pdev;
};

/**
 * struct dp_vdev_be - Extended DP vdev for BE targets
 * @vdev: dp vdev structure
 * @bank_id: bank_id to be used for TX
 * @vdev_id_check_en: flag indicating if HW vdev_id check is enabled for vdev
 */
struct dp_vdev_be {
	struct dp_vdev vdev;
	int8_t bank_id;
	uint8_t vdev_id_check_en;
};
/**
 * struct dp_peer_be - Extended DP peer for BE targets
 * @peer: dp peer structure
 */
struct dp_peer_be {
	struct dp_peer peer;
};
/**
 * dp_get_soc_context_size_be() - get context size for target specific DP soc
 *
 * Return: value in bytes for BE specific soc structure
 */
qdf_size_t dp_get_soc_context_size_be(void);

/**
 * dp_initialize_arch_ops_be() - initialize BE specific arch ops
 * @arch_ops: arch ops pointer
 *
 * Return: none
 */
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);
/**
 * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
 * @context_type: context type for which the size is required
 *
 * Return: size in bytes for the context_type
 */
qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);
/**
 * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
 * @soc: dp_soc pointer
 *
 * Return: dp_soc_be pointer
 */
static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
{
	return (struct dp_soc_be *)soc;
}

/**
 * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
 * @pdev: dp_pdev pointer
 *
 * Return: dp_pdev_be pointer
 */
static inline
struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
{
	return (struct dp_pdev_be *)pdev;
}

/**
 * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
 * @vdev: dp_vdev pointer
 *
 * Return: dp_vdev_be pointer
 */
static inline
struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
{
	return (struct dp_vdev_be *)vdev;
}

/**
 * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
 * @peer: dp_peer pointer
 *
 * Return: dp_peer_be pointer
 */
static inline
struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
{
	return (struct dp_peer_be *)peer;
}
/**
 * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptors from pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc list head
 * @list_tail: pointer to page desc list tail
 * @num_desc: number of TX/RX descs required for SPT pages
 *
 * Return: number of SPT page descs allocated
 */
uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
				   struct dp_spt_page_desc **list_head,
				   struct dp_spt_page_desc **list_tail,
				   uint16_t num_desc);

/**
 * dp_cc_spt_page_desc_free() - free SPT DDR page descriptors back to pool
 * @be_soc: beryllium soc handle
 * @list_head: pointer to page desc list head
 * @list_tail: pointer to page desc list tail
 * @page_nums: number of page descs freed back to pool
 *
 * Return: None
 */
void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
			      struct dp_spt_page_desc **list_head,
			      struct dp_spt_page_desc **list_tail,
			      uint16_t page_nums);
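/*
 * Usage sketch (caller-side names such as pool_id and num_tx_descs are
 * hypothetical): a TX desc pool init path would reserve enough SPT pages for
 * its descriptors and release them again on deinit:
 *
 *	struct dp_spt_page_desc_list *list =
 *			&be_soc->tx_spt_page_desc[pool_id];
 *	uint16_t pages;
 *
 *	pages = dp_cc_spt_page_desc_alloc(be_soc, &list->spt_page_list_head,
 *					  &list->spt_page_list_tail,
 *					  num_tx_descs);
 *	if (!pages)
 *		return QDF_STATUS_E_FAULT;
 *	list->num_spt_pages = pages;
 *	...
 *	dp_cc_spt_page_desc_free(be_soc, &list->spt_page_list_head,
 *				 &list->spt_page_list_tail,
 *				 list->num_spt_pages);
 */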
/**
 * dp_cc_desc_id_generate() - generate SW cookie ID according to whether the
 *			      DDR page is 4K aligned or not
 * @ppt_index: offset index in primary page table
 * @spt_index: offset index in secondary DDR page
 *
 * Generate the SW cookie ID in the layout HW expects.
 *
 * Return: cookie ID
 */
static inline uint32_t dp_cc_desc_id_generate(uint16_t ppt_index,
					      uint16_t spt_index)
{
	/*
	 * For the 4K-aligned case the CMEM entry size is 4 bytes, so the
	 * HW index in bits 19~10 is ppt_index / 2 and the "high 32 bits"
	 * flag in bit 9 is ppt_index % 2; bits 19~9 therefore carry exactly
	 * the original ppt_index value.
	 * For the 4K-unaligned case the CMEM entry size is 8 bytes and
	 * bits 19~9 hold the HW index directly, again equal to ppt_index.
	 */
	return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
		spt_index);
}
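/*
 * For example, ppt_index = 3 and spt_index = 100 pack to
 * (3 << 9) | 100 = 0x664; dp_cc_desc_find() below performs the inverse
 * unpacking when the descriptor VA has to be looked up from the ID.
 */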
/**
 * dp_cc_desc_find() - find TX/RX desc virtual address by cookie ID
 * @soc: dp_soc handle
 * @desc_id: TX/RX desc ID
 *
 * Return: TX/RX desc virtual address
 */
static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
					 uint32_t desc_id)
{
	struct dp_soc_be *be_soc;
	struct dp_hw_cookie_conversion_t *cc_ctx;
	uint16_t ppt_page_id, spt_va_id;
	uint8_t *spt_page_va;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->hw_cc_ctx;
	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;
	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;

	/*
	 * The PPT index in CMEM follows the same order as the pages in the
	 * page desc array set up during initialization.
	 * Each entry in the DDR page is 64 bits; on 32-bit systems only the
	 * lower 32 bits of the VA are needed.
	 */
	spt_page_va = cc_ctx->page_desc_base[ppt_page_id].page_v_addr;

	return (*((uintptr_t *)(spt_page_va +
				spt_va_id * DP_CC_HW_READ_BYTES)));
}
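/*
 * Illustrative round trip (variable names are hypothetical): once a TX desc
 * VA has been published to its SPT slot with DP_CC_SPT_PAGE_UPDATE_VA() and
 * tagged with dp_cc_desc_id_generate(), a completion path can recover it as
 *
 *	struct dp_tx_desc_s *tx_desc;
 *
 *	tx_desc = (struct dp_tx_desc_s *)dp_cc_desc_find(soc, desc_id);
 *
 * i.e. a single page lookup plus offset instead of a multi-level ID decode.
 */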
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * enum dp_srng_near_full_levels - SRNG Near FULL levels
 * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
 *		of processing the entries in SRNG
 * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
 *		condition and drastic steps need to be taken for processing
 *		the entries in SRNG
 */
enum dp_srng_near_full_levels {
	DP_SRNG_THRESH_SAFE,
	DP_SRNG_THRESH_NEAR_FULL,
	DP_SRNG_THRESH_CRITICAL,
};
/**
 * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
 *				its corresponding near-full irq handler
 * @soc: Datapath SoC handle
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: 1, if the srng was marked as near-full
 *	   0, if the srng was not marked as near-full
 */
static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
					       struct dp_srng *dp_srng)
{
	return qdf_atomic_read(&dp_srng->near_full);
}
/**
 * dp_srng_get_near_full_level() - Check the number of available entries in
 *			the consumer srng and return its near-full level.
 * @soc: Datapath SoC handle [to be validated by the caller]
 * @dp_srng: datapath handle for this SRNG
 *
 * Return: near-full level
 */
static inline int
dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
{
	uint32_t num_valid;

	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
						  dp_srng->hal_srng,
						  true);

	if (num_valid > dp_srng->crit_thresh)
		return DP_SRNG_THRESH_CRITICAL;
	else if (num_valid < dp_srng->safe_thresh)
		return DP_SRNG_THRESH_SAFE;
	else
		return DP_SRNG_THRESH_NEAR_FULL;
}
#define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER	2

/**
 * _dp_srng_test_and_update_nf_params() - Test the near full level and update
 *			the reap_limit and flags to reflect the state.
 * @soc: Datapath soc handle
 * @srng: Datapath handle for the srng
 * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
 *			per the near-full state
 *
 * Return: 1, if the srng is near full
 *	   0, if the srng is not near full
 */
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	int ring_near_full = 0, near_full_level;

	if (dp_srng_check_ring_near_full(soc, srng)) {
		near_full_level = dp_srng_get_near_full_level(soc, srng);
		switch (near_full_level) {
		case DP_SRNG_THRESH_CRITICAL:
			/* Currently not doing anything special here */
			/* fall through */
		case DP_SRNG_THRESH_NEAR_FULL:
			ring_near_full = 1;
			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
			break;
		case DP_SRNG_THRESH_SAFE:
			qdf_atomic_set(&srng->near_full, 0);
			ring_near_full = 0;
			break;
		default:
			qdf_assert(0);
			break;
		}
	}

	return ring_near_full;
}
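/*
 * Usage sketch (caller-side names such as "budget" and "srng" are
 * hypothetical): a ring reap loop would consult this helper before reaping,
 * so that the per-loop quota is doubled while the ring stays in the
 * near-full or critical band:
 *
 *	int max_reap_limit = budget;
 *	int near_full;
 *
 *	near_full = _dp_srng_test_and_update_nf_params(soc, srng,
 *						       &max_reap_limit);
 *	reap up to max_reap_limit entries, then re-enable the near-full
 *	interrupt once dp_srng_get_near_full_level() reports
 *	DP_SRNG_THRESH_SAFE.
 */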
#else
static inline int
_dp_srng_test_and_update_nf_params(struct dp_soc *soc,
				   struct dp_srng *srng,
				   int *max_reap_limit)
{
	return 0;
}
#endif /* WLAN_FEATURE_NEAR_FULL_IRQ */

#endif /* __DP_BE_H */