/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef __ADRENO_A6XX_GMU_H
#define __ADRENO_A6XX_GMU_H

#include <linux/mailbox_client.h>

#include "adreno_a6xx_hfi.h"
#include "kgsl_gmu_core.h"
  11. /**
  12. * struct a6xx_gmu_device - GMU device structure
  13. * @ver: GMU Version information
  14. * @irq: GMU interrupt number
  15. * @fw_image: GMU FW image
  16. * @hfi_mem: pointer to HFI shared memory
  17. * @dump_mem: pointer to GMU debug dump memory
  18. * @gmu_log: gmu event log memory
  19. * @hfi: HFI controller
  20. * @num_gpupwrlevels: number GPU frequencies in GPU freq table
  21. * @num_bwlevel: number of GPU BW levels
  22. * @num_cnocbwlevel: number CNOC BW levels
  23. * @rpmh_votes: RPMh TCS command set for GPU, GMU voltage and bw scaling
  24. * @clks: GPU subsystem clocks required for GMU functionality
  25. * @wakeup_pwrlevel: GPU wake up power/DCVS level in case different
  26. * than default power level
  27. * @idle_level: Minimal GPU idle power level
  28. * @fault_count: GMU fault count
  29. * @mailbox: Messages to AOP for ACD enable/disable go through this
  30. * @log_wptr_retention: Store the log wptr offset on slumber
  31. */
  32. struct a6xx_gmu_device {
  33. struct {
  34. u32 core;
  35. u32 core_dev;
  36. u32 pwr;
  37. u32 pwr_dev;
  38. u32 hfi;
  39. } ver;
  40. struct platform_device *pdev;
  41. int irq;
  42. const struct firmware *fw_image;
  43. struct kgsl_memdesc *dump_mem;
  44. struct kgsl_memdesc *gmu_log;
  45. /** @vrb: GMU virtual register bank memory */
  46. struct kgsl_memdesc *vrb;
  47. /** @trace: gmu trace container */
  48. struct kgsl_gmu_trace trace;
  49. struct a6xx_hfi hfi;
  50. struct clk_bulk_data *clks;
  51. /** @num_clks: Number of entries in the @clks array */
  52. int num_clks;
  53. unsigned int idle_level;
  54. /** @freqs: Array of GMU frequencies */
  55. u32 freqs[GMU_MAX_PWRLEVELS];
  56. /** @vlvls: Array of GMU voltage levels */
  57. u32 vlvls[GMU_MAX_PWRLEVELS];
  58. struct kgsl_mailbox mailbox;
  59. bool preallocations;
  60. /** @gmu_globals: Array to store gmu global buffers */
  61. struct kgsl_memdesc gmu_globals[GMU_KERNEL_ENTRIES];
  62. /** @global_entries: To keep track of number of gmu buffers */
  63. u32 global_entries;
  64. struct gmu_vma_entry *vma;
  65. unsigned int log_wptr_retention;
  66. /** @cm3_fault: whether gmu received a cm3 fault interrupt */
  67. atomic_t cm3_fault;
  68. /**
  69. * @itcm_shadow: Copy of the itcm block in firmware binary used for
  70. * snapshot
  71. */
  72. void *itcm_shadow;
  73. /** @flags: Internal gmu flags */
  74. unsigned long flags;
  75. /** @rscc_virt: Pointer where RSCC block is mapped */
  76. void __iomem *rscc_virt;
  77. /** @domain: IOMMU domain for the kernel context */
  78. struct iommu_domain *domain;
  79. /** @rdpm_cx_virt: Pointer where the RDPM CX block is mapped */
  80. void __iomem *rdpm_cx_virt;
  81. /** @rdpm_mx_virt: Pointer where the RDPM MX block is mapped */
  82. void __iomem *rdpm_mx_virt;
  83. /** @log_stream_enable: GMU log streaming enable. Disabled by default */
  84. bool log_stream_enable;
  85. /** @log_group_mask: Allows overriding default GMU log group mask */
  86. u32 log_group_mask;
  87. struct kobject log_kobj;
  88. /*
  89. * @perf_ddr_bw: The lowest ddr bandwidth that puts CX at a corner at
  90. * which GMU can run at higher frequency.
  91. */
  92. u32 perf_ddr_bw;
  93. /** @num_oob_perfcntr: Number of active oob_perfcntr requests */
  94. u32 num_oob_perfcntr;
  95. /** @pdc_cfg_base: Base address of PDC cfg registers */
  96. void __iomem *pdc_cfg_base;
  97. /** @pdc_seq_base: Base address of PDC seq registers */
  98. void __iomem *pdc_seq_base;
  99. /** @stats_enable: GMU stats feature enable */
  100. bool stats_enable;
  101. /** @stats_mask: GMU performance countables to enable */
  102. u32 stats_mask;
  103. /** @stats_interval: GMU performance counters sampling interval */
  104. u32 stats_interval;
  105. /** @stats_kobj: kernel object for GMU stats directory in sysfs */
  106. struct kobject stats_kobj;
  107. };
  108. /* Helper function to get to a6xx gmu device from adreno device */
  109. struct a6xx_gmu_device *to_a6xx_gmu(struct adreno_device *adreno_dev);
  110. /* Helper function to get to adreno device from a6xx gmu device */
  111. struct adreno_device *a6xx_gmu_to_adreno(struct a6xx_gmu_device *gmu);
  112. /**
  113. * reserve_gmu_kernel_block() - Allocate a gmu buffer
  114. * @gmu: Pointer to the a6xx gmu device
  115. * @addr: Desired gmu virtual address
  116. * @size: Size of the buffer in bytes
  117. * @vma_id: Target gmu vma where this bufer should be mapped
  118. * @va_align: Alignment as a power of two(2^n) bytes for the GMU VA
  119. *
  120. * This function allocates a buffer and maps it in
  121. * the desired gmu vma
  122. *
  123. * Return: Pointer to the memory descriptor or error pointer on failure
  124. */
  125. struct kgsl_memdesc *reserve_gmu_kernel_block(struct a6xx_gmu_device *gmu,
  126. u32 addr, u32 size, u32 vma_id, u32 va_align);
  127. /**
  128. * reserve_gmu_kernel_block_fixed() - Maps phyical resource address to gmu
  129. * @gmu: Pointer to the a6xx gmu device
  130. * @addr: Desired gmu virtual address
  131. * @size: Size of the buffer in bytes
  132. * @vma_id: Target gmu vma where this buffer should be mapped
  133. * @resource: Name of the resource to get the size and address to allocate
  134. * @attrs: Attributes for the mapping
  135. * @va_align: Alignment as a power of two(2^n) bytes for the GMU VA
  136. *
  137. * This function maps the physcial resource address to desired gmu vma
  138. *
  139. * Return: Pointer to the memory descriptor or error pointer on failure
  140. */
  141. struct kgsl_memdesc *reserve_gmu_kernel_block_fixed(struct a6xx_gmu_device *gmu,
  142. u32 addr, u32 size, u32 vma_id, const char *resource, int attrs, u32 va_align);
  143. /**
  144. * a6xx_build_rpmh_tables - Build the rpmh tables
  145. * @adreno_dev: Pointer to the adreno device
  146. *
  147. * This function creates the gpu dcvs and bw tables
  148. *
  149. * Return: 0 on success and negative error on failure
  150. */
  151. int a6xx_build_rpmh_tables(struct adreno_device *adreno_dev);
  152. /**
  153. * a6xx_gmu_gx_is_on - Check if GX is on
  154. * @adreno_dev: Pointer to the adreno device
  155. *
  156. * This function reads pwr status registers to check if GX
  157. * is on or off
  158. */
  159. bool a6xx_gmu_gx_is_on(struct adreno_device *adreno_dev);
  160. /**
  161. * a6xx_gmu_device_snapshot - A6XX GMU snapshot function
  162. * @device: Device being snapshotted
  163. * @snapshot: Pointer to the snapshot instance
  164. *
  165. * This is where all of the A6XX GMU specific bits and pieces are grabbed
  166. * into the snapshot memory
  167. */
  168. void a6xx_gmu_device_snapshot(struct kgsl_device *device,
  169. struct kgsl_snapshot *snapshot);
  170. /**
  171. * a6xx_gmu_device_probe - A6XX GMU snapshot function
  172. * @pdev: Pointer to the platform device
  173. * @chipid: Chipid of the target
  174. * @gpucore: Pointer to the gpucore
  175. *
  176. * The target specific probe function for gmu based a6xx targets.
  177. */
  178. int a6xx_gmu_device_probe(struct platform_device *pdev,
  179. u32 chipid, const struct adreno_gpu_core *gpucore);
  180. /**
  181. * a6xx_gmu_reset - Reset and restart the gmu
  182. * @adreno_dev: Pointer to the adreno device
  183. *
  184. * Return: 0 on success or negative error on failure
  185. */
  186. int a6xx_gmu_reset(struct adreno_device *adreno_dev);
  187. /**
  188. * a6xx_enable_gpu_irq - Enable gpu interrupt
  189. * @adreno_dev: Pointer to the adreno device
  190. */
  191. void a6xx_enable_gpu_irq(struct adreno_device *adreno_dev);
  192. /**
  193. * a6xx_disable_gpu_irq - Disable gpu interrupt
  194. * @adreno_dev: Pointer to the adreno device
  195. */
  196. void a6xx_disable_gpu_irq(struct adreno_device *adreno_dev);
  197. /**
  198. * a6xx_gmu_snapshot- Take snapshot for gmu targets
  199. * @adreno_dev: Pointer to the adreno device
  200. * @snapshot: Pointer to the snapshot structure
  201. *
  202. * Send an NMI to gmu if we hit a gmu fault. Then take gmu
  203. * snapshot and carry on with rest of the a6xx snapshot
  204. */
  205. void a6xx_gmu_snapshot(struct adreno_device *adreno_dev,
  206. struct kgsl_snapshot *snapshot);
  207. /**
  208. * a6xx_gmu_probe - Probe a6xx gmu resources
  209. * @device: Pointer to the kgsl device
  210. * @pdev: Pointer to the gmu platform device
  211. *
  212. * Probe the gmu and hfi resources
  213. *
  214. * Return: 0 on success or negative error on failure
  215. */
  216. int a6xx_gmu_probe(struct kgsl_device *device,
  217. struct platform_device *pdev);
  218. /**
  219. * a6xx_gmu_parse_fw - Parse the gmu fw binary
  220. * @adreno_dev: Pointer to the adreno device
  221. *
  222. * Return: 0 on success or negative error on failure
  223. */
  224. int a6xx_gmu_parse_fw(struct adreno_device *adreno_dev);
  225. /**
  226. * a6xx_gmu_memory_init - Allocate gmu memory
  227. * @adreno_dev: Pointer to the adreno device
  228. *
  229. * Allocates the gmu log buffer and others if ndeeded.
  230. *
  231. * Return: 0 on success or negative error on failure
  232. */
  233. int a6xx_gmu_memory_init(struct adreno_device *adreno_dev);
  234. /**
  235. * a6xx_gmu_aop_send_acd_state - Enable or disable acd feature in aop
  236. * @gmu: Pointer to the a6xx gmu device
  237. * @flag: Boolean to enable or disable acd in aop
  238. *
  239. * This function enables or disables gpu acd feature using mailbox
  240. */
  241. void a6xx_gmu_aop_send_acd_state(struct a6xx_gmu_device *gmu, bool flag);
  242. /**
  243. * a6xx_gmu_disable_gdsc - Disable gmu gdsc
  244. * @adreno_dev: Pointer to the adreno device
  245. */
  246. void a6xx_gmu_disable_gdsc(struct adreno_device *adreno_dev);
  247. /**
  248. * a6xx_gmu_load_fw - Load gmu firmware
  249. * @adreno_dev: Pointer to the adreno device
  250. *
  251. * Loads the gmu firmware binary into TCMs and memory
  252. *
  253. * Return: 0 on success or negative error on failure
  254. */
  255. int a6xx_gmu_load_fw(struct adreno_device *adreno_dev);
  256. /**
  257. * a6xx_gmu_device_start - Bring gmu out of reset
  258. * @adreno_dev: Pointer to the adreno device
  259. *
  260. * Return: 0 on success or negative error on failure
  261. */
  262. int a6xx_gmu_device_start(struct adreno_device *adreno_dev);
  263. /**
  264. * a6xx_gmu_hfi_start - Indicate hfi start to gmu
  265. * @device: Pointer to the kgsl device
  266. *
  267. * Return: 0 on success or negative error on failure
  268. */
  269. int a6xx_gmu_hfi_start(struct adreno_device *adreno_dev);
  270. /**
  271. * a6xx_gmu_itcm_shadow - Create itcm shadow copy for snapshot
  272. * @adreno_dev: Pointer to the adreno device
  273. *
  274. * Return: 0 on success or negative error on failure
  275. */
  276. int a6xx_gmu_itcm_shadow(struct adreno_device *adreno_dev);
  277. /**
  278. * a6xx_gmu_register_config - gmu register configuration
  279. * @adreno_dev: Pointer to the adreno device
  280. *
  281. * Program gmu regsiters based on features
  282. */
  283. void a6xx_gmu_register_config(struct adreno_device *adreno_dev);
  284. /**
  285. * a6xx_gmu_version_info - Get gmu firmware version
  286. * @adreno_dev: Pointer to the adreno device
  287. */
  288. void a6xx_gmu_version_info(struct adreno_device *adreno_dev);
  289. /**
  290. * a6xx_gmu_irq_enable - Enable gmu interrupts
  291. * @adreno_dev: Pointer to the adreno device
  292. */
  293. void a6xx_gmu_irq_enable(struct adreno_device *adreno_dev);
  294. /**
  295. * a6xx_gmu_irq_disable - Disaable gmu interrupts
  296. * @adreno_dev: Pointer to the adreno device
  297. */
  298. void a6xx_gmu_irq_disable(struct adreno_device *adreno_dev);
  299. /**
  300. * a6xx_gmu_suspend - Hard reset the gpu and gmu
  301. * @adreno_dev: Pointer to the adreno device
  302. *
  303. * In case we hit a gmu fault, hard reset the gpu and gmu
  304. * to recover from the fault
  305. */
  306. void a6xx_gmu_suspend(struct adreno_device *adreno_dev);
  307. /**
  308. * a6xx_gmu_oob_set - send gmu oob request
  309. * @device: Pointer to the kgsl device
  310. * @req: Type of oob request as defined in enum oob_request
  311. *
  312. * Request gmu to keep gpu powered up till the oob is cleared
  313. *
  314. * Return: 0 on success or negative error on failure
  315. */
  316. int a6xx_gmu_oob_set(struct kgsl_device *device, enum oob_request oob);
  317. /**
  318. * a6xx_gmu_oob_clear - clear an asserted oob request
  319. * @device: Pointer to the kgsl device
  320. * @req: Type of oob request as defined in enum oob_request
  321. *
  322. * Clear a previously requested oob so that gmu can power
  323. * collapse the gpu
  324. */
  325. void a6xx_gmu_oob_clear(struct kgsl_device *device, enum oob_request oob);
  326. /**
  327. * a6xx_gmu_wait_for_lowest_idle - wait for gmu to complete ifpc
  328. * @adreno_dev: Pointer to the adreno device
  329. *
  330. * If ifpc is enabled, wait for gmu to put gpu into ifpc.
  331. *
  332. * Return: 0 on success or negative error on failure
  333. */
  334. int a6xx_gmu_wait_for_lowest_idle(struct adreno_device *adreno_dev);
  335. /**
  336. * a6xx_gmu_wait_for_idle - Wait for gmu to become idle
  337. * @adreno_dev: Pointer to the adreno device
  338. *
  339. * Return: 0 on success or negative error on failure
  340. */
  341. int a6xx_gmu_wait_for_idle(struct adreno_device *adreno_dev);
  342. /**
  343. * a6xx_rscc_sleep_sequence - Trigger rscc sleep sequence
  344. * @adreno_dev: Pointer to the adreno device
  345. *
  346. * Return: 0 on success or negative error on failure
  347. */
  348. int a6xx_rscc_sleep_sequence(struct adreno_device *adreno_dev);
  349. /**
  350. * a6xx_rscc_wakeup_sequence - Trigger rscc wakeup sequence
  351. * @adreno_dev: Pointer to the adreno device
  352. *
  353. * Return: 0 on success or negative error on failure
  354. */
  355. int a6xx_rscc_wakeup_sequence(struct adreno_device *adreno_dev);
  356. /**
  357. * a6xx_halt_gbif - Halt CX and GX requests in GBIF
  358. * @adreno_dev: Pointer to the adreno device
  359. *
  360. * Clear any pending GX or CX transactions in GBIF and
  361. * deassert GBIF halt
  362. *
  363. * Return: 0 on success or negative error on failure
  364. */
  365. int a6xx_halt_gbif(struct adreno_device *adreno_dev);
  366. /**
  367. * a6xx_load_pdc_ucode - Load and enable pdc sequence
  368. * @adreno_dev: Pointer to the adreno device
  369. *
  370. * Return: 0 on success or negative error on failure
  371. */
  372. int a6xx_load_pdc_ucode(struct adreno_device *adreno_dev);
  373. /**
  374. * a6xx_load_rsc_ucode - Load rscc sequence
  375. * @adreno_dev: Pointer to the adreno device
  376. */
  377. void a6xx_load_rsc_ucode(struct adreno_device *adreno_dev);
  378. /**
  379. * a6xx_gmu_remove - Clean up gmu probed resources
  380. * @device: Pointer to the kgsl device
  381. */
  382. void a6xx_gmu_remove(struct kgsl_device *device);
  383. /**
  384. * a6xx_gmu_enable_clks - Enable gmu clocks
  385. * @adreno_dev: Pointer to the adreno device
  386. * @level: GMU frequency level
  387. *
  388. * Return: 0 on success or negative error on failure
  389. */
  390. int a6xx_gmu_enable_clks(struct adreno_device *adreno_dev, u32 level);
  391. /**
  392. * a6xx_gmu_handle_watchdog - Handle watchdog interrupt
  393. * @adreno_dev: Pointer to the adreno device
  394. */
  395. void a6xx_gmu_handle_watchdog(struct adreno_device *adreno_dev);
  396. /**
  397. * a6xx_gmu_send_nmi - Send NMI to GMU
  398. * @device: Pointer to the kgsl device
  399. * @force: Boolean to forcefully send NMI irrespective of GMU state
  400. */
  401. void a6xx_gmu_send_nmi(struct kgsl_device *device, bool force);
  402. /**
  403. * a6xx_gmu_add_to_minidump - Register a6xx_device with va minidump
  404. * @adreno_dev: Pointer to the adreno device
  405. */
  406. int a6xx_gmu_add_to_minidump(struct adreno_device *adreno_dev);
  407. #endif