adreno_gen8_gmu.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef __ADRENO_GEN8_GMU_H
#define __ADRENO_GEN8_GMU_H

#include <linux/soc/qcom/qcom_aoss.h>

#include "adreno_gen8_hfi.h"
#include "kgsl_gmu_core.h"

struct gen8_dcvs_table {
	u32 gpu_level_num;
	u32 gmu_level_num;
	struct opp_gx_desc gx_votes[MAX_GX_LEVELS];
	struct opp_desc cx_votes[MAX_CX_LEVELS];
};

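/*
 * Usage sketch: a hypothetical helper (not declared in this header) that
 * bounds-checks a DCVS table, assuming gpu_level_num counts gx_votes[]
 * entries and gmu_level_num counts cx_votes[] entries:
 *
 *	static int gen8_dcvs_table_check(const struct gen8_dcvs_table *table)
 *	{
 *		if (table->gpu_level_num > MAX_GX_LEVELS ||
 *		    table->gmu_level_num > MAX_CX_LEVELS)
 *			return -EINVAL;
 *
 *		return 0;
 *	}
 */
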
/**
 * struct gen8_gmu_device - GMU device structure
 * @ver: GMU version information
 * @irq: GMU interrupt number
 * @fw_image: GMU FW image
 * @hfi_mem: pointer to HFI shared memory
 * @dump_mem: pointer to GMU debug dump memory
 * @gmu_log: GMU event log memory
 * @hfi: HFI controller
 * @num_gpupwrlevels: number of GPU frequencies in the GPU frequency table
 * @num_bwlevel: number of GPU BW levels
 * @num_cnocbwlevel: number of CNOC BW levels
 * @rpmh_votes: RPMh TCS command set for GPU, GMU voltage and bandwidth scaling
 * @clks: GPU subsystem clocks required for GMU functionality
 * @wakeup_pwrlevel: GPU wakeup power/DCVS level, in case it differs
 *	from the default power level
 * @idle_level: Minimal GPU idle power level
 * @fault_count: GMU fault count
 * @log_wptr_retention: Store the log wptr offset on slumber
 */
struct gen8_gmu_device {
	struct {
		u32 core;
		u32 core_dev;
		u32 pwr;
		u32 pwr_dev;
		u32 hfi;
	} ver;
	struct platform_device *pdev;
	int irq;
	const struct firmware *fw_image;
	struct kgsl_memdesc *dump_mem;
	struct kgsl_memdesc *gmu_log;
	/** @vrb: GMU virtual register bank memory */
	struct kgsl_memdesc *vrb;
	/** @trace: gmu trace container */
	struct kgsl_gmu_trace trace;
	/** @gmu_init_scratch: Memory to store the initial HFI messages */
	struct kgsl_memdesc *gmu_init_scratch;
	/** @gpu_boot_scratch: Memory to store the bootup HFI messages */
	struct kgsl_memdesc *gpu_boot_scratch;
	struct gen8_hfi hfi;
	/** @clks: GPU subsystem clocks required for GMU functionality */
	struct clk_bulk_data *clks;
	/** @num_clks: Number of entries in the @clks array */
	int num_clks;
	u32 idle_level;
	/** @freqs: Array of GMU frequencies */
	u32 freqs[GMU_MAX_PWRLEVELS];
	/** @vlvls: Array of GMU voltage levels */
	u32 vlvls[GMU_MAX_PWRLEVELS];
	/** @qmp: aoss_qmp handle */
	struct qmp *qmp;
	/** @gmu_globals: Array to store gmu global buffers */
	struct kgsl_memdesc gmu_globals[GMU_KERNEL_ENTRIES];
	/** @global_entries: To keep track of number of gmu buffers */
	u32 global_entries;
	struct gmu_vma_entry *vma;
	u32 log_wptr_retention;
	/** @cm3_fault: whether gmu received a cm3 fault interrupt */
	atomic_t cm3_fault;
	/**
	 * @itcm_shadow: Copy of the itcm block in firmware binary used for
	 * snapshot
	 */
	void *itcm_shadow;
	/** @flags: Internal gmu flags */
	unsigned long flags;
	/** @rscc_virt: Pointer where RSCC block is mapped */
	void __iomem *rscc_virt;
	/** @domain: IOMMU domain for the kernel context */
	struct iommu_domain *domain;
	/** @log_stream_enable: GMU log streaming enable. Disabled by default */
	bool log_stream_enable;
	/** @log_group_mask: Allows overriding default GMU log group mask */
	u32 log_group_mask;
	struct kobject log_kobj;
	/**
	 * @perf_ddr_bw: The lowest ddr bandwidth that puts CX at a corner at
	 * which GMU can run at higher frequency.
	 */
	u32 perf_ddr_bw;
	/** @rdpm_cx_virt: Pointer where the RDPM CX block is mapped */
	void __iomem *rdpm_cx_virt;
	/** @rdpm_mx_virt: Pointer where the RDPM MX block is mapped */
	void __iomem *rdpm_mx_virt;
	/** @num_oob_perfcntr: Number of active oob_perfcntr requests */
	u32 num_oob_perfcntr;
	/** @acd_debug_val: DVM value to calibrate ACD for a level */
	u32 acd_debug_val;
	/** @stats_enable: GMU stats feature enable */
	bool stats_enable;
	/** @stats_mask: GMU performance countables to enable */
	u32 stats_mask;
	/** @stats_interval: GMU performance counters sampling interval */
	u32 stats_interval;
	/** @stats_kobj: kernel object for GMU stats directory in sysfs */
	struct kobject stats_kobj;
	/** @cp_init_hdr: raw command header for cp_init */
	u32 cp_init_hdr;
	/** @switch_to_unsec_hdr: raw command header for switch to unsecure packet */
	u32 switch_to_unsec_hdr;
	/** @dcvs_table: Table for gpu dcvs levels */
	struct gen8_dcvs_table dcvs_table;
};

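/*
 * Usage sketch: a hypothetical debug helper (not declared in this header)
 * that walks the GMU power levels of a gen8_gmu_device, assuming unused
 * entries in @freqs are left at zero:
 *
 *	static void gen8_gmu_dump_pwrlevels(struct gen8_gmu_device *gmu)
 *	{
 *		u32 i;
 *
 *		for (i = 0; i < GMU_MAX_PWRLEVELS; i++) {
 *			if (!gmu->freqs[i])
 *				continue;
 *
 *			dev_info(&gmu->pdev->dev, "GMU level %u: %u Hz (vlvl %u)\n",
 *				i, gmu->freqs[i], gmu->vlvls[i]);
 *		}
 *	}
 */
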
/* Helper function to get the gen8 gmu device from the adreno device */
struct gen8_gmu_device *to_gen8_gmu(struct adreno_device *adreno_dev);

/* Helper function to get the adreno device from the gen8 gmu device */
struct adreno_device *gen8_gmu_to_adreno(struct gen8_gmu_device *gmu);

/**
 * gen8_reserve_gmu_kernel_block() - Allocate a global gmu buffer
 * @gmu: Pointer to the gen8 gmu device
 * @addr: Desired gmu virtual address
 * @size: Size of the buffer in bytes
 * @vma_id: Target gmu vma where this buffer should be mapped
 * @align: Alignment for the GMU VA and GMU mapping size
 *
 * This function allocates a global gmu buffer and maps it in
 * the desired gmu vma
 *
 * Return: Pointer to the memory descriptor or error pointer on failure
 */
struct kgsl_memdesc *gen8_reserve_gmu_kernel_block(struct gen8_gmu_device *gmu,
	u32 addr, u32 size, u32 vma_id, u32 align);

/**
 * gen8_reserve_gmu_kernel_block_fixed() - Map a physical resource address to gmu
 * @gmu: Pointer to the gen8 gmu device
 * @addr: Desired gmu virtual address
 * @size: Size of the buffer in bytes
 * @vma_id: Target gmu vma where this buffer should be mapped
 * @resource: Name of the resource to get the size and address to allocate
 * @attrs: Attributes for the mapping
 * @align: Alignment for the GMU VA and GMU mapping size
 *
 * This function maps the physical resource address to the desired gmu vma
 *
 * Return: Pointer to the memory descriptor or error pointer on failure
 */
struct kgsl_memdesc *gen8_reserve_gmu_kernel_block_fixed(struct gen8_gmu_device *gmu,
	u32 addr, u32 size, u32 vma_id, const char *resource, int attrs, u32 align);

/**
 * gen8_alloc_gmu_kernel_block() - Allocate a gmu buffer
 * @gmu: Pointer to the gen8 gmu device
 * @md: Pointer to the memdesc
 * @size: Size of the buffer in bytes
 * @vma_id: Target gmu vma where this buffer should be mapped
 * @attrs: Attributes for the mapping
 *
 * This function allocates a buffer and maps it in the desired gmu vma
 *
 * Return: 0 on success or error code on failure
 */
int gen8_alloc_gmu_kernel_block(struct gen8_gmu_device *gmu,
	struct kgsl_memdesc *md, u32 size, u32 vma_id, int attrs);

/**
 * gen8_gmu_import_buffer() - Import a gmu buffer
 * @gmu: Pointer to the gen8 gmu device
 * @vma_id: Target gmu vma where this buffer should be mapped
 * @md: Pointer to the memdesc to be mapped
 * @attrs: Attributes for the mapping
 * @align: Alignment for the GMU VA and GMU mapping size
 *
 * This function imports and maps a buffer to a gmu vma
 *
 * Return: 0 on success or error code on failure
 */
int gen8_gmu_import_buffer(struct gen8_gmu_device *gmu, u32 vma_id,
	struct kgsl_memdesc *md, u32 attrs, u32 align);

/**
 * gen8_free_gmu_block() - Free a gmu buffer
 * @gmu: Pointer to the gen8 gmu device
 * @md: Pointer to the memdesc that is to be freed
 *
 * This function frees a gmu block allocated by gen8_reserve_gmu_kernel_block()
 */
void gen8_free_gmu_block(struct gen8_gmu_device *gmu, struct kgsl_memdesc *md);

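/*
 * Usage sketch: gen8_reserve_gmu_kernel_block() returns an error pointer on
 * failure, so callers are expected to pair it with IS_ERR()/PTR_ERR() and
 * release the block with gen8_free_gmu_block(). The addr and vma_id values
 * below are illustrative assumptions; the real vma ids come from the GMU
 * VMA definitions, not from this header.
 *
 *	struct kgsl_memdesc *md;
 *
 *	md = gen8_reserve_gmu_kernel_block(gmu, 0, SZ_4K,
 *			GMU_NONCACHED_KERNEL, 0);
 *	if (IS_ERR(md))
 *		return PTR_ERR(md);
 *
 *	... use the buffer ...
 *
 *	gen8_free_gmu_block(gmu, md);
 */
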
/**
 * gen8_build_rpmh_tables - Build the rpmh tables
 * @adreno_dev: Pointer to the adreno device
 *
 * This function creates the gpu dcvs and bw tables
 *
 * Return: 0 on success and negative error on failure
 */
int gen8_build_rpmh_tables(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_gx_is_on - Check if GX is on
 * @adreno_dev: Pointer to the adreno device
 *
 * This function reads the power status registers to check whether
 * GX is on or off.
 *
 * Return: true if GX is on, false otherwise
 */
bool gen8_gmu_gx_is_on(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_device_probe - GEN8 GMU device probe function
 * @pdev: Pointer to the platform device
 * @chipid: Chipid of the target
 * @gpucore: Pointer to the gpucore
 *
 * The target specific probe function for gmu based gen8 targets.
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_gmu_device_probe(struct platform_device *pdev,
	u32 chipid, const struct adreno_gpu_core *gpucore);

/**
 * gen8_gmu_reset - Reset and restart the gmu
 * @adreno_dev: Pointer to the adreno device
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_gmu_reset(struct adreno_device *adreno_dev);

/**
 * gen8_enable_gpu_irq - Enable gpu interrupt
 * @adreno_dev: Pointer to the adreno device
 */
void gen8_enable_gpu_irq(struct adreno_device *adreno_dev);

/**
 * gen8_disable_gpu_irq - Disable gpu interrupt
 * @adreno_dev: Pointer to the adreno device
 */
void gen8_disable_gpu_irq(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_snapshot - Take a snapshot for gmu targets
 * @adreno_dev: Pointer to the adreno device
 * @snapshot: Pointer to the snapshot structure
 *
 * Send an NMI to the gmu if we hit a gmu fault. Then take the gmu
 * snapshot and carry on with the rest of the gen8 snapshot.
 */
void gen8_gmu_snapshot(struct adreno_device *adreno_dev,
	struct kgsl_snapshot *snapshot);

/**
 * gen8_gmu_probe - Probe gen8 gmu resources
 * @device: Pointer to the kgsl device
 * @pdev: Pointer to the gmu platform device
 *
 * Probe the gmu and hfi resources
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_gmu_probe(struct kgsl_device *device,
	struct platform_device *pdev);

/**
 * gen8_gmu_parse_fw - Parse the gmu fw binary
 * @adreno_dev: Pointer to the adreno device
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_gmu_parse_fw(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_memory_init - Allocate gmu memory
 * @adreno_dev: Pointer to the adreno device
 *
 * Allocates the gmu log buffer and others if needed.
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_gmu_memory_init(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_aop_send_acd_state - Enable or disable the acd feature in aop
 * @gmu: Pointer to the gen8 gmu device
 * @flag: Boolean to enable or disable acd in aop
 *
 * This function enables or disables the gpu acd feature using qmp.
 */
void gen8_gmu_aop_send_acd_state(struct gen8_gmu_device *gmu, bool flag);

/**
 * gen8_gmu_load_fw - Load gmu firmware
 * @adreno_dev: Pointer to the adreno device
 *
 * Loads the gmu firmware binary into TCMs and memory
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_gmu_load_fw(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_device_start - Bring gmu out of reset
 * @adreno_dev: Pointer to the adreno device
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_gmu_device_start(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_hfi_start - Indicate hfi start to gmu
 * @adreno_dev: Pointer to the adreno device
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_gmu_hfi_start(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_itcm_shadow - Create itcm shadow copy for snapshot
 * @adreno_dev: Pointer to the adreno device
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_gmu_itcm_shadow(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_register_config - gmu register configuration
 * @adreno_dev: Pointer to the adreno device
 *
 * Program gmu registers based on features.
 */
void gen8_gmu_register_config(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_version_info - Get gmu firmware version
 * @adreno_dev: Pointer to the adreno device
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_gmu_version_info(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_irq_enable - Enable gmu interrupts
 * @adreno_dev: Pointer to the adreno device
 */
void gen8_gmu_irq_enable(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_irq_disable - Disable gmu interrupts
 * @adreno_dev: Pointer to the adreno device
 */
void gen8_gmu_irq_disable(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_suspend - Hard reset the gpu and gmu
 * @adreno_dev: Pointer to the adreno device
 *
 * In case we hit a gmu fault, hard reset the gpu and gmu
 * to recover from the fault
 */
void gen8_gmu_suspend(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_oob_set - Send a gmu oob request
 * @device: Pointer to the kgsl device
 * @oob: Type of oob request as defined in enum oob_request
 *
 * Request the gmu to keep the gpu powered up until the oob is cleared.
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_gmu_oob_set(struct kgsl_device *device, enum oob_request oob);

/**
 * gen8_gmu_oob_clear - Clear an asserted oob request
 * @device: Pointer to the kgsl device
 * @oob: Type of oob request as defined in enum oob_request
 *
 * Clear a previously requested oob so that the gmu can power
 * collapse the gpu.
 */
void gen8_gmu_oob_clear(struct kgsl_device *device, enum oob_request oob);

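/*
 * Usage sketch: an oob vote keeps the gpu powered until it is cleared, so
 * gen8_gmu_oob_set() and gen8_gmu_oob_clear() are expected to be paired.
 * oob_gpu is used here as an assumed enum oob_request value defined in the
 * GMU core header, not in this file.
 *
 *	int ret;
 *
 *	ret = gen8_gmu_oob_set(device, oob_gpu);
 *	if (ret)
 *		return ret;
 *
 *	... access gpu registers while the oob is held ...
 *
 *	gen8_gmu_oob_clear(device, oob_gpu);
 */
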
/**
 * gen8_gmu_wait_for_lowest_idle - Wait for gmu to complete ifpc
 * @adreno_dev: Pointer to the adreno device
 *
 * If ifpc is enabled, wait for gmu to put gpu into ifpc.
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_gmu_wait_for_lowest_idle(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_wait_for_idle - Wait for gmu to become idle
 * @adreno_dev: Pointer to the adreno device
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_gmu_wait_for_idle(struct adreno_device *adreno_dev);

/**
 * gen8_rscc_sleep_sequence - Trigger rscc sleep sequence
 * @adreno_dev: Pointer to the adreno device
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_rscc_sleep_sequence(struct adreno_device *adreno_dev);

/**
 * gen8_rscc_wakeup_sequence - Trigger rscc wakeup sequence
 * @adreno_dev: Pointer to the adreno device
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_rscc_wakeup_sequence(struct adreno_device *adreno_dev);

/**
 * gen8_halt_gbif - Halt CX and GX requests in GBIF
 * @adreno_dev: Pointer to the adreno device
 *
 * Clear any pending GX or CX transactions in GBIF and
 * deassert GBIF halt
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_halt_gbif(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_remove - Clean up gmu probed resources
 * @device: Pointer to the kgsl device
 */
void gen8_gmu_remove(struct kgsl_device *device);

/**
 * gen8_gmu_enable_clks - Enable gmu clocks
 * @adreno_dev: Pointer to the adreno device
 * @level: GMU frequency level
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_gmu_enable_clks(struct adreno_device *adreno_dev, u32 level);

/**
 * gen8_gmu_handle_watchdog - Handle watchdog interrupt
 * @adreno_dev: Pointer to the adreno device
 */
void gen8_gmu_handle_watchdog(struct adreno_device *adreno_dev);

/**
 * gen8_gmu_send_nmi - Send NMI to GMU
 * @device: Pointer to the kgsl device
 * @force: Boolean to forcefully send NMI irrespective of GMU state
 */
void gen8_gmu_send_nmi(struct kgsl_device *device, bool force);

/**
 * gen8_gmu_add_to_minidump - Register gen8_device with va minidump
 * @adreno_dev: Pointer to the adreno device
 *
 * Return: 0 on success or negative error on failure
 */
int gen8_gmu_add_to_minidump(struct adreno_device *adreno_dev);

/**
 * gen8_snapshot_gmu_mem - Snapshot a GMU memory descriptor
 * @device: Pointer to the kgsl device
 * @buf: Destination snapshot buffer
 * @remain: Remaining size of the snapshot buffer
 * @priv: Opaque handle
 *
 * Return: Number of bytes written to snapshot buffer
 */
size_t gen8_snapshot_gmu_mem(struct kgsl_device *device,
	u8 *buf, size_t remain, void *priv);

/**
 * gen8_bus_ab_quantize - Calculate the AB vote that needs to be sent to the GMU
 * @adreno_dev: Handle to the adreno device
 * @ab: AB request that needs to be scaled, in MBps
 *
 * Return: The AB value that needs to be prefixed to the bandwidth vote, in kbps
 */
u32 gen8_bus_ab_quantize(struct adreno_device *adreno_dev, u32 ab);

/**
 * gen8_gmu_rpmh_pwr_state_is_active - Check the state of the GPU HW
 * @device: Pointer to the kgsl device
 *
 * Return: true if the GPU is active, false otherwise
 */
bool gen8_gmu_rpmh_pwr_state_is_active(struct kgsl_device *device);

#endif /* __ADRENO_GEN8_GMU_H */