adreno_a6xx.h 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #ifndef _ADRENO_A6XX_H_
  7. #define _ADRENO_A6XX_H_
  8. #include <linux/delay.h>
  9. #include "a6xx_reg.h"
  10. #include "adreno_a6xx_gmu.h"
  11. #include "adreno_a6xx_rgmu.h"
extern const struct adreno_power_ops a6xx_gmu_power_ops;
extern const struct adreno_power_ops a6xx_rgmu_power_ops;
extern const struct adreno_power_ops a630_gmu_power_ops;
extern const struct adreno_power_ops a6xx_hwsched_power_ops;

/**
 * struct a6xx_gpudev - a6xx specific GPU device container
 * @base: Container for the generic adreno GPU device
 * @hfi_probe: Target specific callback to probe the HFI
 * @hfi_remove: Target specific callback to remove the HFI
 * @handle_watchdog: Target specific callback to handle a GMU watchdog expiry
 */
struct a6xx_gpudev {
	struct adreno_gpudev base;
	int (*hfi_probe)(struct adreno_device *adreno_dev);
	void (*hfi_remove)(struct adreno_device *adreno_dev);
	void (*handle_watchdog)(struct adreno_device *adreno_dev);
};
extern const struct a6xx_gpudev adreno_a630_gpudev;
extern const struct a6xx_gpudev adreno_a6xx_gmu_gpudev;
extern const struct a6xx_gpudev adreno_a6xx_hwsched_gpudev;

/**
 * struct a6xx_device - Container for the a6xx_device
 */
struct a6xx_device {
	/** @gmu: Container for the a6xx GMU device */
	struct a6xx_gmu_device gmu;
	/** @rgmu: Container for the a6xx rGMU device */
	struct a6xx_rgmu_device rgmu;
	/** @adreno_dev: Container for the generic adreno device */
	struct adreno_device adreno_dev;
};
/**
 * struct adreno_a6xx_core - a6xx specific GPU core definitions
 */
struct adreno_a6xx_core {
	/** @base: Container for the generic GPU definitions */
	struct adreno_gpu_core base;
	/** @gmu_major: The maximum GMU version supported by the core */
	u32 gmu_major;
	/** @gmu_minor: The minimum GMU version supported by the core */
	u32 gmu_minor;
	/** @prim_fifo_threshold: target specific value for PC_DBG_ECO_CNTL */
	unsigned int prim_fifo_threshold;
	/** @sqefw_name: Name of the SQE microcode file */
	const char *sqefw_name;
	/** @gmufw_name: Name of the GMU firmware file */
	const char *gmufw_name;
	/** @zap_name: Name of the CPZ zap file */
	const char *zap_name;
	/** @hwcg: List of registers and values to write for HWCG */
	const struct kgsl_regmap_list *hwcg;
	/** @hwcg_count: Number of registers in @hwcg */
	u32 hwcg_count;
	/** @vbif: List of registers and values to write for VBIF */
	const struct kgsl_regmap_list *vbif;
	/** @vbif_count: Number of registers in @vbif */
	u32 vbif_count;
	/** @veto_fal10: veto status for fal10 feature */
	bool veto_fal10;
	/** @pdc_in_aop: True if PDC programmed in AOP */
	bool pdc_in_aop;
	/** @hang_detect_cycles: Hang detect counter timeout value */
	u32 hang_detect_cycles;
	/** @protected_regs: Array of protected registers for the target */
	const struct adreno_protected_regs *protected_regs;
	/** @disable_tseskip: True if TSESkip logic is disabled */
	bool disable_tseskip;
	/** @gx_cpr_toggle: True to toggle GX CPR FSM to avoid CPR stalls */
	bool gx_cpr_toggle;
	/** @highest_bank_bit: The bit of the highest DDR bank */
	u32 highest_bank_bit;
	/** @ctxt_record_size: Size of the preemption record in bytes */
	u64 ctxt_record_size;
	/** @gmu_hub_clk_freq: Gmu hub interface clock frequency */
	u64 gmu_hub_clk_freq;
};
/*
 * Control/status masks for SPTPRAC power sequencing, used by
 * a6xx_gmu_sptprac_enable()/a6xx_gmu_sptprac_disable().
 * NOTE(review): poweroff ctrl = poweron ctrl | BIT(0) — presumably the low
 * bit requests power-down; confirm against the register documentation.
 */
#define SPTPRAC_POWERON_CTRL_MASK	0x00778000
#define SPTPRAC_POWEROFF_CTRL_MASK	0x00778001
#define SPTPRAC_POWEROFF_STATUS_MASK	BIT(2)
#define SPTPRAC_POWERON_STATUS_MASK	BIT(3)

#define A6XX_RETAIN_FF_ENABLE_ENABLE_MASK BIT(11)

/* CP register cluster identifiers */
#define CP_CLUSTER_FE		0x0
#define CP_CLUSTER_SP_VS	0x1
#define CP_CLUSTER_PC_VS	0x2
#define CP_CLUSTER_GRAS		0x3
#define CP_CLUSTER_SP_PS	0x4
#define CP_CLUSTER_PS		0x5
#define CP_CLUSTER_VPC_PS	0x6
/**
 * struct a6xx_cp_preemption_record - CP context record for
 * preemption.
 * @magic: (00) Value at this offset must be equal to
 * A6XX_CP_CTXRECORD_MAGIC_REF.
 * @info: (04) Type of record. Written non-zero (usually) by CP.
 * we must set to zero for all ringbuffers.
 * @errno: (08) Error code. Initialize this to A6XX_CP_CTXRECORD_ERROR_NONE.
 * CP will update to another value if a preemption error occurs.
 * @data: (12) DATA field in YIELD and SET_MARKER packets.
 * Written by CP when switching out. Not used on switch-in. Initialized to 0.
 * @cntl: (16) RB_CNTL, saved and restored by CP. We must initialize this.
 * @rptr: (20) RB_RPTR, saved and restored by CP. We must initialize this.
 * @wptr: (24) RB_WPTR, saved and restored by CP. We must initialize this.
 * @_pad28: (28) Reserved/padding.
 * @rptr_addr: (32) RB_RPTR_ADDR_LO|HI saved and restored. We must initialize.
 * @rbase: (40) RB_BASE_LO|HI saved and restored.
 * @counter: (48) Pointer to preemption counter.
 */
struct a6xx_cp_preemption_record {
	uint32_t magic;
	uint32_t info;
	uint32_t errno;
	uint32_t data;
	uint32_t cntl;
	uint32_t rptr;
	uint32_t wptr;
	uint32_t _pad28;
	uint64_t rptr_addr;
	uint64_t rbase;
	uint64_t counter;
};
/**
 * struct a6xx_cp_smmu_info - CP preemption SMMU info.
 * @magic: (00) The value at this offset must be equal to
 * A6XX_CP_SMMU_INFO_MAGIC_REF.
 * @_pad4: (04) Reserved/padding
 * @ttbr0: (08) Base address of the page table for the
 * incoming context.
 * @asid: (16) Address space identifier for the incoming context.
 * @context_idr: (20) Context Identification Register value.
 */
struct a6xx_cp_smmu_info {
	uint32_t magic;
	uint32_t _pad4;
	uint64_t ttbr0;
	uint32_t asid;
	uint32_t context_idr;
};
/* Magic values expected by CP in the SMMU info and context record blocks */
#define A6XX_CP_SMMU_INFO_MAGIC_REF	0x241350D5UL
#define A6XX_CP_CTXRECORD_MAGIC_REF	0xAE399D6EUL

/* Size of each CP preemption record */
#define A6XX_CP_CTXRECORD_SIZE_IN_BYTES		(2112 * 1024)

/* Size of the user context record block (in bytes) */
#define A6XX_CP_CTXRECORD_USER_RESTORE_SIZE	(192 * 1024)

/* Size of the performance counter save/restore block (in bytes) */
#define A6XX_CP_PERFCOUNTER_SAVE_RESTORE_SIZE	(4 * 1024)

/* Default RB_CNTL value: block size in bits [12:8], RB size in bits [5:0] */
#define A6XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \
		(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F))

/* Size of the CP_INIT pm4 stream in dwords */
#define A6XX_CP_INIT_DWORDS 11

/* Default set of GPU interrupts enabled/handled by the driver */
#define A6XX_INT_MASK \
	((1 << A6XX_INT_CP_AHB_ERROR) | \
	 (1 << A6XX_INT_ATB_ASYNCFIFO_OVERFLOW) | \
	 (1 << A6XX_INT_RBBM_GPC_ERROR) | \
	 (1 << A6XX_INT_CP_SW) | \
	 (1 << A6XX_INT_CP_HW_ERROR) | \
	 (1 << A6XX_INT_CP_IB2) | \
	 (1 << A6XX_INT_CP_IB1) | \
	 (1 << A6XX_INT_CP_RB) | \
	 (1 << A6XX_INT_CP_CACHE_FLUSH_TS) | \
	 (1 << A6XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
	 (1 << A6XX_INT_RBBM_HANG_DETECT) | \
	 (1 << A6XX_INT_UCHE_OOB_ACCESS) | \
	 (1 << A6XX_INT_UCHE_TRAP_INTR) | \
	 (1 << A6XX_INT_TSB_WRITE_ERROR))

/*
 * Reduced interrupt set used with the hwsched gpudev — only error
 * interrupts remain; presumably CP/RB handling moves off the CPU in
 * that configuration (confirm against the hwsched implementation).
 */
#define A6XX_HWSCHED_INT_MASK \
	((1 << A6XX_INT_CP_AHB_ERROR) | \
	 (1 << A6XX_INT_ATB_ASYNCFIFO_OVERFLOW) | \
	 (1 << A6XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
	 (1 << A6XX_INT_UCHE_OOB_ACCESS) | \
	 (1 << A6XX_INT_UCHE_TRAP_INTR) | \
	 (1 << A6XX_INT_TSB_WRITE_ERROR))
  175. /**
  176. * to_a6xx_core - return the a6xx specific GPU core struct
  177. * @adreno_dev: An Adreno GPU device handle
  178. *
  179. * Returns:
  180. * A pointer to the a6xx specific GPU core struct
  181. */
  182. static inline const struct adreno_a6xx_core *
  183. to_a6xx_core(struct adreno_device *adreno_dev)
  184. {
  185. const struct adreno_gpu_core *core = adreno_dev->gpucore;
  186. return container_of(core, struct adreno_a6xx_core, base);
  187. }
/* Preemption functions */
void a6xx_preemption_trigger(struct adreno_device *adreno_dev, bool atomic);
void a6xx_preemption_schedule(struct adreno_device *adreno_dev);
void a6xx_preemption_start(struct adreno_device *adreno_dev);
int a6xx_preemption_init(struct adreno_device *adreno_dev);

/**
 * a6xx_preemption_post_ibsubmit - Insert commands following a submission
 * @adreno_dev: Adreno GPU handle
 * @cmds: Pointer to the ringbuffer to insert opcodes
 *
 * Return: The number of dwords written to @cmds
 */
u32 a6xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev, u32 *cmds);

/**
 * a6xx_preemption_pre_ibsubmit - Insert opcodes before a submission
 * @adreno_dev: Adreno GPU handle
 * @rb: The ringbuffer being written
 * @drawctxt: The draw context being written
 * @cmds: Pointer to the ringbuffer to insert opcodes
 *
 * Return: The number of dwords written to @cmds
 */
u32 a6xx_preemption_pre_ibsubmit(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb, struct adreno_context *drawctxt,
		u32 *cmds);

unsigned int a6xx_set_marker(unsigned int *cmds,
		enum adreno_cp_marker_type type);

void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit);

int a6xx_preemption_context_init(struct kgsl_context *context);

void a6xx_preemption_context_destroy(struct kgsl_context *context);

void a6xx_snapshot(struct adreno_device *adreno_dev,
		struct kgsl_snapshot *snapshot);

void a6xx_crashdump_init(struct adreno_device *adreno_dev);

/* SPTPRAC power rail control via the GMU */
int a6xx_gmu_sptprac_enable(struct adreno_device *adreno_dev);
void a6xx_gmu_sptprac_disable(struct adreno_device *adreno_dev);
bool a6xx_gmu_sptprac_is_on(struct adreno_device *adreno_dev);

/* a619 (holi) specific query for GX rail status */
bool a619_holi_gx_is_on(struct adreno_device *adreno_dev);

/**
 * a6xx_read_alwayson - Read the current always on clock value
 * @adreno_dev: An Adreno GPU handle
 *
 * Return: The current value of the GMU always on counter
 */
u64 a6xx_read_alwayson(struct adreno_device *adreno_dev);
/**
 * a6xx_start - Program a6xx registers
 * @adreno_dev: An Adreno GPU handle
 *
 * This function does all a6xx register programming every
 * time we boot the gpu
 */
void a6xx_start(struct adreno_device *adreno_dev);

/**
 * a6xx_init - Initialize a6xx resources
 * @adreno_dev: An Adreno GPU handle
 *
 * This function does a6xx specific one time initialization
 * and is invoked when the very first client opens a
 * kgsl instance
 *
 * Return: Zero on success and negative error on failure
 */
int a6xx_init(struct adreno_device *adreno_dev);

/**
 * a6xx_rb_start - A6xx specific ringbuffer setup
 * @adreno_dev: An Adreno GPU handle
 *
 * This function does a6xx specific ringbuffer setup and
 * attempts to submit CP INIT and bring GPU out of secure mode
 *
 * Return: Zero on success and negative error on failure
 */
int a6xx_rb_start(struct adreno_device *adreno_dev);

/**
 * a6xx_microcode_read - Get the cp microcode from the filesystem
 * @adreno_dev: An Adreno GPU handle
 *
 * This function gets the firmware from filesystem and sets up
 * the microcode global buffer
 *
 * Return: Zero on success and negative error on failure
 */
int a6xx_microcode_read(struct adreno_device *adreno_dev);

/**
 * a6xx_probe_common - Probe common a6xx resources
 * @pdev: Pointer to the platform device
 * @adreno_dev: Pointer to the adreno device
 * @chipid: Chipid of the target
 * @gpucore: Pointer to the gpucore structure
 *
 * This function sets up the a6xx resources common across all
 * a6xx targets
 */
int a6xx_probe_common(struct platform_device *pdev,
		struct adreno_device *adreno_dev, u32 chipid,
		const struct adreno_gpu_core *gpucore);

/**
 * a6xx_hw_isidle - Check whether a6xx gpu is idle or not
 * @adreno_dev: An Adreno GPU handle
 *
 * Return: True if gpu is idle, otherwise false
 */
bool a6xx_hw_isidle(struct adreno_device *adreno_dev);

/**
 * a6xx_spin_idle_debug - Debug logging used when gpu fails to idle
 * @adreno_dev: An Adreno GPU handle
 * @str: Message to log along with the debug information
 *
 * This function logs interesting registers and triggers a snapshot
 */
void a6xx_spin_idle_debug(struct adreno_device *adreno_dev,
		const char *str);

/**
 * a6xx_perfcounter_update - Update the IFPC perfcounter list
 * @adreno_dev: An Adreno GPU handle
 * @reg: Perfcounter reg struct to add/remove to the list
 * @update_reg: true if the perfcounter needs to be programmed by the CPU
 *
 * Return: 0 on success or -EBUSY if the lock couldn't be taken
 */
int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
		struct adreno_perfcount_register *reg, bool update_reg);

/*
 * a6xx_ringbuffer_init - Initialize the ringbuffers
 * @adreno_dev: An Adreno GPU handle
 *
 * Initialize the ringbuffer(s) for a6xx.
 * Return: 0 on success or negative on failure
 */
int a6xx_ringbuffer_init(struct adreno_device *adreno_dev);

/* Per-target perfcounter tables, selected at probe time */
extern const struct adreno_perfcounters adreno_a630_perfcounters;
extern const struct adreno_perfcounters adreno_a6xx_perfcounters;
extern const struct adreno_perfcounters adreno_a6xx_legacy_perfcounters;
extern const struct adreno_perfcounters adreno_a6xx_hwsched_perfcounters;
/**
 * a6xx_rdpm_mx_freq_update - Update the mx frequency
 * @gmu: An Adreno GMU handle
 * @freq: Frequency in KHz
 *
 * This function communicates GPU mx frequency changes to rdpm.
 */
void a6xx_rdpm_mx_freq_update(struct a6xx_gmu_device *gmu, u32 freq);

/**
 * a6xx_rdpm_cx_freq_update - Update the cx frequency
 * @gmu: An Adreno GMU handle
 * @freq: Frequency in KHz
 *
 * This function communicates GPU cx frequency changes to rdpm.
 */
void a6xx_rdpm_cx_freq_update(struct a6xx_gmu_device *gmu, u32 freq);
/**
 * a6xx_ringbuffer_addcmds - Submit a command to the ringbuffer
 * @adreno_dev: An Adreno GPU handle
 * @rb: Pointer to the ringbuffer to submit on
 * @drawctxt: Pointer to the draw context for the submission, or NULL for
 * internal submissions
 * @flags: Flags for the submission
 * @in: Commands to write to the ringbuffer
 * @dwords: Size of @in (in dwords)
 * @timestamp: Timestamp for the submission
 * @time: Optional pointer to a submit time structure
 *
 * Submit a command to the ringbuffer.
 * Return: 0 on success or negative on failure
 */
int a6xx_ringbuffer_addcmds(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb, struct adreno_context *drawctxt,
		u32 flags, u32 *in, u32 dwords, u32 timestamp,
		struct adreno_submit_time *time);

/**
 * a6xx_ringbuffer_submitcmd - Submit a user command to the ringbuffer
 * @adreno_dev: An Adreno GPU handle
 * @cmdobj: Pointer to a user command object
 * @flags: Internal submit flags
 * @time: Optional pointer to a adreno_submit_time container
 *
 * Return: 0 on success or negative on failure
 */
int a6xx_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
		struct kgsl_drawobj_cmd *cmdobj, u32 flags,
		struct adreno_submit_time *time);

/* Masked register write using the a6xx fence mechanism */
int a6xx_fenced_write(struct adreno_device *adreno_dev, u32 offset,
		u32 value, u32 mask);

int a6xx_ringbuffer_submit(struct adreno_ringbuffer *rb,
		struct adreno_submit_time *time, bool sync);

/* Build the CP_INIT command stream (A6XX_CP_INIT_DWORDS dwords) into @cmds */
void a6xx_cp_init_cmds(struct adreno_device *adreno_dev, u32 *cmds);

int a6xx_gmu_hfi_probe(struct adreno_device *adreno_dev);
  374. static inline const struct a6xx_gpudev *
  375. to_a6xx_gpudev(const struct adreno_gpudev *gpudev)
  376. {
  377. return container_of(gpudev, struct a6xx_gpudev, base);
  378. }
/**
 * a6xx_reset_preempt_records - Reset the preemption buffers
 * @adreno_dev: Handle to the adreno device
 *
 * Reset the preemption records at the time of hard reset
 */
void a6xx_reset_preempt_records(struct adreno_device *adreno_dev);

/**
 * a6xx_irq_pending - Check if there is any gpu irq pending
 * @adreno_dev: Handle to the adreno device
 *
 * Return true if there is any gpu irq pending
 */
bool a6xx_irq_pending(struct adreno_device *adreno_dev);

/* Coresight support is optional; stub out init when it is compiled out */
#ifdef CONFIG_QCOM_KGSL_CORESIGHT
void a6xx_coresight_init(struct adreno_device *device);
#else
static inline void a6xx_coresight_init(struct adreno_device *device) { }
#endif
  398. #endif