/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */
#ifndef __ADRENO_HFI_H
#define __ADRENO_HFI_H

#include "kgsl_util.h"

#define HW_FENCE_QUEUE_SIZE SZ_4K
#define HFI_QUEUE_SIZE SZ_4K /* bytes, must be base 4dw */
#define MAX_RCVD_PAYLOAD_SIZE 16 /* dwords */
#define MAX_RCVD_SIZE (MAX_RCVD_PAYLOAD_SIZE + 3) /* dwords */
#define HFI_MAX_MSG_SIZE (SZ_1K)

#define HFI_CMD_ID 0
#define HFI_MSG_ID 1
#define HFI_DBG_ID 2
#define HFI_DSP_ID_0 3

#define HFI_CMD_IDX 0
#define HFI_MSG_IDX 1
#define HFI_DBG_IDX 2
#define HFI_DSP_IDX_BASE 3
#define HFI_DSP_IDX_0 3

#define HFI_CMD_IDX_LEGACY 0
#define HFI_DSP_IDX_0_LEGACY 1
#define HFI_MSG_IDX_LEGACY 4
#define HFI_DBG_IDX_LEGACY 5

#define HFI_QUEUE_STATUS_DISABLED 0
#define HFI_QUEUE_STATUS_ENABLED 1

/* HTOF queue priority, 1 is highest priority */
#define HFI_CMD_PRI 10
#define HFI_MSG_PRI 10
#define HFI_DBG_PRI 40
#define HFI_DSP_PRI_0 20

#define HFI_IRQ_SIDEMSGQ_MASK BIT(1)
#define HFI_IRQ_DBGQ_MASK BIT(2)
#define HFI_IRQ_CM3_FAULT_MASK BIT(15)
#define HFI_IRQ_OOB_MASK GENMASK(31, 16)
#define HFI_IRQ_MASK (HFI_IRQ_SIDEMSGQ_MASK |\
		HFI_IRQ_DBGQ_MASK |\
		HFI_IRQ_CM3_FAULT_MASK)

#define DCVS_ACK_NONBLOCK 0
#define DCVS_ACK_BLOCK 1

#define HFI_FEATURE_DCVS 0
#define HFI_FEATURE_HWSCHED 1
#define HFI_FEATURE_PREEMPTION 2
#define HFI_FEATURE_CLOCKS_ON 3
#define HFI_FEATURE_BUS_ON 4
#define HFI_FEATURE_RAIL_ON 5
#define HFI_FEATURE_HWCG 6
#define HFI_FEATURE_LM 7
#define HFI_FEATURE_THROTTLE 8
#define HFI_FEATURE_IFPC 9
#define HFI_FEATURE_NAP 10
#define HFI_FEATURE_BCL 11
#define HFI_FEATURE_ACD 12
#define HFI_FEATURE_DIDT 13
#define HFI_FEATURE_DEPRECATED 14
#define HFI_FEATURE_CB 15
#define HFI_FEATURE_KPROF 16
#define HFI_FEATURE_BAIL_OUT_TIMER 17
#define HFI_FEATURE_GMU_STATS 18
#define HFI_FEATURE_DBQ 19
#define HFI_FEATURE_MINBW 20
#define HFI_FEATURE_CLX 21
#define HFI_FEATURE_LSR 23
#define HFI_FEATURE_LPAC 24
#define HFI_FEATURE_HW_FENCE 25
#define HFI_FEATURE_PERF_NORETAIN 26
#define HFI_FEATURE_DMS 27
#define HFI_FEATURE_AQE 29

/* Types to be used with H2F_MSG_TABLE */
enum hfi_table_type {
	HFI_TABLE_BW_VOTE = 0,
	HFI_TABLE_GPU_PERF = 1,
	HFI_TABLE_DIDT = 2,
	HFI_TABLE_ACD = 3,
	HFI_TABLE_CLX_V1 = 4,
	HFI_TABLE_CLX_V2 = 5,
	HFI_TABLE_THERM = 6,
	HFI_TABLE_DCVS_DATA = 7,
	HFI_TABLE_MAX,
};

/* A6xx uses a different value for KPROF */
#define HFI_FEATURE_A6XX_KPROF 14

/* For Gen7 & Gen8 ACD */
#define F_PWR_ACD_CALIBRATE 78

#define HFI_VALUE_FT_POLICY 100
#define HFI_VALUE_RB_MAX_CMDS 101
#define HFI_VALUE_CTX_MAX_CMDS 102
#define HFI_VALUE_ADDRESS 103
#define HFI_VALUE_MAX_GPU_PERF_INDEX 104
#define HFI_VALUE_MIN_GPU_PERF_INDEX 105
#define HFI_VALUE_MAX_BW_PERF_INDEX 106
#define HFI_VALUE_MIN_BW_PERF_INDEX 107
#define HFI_VALUE_MAX_GPU_THERMAL_INDEX 108
#define HFI_VALUE_GPUCLK 109
#define HFI_VALUE_CLK_TIME 110
#define HFI_VALUE_LOG_GROUP 111
#define HFI_VALUE_LOG_EVENT_ON 112
#define HFI_VALUE_LOG_EVENT_OFF 113
#define HFI_VALUE_DCVS_OBJ 114
#define HFI_VALUE_LM_CS0 115
#define HFI_VALUE_DBG 116
#define HFI_VALUE_BIN_TIME 117
#define HFI_VALUE_LOG_STREAM_ENABLE 119
#define HFI_VALUE_PREEMPT_COUNT 120
#define HFI_VALUE_CONTEXT_QUEUE 121
#define HFI_VALUE_GMU_AB_VOTE 122
#define HFI_VALUE_RB_GPU_QOS 123
#define HFI_VALUE_RB_IB_RULE 124
#define HFI_VALUE_GMU_WARMBOOT 125

#define HFI_VALUE_GLOBAL_TOKEN 0xFFFFFFFF

#define HFI_CTXT_FLAG_PMODE BIT(0)
#define HFI_CTXT_FLAG_SWITCH_INTERNAL BIT(1)
#define HFI_CTXT_FLAG_SWITCH BIT(3)
#define HFI_CTXT_FLAG_NOTIFY BIT(5)
#define HFI_CTXT_FLAG_NO_FAULT_TOLERANCE BIT(9)
#define HFI_CTXT_FLAG_PWR_RULE BIT(11)
#define HFI_CTXT_FLAG_PRIORITY_MASK GENMASK(15, 12)
#define HFI_CTXT_FLAG_IFH_NOP BIT(16)
#define HFI_CTXT_FLAG_SECURE BIT(17)
#define HFI_CTXT_FLAG_TYPE_MASK GENMASK(24, 20)
#define HFI_CTXT_FLAG_TYPE_ANY 0
#define HFI_CTXT_FLAG_TYPE_GL 1
#define HFI_CTXT_FLAG_TYPE_CL 2
#define HFI_CTXT_FLAG_TYPE_C2D 3
#define HFI_CTXT_FLAG_TYPE_RS 4
#define HFI_CTXT_FLAG_TYPE_VK 5
#define HFI_CTXT_FLAG_TYPE_UNKNOWN 0x1e
#define HFI_CTXT_FLAG_PREEMPT_STYLE_MASK GENMASK(27, 25)
#define HFI_CTXT_FLAG_PREEMPT_STYLE_ANY 0
#define HFI_CTXT_FLAG_PREEMPT_STYLE_RB 1
#define HFI_CTXT_FLAG_PREEMPT_STYLE_FG 2

/* Default sampling interval in units of 50 us */
#define HFI_FEATURE_GMU_STATS_INTERVAL 4
enum hfi_mem_kind {
	/** @HFI_MEMKIND_GENERIC: Used for requesting generic memory */
	HFI_MEMKIND_GENERIC = 0,
	/** @HFI_MEMKIND_RB: Used for requesting ringbuffer memory */
	HFI_MEMKIND_RB,
	/** @HFI_MEMKIND_SCRATCH: Used for requesting scratch memory */
	HFI_MEMKIND_SCRATCH,
	/**
	 * @HFI_MEMKIND_CSW_SMMU_INFO: Used for requesting SMMU record for
	 * preemption context switching
	 */
	HFI_MEMKIND_CSW_SMMU_INFO,
	/**
	 * @HFI_MEMKIND_CSW_PRIV_NON_SECURE: Used for requesting privileged non
	 * secure preemption records
	 */
	HFI_MEMKIND_CSW_PRIV_NON_SECURE,
	/**
	 * @HFI_MEMKIND_CSW_PRIV_SECURE: Used for requesting privileged secure
	 * preemption records
	 */
	HFI_MEMKIND_CSW_PRIV_SECURE,
	/**
	 * @HFI_MEMKIND_CSW_NON_PRIV: Used for requesting non privileged per
	 * context preemption buffer
	 */
	HFI_MEMKIND_CSW_NON_PRIV,
	/**
	 * @HFI_MEMKIND_CSW_COUNTER: Used for requesting preemption performance
	 * counter save/restore buffer
	 */
	HFI_MEMKIND_CSW_COUNTER,
	/**
	 * @HFI_MEMKIND_CTXTREC_PREEMPT_CNTR: Used for requesting preemption
	 * counter buffer
	 */
	HFI_MEMKIND_CTXTREC_PREEMPT_CNTR,
	/** @HFI_MEMKIND_SYS_LOG: Used for requesting system log memory */
	HFI_MEMKIND_SYS_LOG,
	/** @HFI_MEMKIND_CRASH_DUMP: Used for requesting crash dumper memory */
	HFI_MEMKIND_CRASH_DUMP,
	/**
	 * @HFI_MEMKIND_MMIO_DPU: Used for requesting Display processing unit's
	 * register space
	 */
	HFI_MEMKIND_MMIO_DPU,
	/**
	 * @HFI_MEMKIND_MMIO_TCSR: Used for requesting Top CSR (contains SoC
	 * doorbells) register space
	 */
	HFI_MEMKIND_MMIO_TCSR,
	/**
	 * @HFI_MEMKIND_MMIO_QDSS_STM: Used for requesting QDSS STM register
	 * space
	 */
	HFI_MEMKIND_MMIO_QDSS_STM,
	/** @HFI_MEMKIND_PROFILE: Used for kernel profiling */
	HFI_MEMKIND_PROFILE,
	/** @HFI_MEMKIND_USER_PROFILE_IBS: Used for user profiling */
	HFI_MEMKIND_USER_PROFILE_IBS,
	/** @HFI_MEMKIND_CMD_BUFFER: Used for composing ringbuffer content */
	HFI_MEMKIND_CMD_BUFFER,
	/**
	 * @HFI_MEMKIND_GPU_BUSY_DATA_BUFFER: Used for GPU busy buffer for
	 * all the contexts
	 */
	HFI_MEMKIND_GPU_BUSY_DATA_BUFFER,
	/**
	 * @HFI_MEMKIND_GPU_BUSY_CMD_BUFFER: Used for GPU busy cmd buffer
	 * (Only readable to GPU)
	 */
	HFI_MEMKIND_GPU_BUSY_CMD_BUFFER,
	/**
	 * @HFI_MEMKIND_MMIO_IPC_CORE: Used for IPC_core region mapping to GMU space
	 * for EVA to GPU communication.
	 */
	HFI_MEMKIND_MMIO_IPC_CORE,
	/** @HFI_MEMKIND_MMIO_IPCC_AOSS: Used for IPCC AOSS, second memory region */
	HFI_MEMKIND_MMIO_IPCC_AOSS,
	/**
	 * @HFI_MEMKIND_CSW_LPAC_PRIV_NON_SECURE: Used for privileged nonsecure
	 * memory for LPAC context record
	 */
	HFI_MEMKIND_CSW_LPAC_PRIV_NON_SECURE,
	/** @HFI_MEMKIND_MEMSTORE: Buffer used to query a context's GPU sop/eop timestamps */
	HFI_MEMKIND_MEMSTORE,
	/** @HFI_MEMKIND_HW_FENCE: Hardware fence Tx/Rx headers and queues */
	HFI_MEMKIND_HW_FENCE,
	/** @HFI_MEMKIND_PREEMPT_SCRATCH: Used for preemption scratch memory */
	HFI_MEMKIND_PREEMPT_SCRATCH,
	/**
	 * @HFI_MEMKIND_AQE_BUFFER: Sandbox memory used by AQE to switch
	 * between LPAC and GC
	 */
	HFI_MEMKIND_AQE_BUFFER,
	HFI_MEMKIND_MAX,
};
static const char * const hfi_memkind_strings[] = {
	[HFI_MEMKIND_GENERIC] = "GMU GENERIC",
	[HFI_MEMKIND_RB] = "GMU RB",
	[HFI_MEMKIND_SCRATCH] = "GMU SCRATCH",
	[HFI_MEMKIND_CSW_SMMU_INFO] = "GMU SMMU INFO",
	[HFI_MEMKIND_CSW_PRIV_NON_SECURE] = "GMU CSW PRIV NON SECURE",
	[HFI_MEMKIND_CSW_PRIV_SECURE] = "GMU CSW PRIV SECURE",
	[HFI_MEMKIND_CSW_NON_PRIV] = "GMU CSW NON PRIV",
	[HFI_MEMKIND_CSW_COUNTER] = "GMU CSW COUNTER",
	[HFI_MEMKIND_CTXTREC_PREEMPT_CNTR] = "GMU PREEMPT CNTR",
	[HFI_MEMKIND_SYS_LOG] = "GMU SYS LOG",
	[HFI_MEMKIND_CRASH_DUMP] = "GMU CRASHDUMP",
	[HFI_MEMKIND_MMIO_DPU] = "GMU MMIO DPU",
	[HFI_MEMKIND_MMIO_TCSR] = "GMU MMIO TCSR",
	[HFI_MEMKIND_MMIO_QDSS_STM] = "GMU MMIO QDSS STM",
	[HFI_MEMKIND_PROFILE] = "GMU KERNEL PROFILING",
	[HFI_MEMKIND_USER_PROFILE_IBS] = "GMU USER PROFILING",
	[HFI_MEMKIND_CMD_BUFFER] = "GMU CMD BUFFER",
	[HFI_MEMKIND_GPU_BUSY_DATA_BUFFER] = "GMU BUSY DATA BUFFER",
	[HFI_MEMKIND_GPU_BUSY_CMD_BUFFER] = "GMU BUSY CMD BUFFER",
	[HFI_MEMKIND_MMIO_IPC_CORE] = "GMU MMIO IPC",
	[HFI_MEMKIND_MMIO_IPCC_AOSS] = "GMU MMIO IPCC AOSS",
	[HFI_MEMKIND_CSW_LPAC_PRIV_NON_SECURE] = "GMU CSW LPAC PRIV NON SECURE",
	[HFI_MEMKIND_MEMSTORE] = "GMU MEMSTORE",
	[HFI_MEMKIND_HW_FENCE] = "GMU HW FENCE",
	[HFI_MEMKIND_PREEMPT_SCRATCH] = "GMU PREEMPTION",
	[HFI_MEMKIND_AQE_BUFFER] = "GMU AQE BUFFER",
	[HFI_MEMKIND_MAX] = "GMU UNKNOWN",
};
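
/*
 * Example (illustrative only; memkind_str() is a hypothetical helper,
 * not part of this driver): the table above is indexed by enum
 * hfi_mem_kind and carries a "GMU UNKNOWN" entry at HFI_MEMKIND_MAX, so
 * a bounds-safe lookup clamps first:
 *
 *	static const char *memkind_str(u32 kind)
 *	{
 *		if (kind >= HFI_MEMKIND_MAX)
 *			kind = HFI_MEMKIND_MAX;
 *		return hfi_memkind_strings[kind];
 *	}
 */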
/* CP/GFX pipeline can access */
#define HFI_MEMFLAG_GFX_ACC BIT(0)
/* Buffer has APRIV protection in GFX PTEs */
#define HFI_MEMFLAG_GFX_PRIV BIT(1)
/* Buffer is read-write for GFX PTEs. A 0 indicates read-only */
#define HFI_MEMFLAG_GFX_WRITEABLE BIT(2)
/* GMU can access */
#define HFI_MEMFLAG_GMU_ACC BIT(3)
/* Buffer has APRIV protection in GMU PTEs */
#define HFI_MEMFLAG_GMU_PRIV BIT(4)
/* Buffer is read-write for GMU PTEs. A 0 indicates read-only */
#define HFI_MEMFLAG_GMU_WRITEABLE BIT(5)
/* Buffer is located in GMU's non-cached bufferable VA range */
#define HFI_MEMFLAG_GMU_BUFFERABLE BIT(6)
/* Buffer is located in GMU's cacheable VA range */
#define HFI_MEMFLAG_GMU_CACHEABLE BIT(7)
/* Host can access */
#define HFI_MEMFLAG_HOST_ACC BIT(8)
/* Host initializes (zero-init) the buffer */
#define HFI_MEMFLAG_HOST_INIT BIT(9)
/* Gfx buffer needs to be secure */
#define HFI_MEMFLAG_GFX_SECURE BIT(12)

/**
 * struct hfi_queue_table_header - HFI queue table structure
 * @version: HFI protocol version
 * @size: queue table size in dwords
 * @qhdr0_offset: first queue header offset (dwords) in this table
 * @qhdr_size: queue header size
 * @num_q: number of queues defined in this table
 * @num_active_q: number of active queues
 */
struct hfi_queue_table_header {
	u32 version;
	u32 size;
	u32 qhdr0_offset;
	u32 qhdr_size;
	u32 num_q;
	u32 num_active_q;
} __packed;

/**
 * struct gmu_context_queue_header - GMU context queue header structure
 */
struct gmu_context_queue_header {
	/** @version: Version of the header */
	u32 version;
	/** @start_addr: GMU VA of start of the queue */
	u32 start_addr;
	/** @queue_size: queue size in dwords */
	u32 queue_size;
	/** @out_fence_ts: Timestamp of last hardware fence sent to Tx Queue */
	volatile u32 out_fence_ts;
	/** @sync_obj_ts: Timestamp of last sync object that GMU has digested */
	volatile u32 sync_obj_ts;
	/** @read_index: Read index of the queue */
	volatile u32 read_index;
	/** @write_index: Write index of the queue */
	volatile u32 write_index;
	/**
	 * @hw_fence_buffer_va: GMU VA of the buffer to store output hardware fences for this
	 * context
	 */
	u32 hw_fence_buffer_va;
	/**
	 * @hw_fence_buffer_size: Size of the buffer to store output hardware fences for this
	 * context
	 */
	u32 hw_fence_buffer_size;
	u32 unused1;
	u32 unused2;
	u32 unused3;
} __packed;
/**
 * struct hfi_queue_header - HFI queue header structure
 * @status: active: 1; inactive: 0
 * @start_addr: starting address of the queue in GMU VA space
 * @type: queue type encoding the priority, ID and send/receive types
 * @queue_size: size of the queue
 * @msg_size: size of the message if each message has a fixed size.
 * Otherwise, 0 means the queue holds variable-size messages.
 * @read_index: read index of the queue
 * @write_index: write index of the queue
 */
struct hfi_queue_header {
	u32 status;
	u32 start_addr;
	u32 type;
	u32 queue_size;
	u32 msg_size;
	u32 unused0;
	u32 unused1;
	u32 unused2;
	u32 unused3;
	u32 unused4;
	volatile u32 read_index;
	volatile u32 write_index;
} __packed;
#define HFI_MSG_CMD 0 /* V1 and V2 */
#define HFI_MSG_ACK 1 /* V2 only */

/* Used to NOP a command when executing warmboot sequence */
#define HFI_MSG_NOP BIT(18)
/* Used to record a command when executing coldboot sequence */
#define HFI_MSG_RECORD BIT(19)

#define HFI_V1_MSG_POST 1 /* V1 only */
#define HFI_V1_MSG_ACK 2 /* V1 only */

#define MSG_HDR_SET_SIZE(hdr, size) \
	(((size & 0xFF) << 8) | hdr)

#define CREATE_MSG_HDR(id, type) \
	(((type) << 16) | ((id) & 0xFF))

#define ACK_MSG_HDR(id) CREATE_MSG_HDR(id, HFI_MSG_ACK)

#define HFI_QUEUE_DEFAULT_CNT 3
#define HFI_QUEUE_DISPATCH_MAX_CNT 14
#define HFI_QUEUE_HDR_MAX (HFI_QUEUE_DEFAULT_CNT + HFI_QUEUE_DISPATCH_MAX_CNT)

struct hfi_queue_table {
	struct hfi_queue_table_header qtbl_hdr;
	struct hfi_queue_header qhdr[HFI_QUEUE_HDR_MAX];
} __packed;

#define HFI_QUEUE_OFFSET(i) \
	(ALIGN(sizeof(struct hfi_queue_table), SZ_16) + \
	 ((i) * HFI_QUEUE_SIZE))

#define GMU_QUEUE_START_ADDR(gmuaddr, i) \
	(gmuaddr + HFI_QUEUE_OFFSET(i))

#define HOST_QUEUE_START_ADDR(hfi_mem, i) \
	((hfi_mem)->hostptr + HFI_QUEUE_OFFSET(i))
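
/*
 * Worked example (illustrative only): the queues are laid out back to
 * back after the 16-byte-aligned queue table, so with HFI_QUEUE_SIZE
 * equal to 4 KB the debug queue (index HFI_DBG_IDX == 2) starts at
 *
 *	HFI_QUEUE_OFFSET(2) = ALIGN(sizeof(struct hfi_queue_table), SZ_16)
 *				+ 2 * SZ_4K
 *
 * in both the GMU VA view (GMU_QUEUE_START_ADDR) and the host view
 * (HOST_QUEUE_START_ADDR) of the same shared buffer.
 */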
#define MSG_HDR_GET_ID(hdr) ((hdr) & 0xFF)
#define MSG_HDR_GET_SIZE(hdr) (((hdr) >> 8) & 0xFF)
#define MSG_HDR_GET_TYPE(hdr) (((hdr) >> 16) & 0xF)
#define MSG_HDR_GET_SEQNUM(hdr) (((hdr) >> 20) & 0xFFF)

/* Clear the HFI_MSG_RECORD bit from both headers since some acks may have it set, and some not. */
#define CMP_HFI_ACK_HDR(sent, rcvd) ((sent &= ~HFI_MSG_RECORD) == (rcvd &= ~HFI_MSG_RECORD))

#define MSG_HDR_SET_SEQNUM(hdr, num) \
	(((hdr) & 0xFFFFF) | ((num) << 20))

#define MSG_HDR_SET_SEQNUM_SIZE(hdr, seqnum, sizedwords) \
	(FIELD_PREP(GENMASK(31, 20), seqnum) | FIELD_PREP(GENMASK(15, 8), sizedwords) | hdr)

#define MSG_HDR_SET_TYPE(hdr, type) \
	(((hdr) & 0xFFFFF) | ((type) << 16))

#define QUEUE_HDR_TYPE(id, prio, rtype, stype) \
	(((id) & 0xFF) | (((prio) & 0xFF) << 8) | \
	 (((rtype) & 0xFF) << 16) | (((stype) & 0xFF) << 24))
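
/*
 * Example (illustrative only): an HFI header packs the message ID in
 * bits [7:0], the size in dwords in [15:8], the type in [19:16] and a
 * sequence number in [31:20]. Building a 3-dword H2F_MSG_TEST command
 * with sequence number 5 under these macros:
 *
 *	u32 hdr = CREATE_MSG_HDR(H2F_MSG_TEST, HFI_MSG_CMD);
 *
 *	hdr = MSG_HDR_SET_SEQNUM_SIZE(hdr, 5, 3);
 *	// MSG_HDR_GET_ID(hdr) == H2F_MSG_TEST
 *	// MSG_HDR_GET_SIZE(hdr) == 3
 *	// MSG_HDR_GET_SEQNUM(hdr) == 5
 */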
#define HFI_RSP_TIMEOUT 1000 /* msec */

#define HFI_IRQ_MSGQ_MASK BIT(0)

enum hfi_msg_type {
	H2F_MSG_INIT = 0,
	H2F_MSG_FW_VER = 1,
	H2F_MSG_LM_CFG = 2,
	H2F_MSG_BW_VOTE_TBL = 3,
	H2F_MSG_PERF_TBL = 4,
	H2F_MSG_TEST = 5,
	H2F_MSG_ACD_TBL = 7,
	H2F_MSG_CLX_TBL = 8,
	H2F_MSG_START = 10,
	H2F_MSG_FEATURE_CTRL = 11,
	H2F_MSG_GET_VALUE = 12,
	H2F_MSG_SET_VALUE = 13,
	H2F_MSG_CORE_FW_START = 14,
	H2F_MSG_TABLE = 15,
	F2H_MSG_MEM_ALLOC = 20,
	H2F_MSG_GX_BW_PERF_VOTE = 30,
	H2F_MSG_FW_HALT = 32,
	H2F_MSG_PREPARE_SLUMBER = 33,
	F2H_MSG_ERR = 100,
	F2H_MSG_DEBUG = 101,
	F2H_MSG_LOG_BLOCK = 102,
	F2H_MSG_GMU_CNTR_REGISTER = 110,
	F2H_MSG_GMU_CNTR_RELEASE = 111,
	F2H_MSG_ACK = 126, /* Deprecated for v2.0 */
	H2F_MSG_ACK = 127, /* Deprecated for v2.0 */
	H2F_MSG_REGISTER_CONTEXT = 128,
	H2F_MSG_UNREGISTER_CONTEXT = 129,
	H2F_MSG_ISSUE_CMD = 130,
	H2F_MSG_ISSUE_CMD_RAW = 131,
	H2F_MSG_TS_NOTIFY = 132,
	F2H_MSG_TS_RETIRE = 133,
	H2F_MSG_CONTEXT_POINTERS = 134,
	H2F_MSG_ISSUE_LPAC_CMD_RAW = 135,
	H2F_MSG_CONTEXT_RULE = 140, /* AKA constraint */
	H2F_MSG_ISSUE_RECURRING_CMD = 141,
	F2H_MSG_CONTEXT_BAD = 150,
	H2F_MSG_HW_FENCE_INFO = 151,
	H2F_MSG_ISSUE_SYNCOBJ = 152,
	F2H_MSG_SYNCOBJ_QUERY = 153,
	H2F_MSG_WARMBOOT_CMD = 154,
	F2H_MSG_PROCESS_TRACE = 155,
	HFI_MAX_ID,
};

enum gmu_ret_type {
	GMU_SUCCESS = 0,
	GMU_ERROR_FATAL,
	GMU_ERROR_MEM_FAIL,
	GMU_ERROR_INVAL_PARAM,
	GMU_ERROR_NULL_PTR,
	GMU_ERROR_OUT_OF_BOUNDS,
	GMU_ERROR_TIMEOUT,
	GMU_ERROR_NOT_SUPPORTED,
	GMU_ERROR_NO_ENTRY,
};
/* H2F */
struct hfi_gmu_init_cmd {
	u32 hdr;
	u32 seg_id;
	u32 dbg_buffer_addr;
	u32 dbg_buffer_size;
	u32 boot_state;
} __packed;

/* H2F */
struct hfi_fw_version_cmd {
	u32 hdr;
	u32 supported_ver;
} __packed;

/* H2F */
struct hfi_bwtable_cmd {
	u32 hdr;
	u32 bw_level_num;
	u32 cnoc_cmds_num;
	u32 ddr_cmds_num;
	u32 cnoc_wait_bitmask;
	u32 ddr_wait_bitmask;
	u32 cnoc_cmd_addrs[MAX_CNOC_CMDS];
	u32 cnoc_cmd_data[MAX_CNOC_LEVELS][MAX_CNOC_CMDS];
	u32 ddr_cmd_addrs[MAX_BW_CMDS];
	u32 ddr_cmd_data[MAX_BW_LEVELS][MAX_BW_CMDS];
} __packed;

struct opp_gx_desc {
	u32 vote;
	/* This is 'acdLvl' in gmu fw which is now repurposed for cx vote */
	u32 cx_vote;
	u32 freq;
} __packed;

struct opp_desc {
	u32 vote;
	u32 freq;
} __packed;

/* H2F */
struct hfi_dcvstable_v1_cmd {
	u32 hdr;
	u32 gpu_level_num;
	u32 gmu_level_num;
	struct opp_desc gx_votes[MAX_GX_LEVELS_LEGACY];
	struct opp_desc cx_votes[MAX_CX_LEVELS];
} __packed;

/* H2F */
struct hfi_dcvstable_cmd {
	u32 hdr;
	u32 gpu_level_num;
	u32 gmu_level_num;
	struct opp_gx_desc gx_votes[MAX_GX_LEVELS_LEGACY];
	struct opp_desc cx_votes[MAX_CX_LEVELS];
} __packed;

/* H2F */
struct hfi_table_entry {
	u32 count;
	u32 stride;
	u32 data[];
} __packed;

struct hfi_table_cmd {
	u32 hdr;
	u32 version;
	u32 type;
	struct hfi_table_entry entry[];
} __packed;

#define MAX_ACD_STRIDE 2
#define MAX_ACD_NUM_LEVELS KGSL_MAX_PWRLEVELS

/* H2F */
struct hfi_acd_table_cmd {
	u32 hdr;
	u32 version;
	u32 enable_by_level;
	u32 stride;
	u32 num_levels;
	u32 data[MAX_ACD_NUM_LEVELS * MAX_ACD_STRIDE];
} __packed;

struct hfi_clx_table_v1_cmd {
	/** @hdr: HFI header message */
	u32 hdr;
	/**
	 * @data0: bits[0:15] Feature enable control
	 *	bits[16:31] Revision control
	 */
	u32 data0;
	/**
	 * @data1: bits[0:15] Migration time
	 *	bits[16:21] Current rating
	 *	bits[22:27] Phases for domain
	 *	bits[28:28] Path notifications
	 *	bits[29:31] Extra feature bits
	 */
	u32 data1;
	/** @clxt: CLX time in microseconds */
	u32 clxt;
	/** @clxh: CLH time in microseconds */
	u32 clxh;
	/** @urgmode: Urgent HW throttle mode of operation */
	u32 urgmode;
	/** @lkgen: Enable leakage current estimate */
	u32 lkgen;
} __packed;

#define CLX_DOMAINS_V2 2

struct clx_domain_v2 {
	/**
	 * @data0: bits[0:15] Migration time
	 *	bits[16:21] Current rating
	 *	bits[22:27] Phases for domain
	 *	bits[28:28] Path notifications
	 *	bits[29:31] Extra feature bits
	 */
	u32 data0;
	/** @clxt: CLX time in microseconds */
	u32 clxt;
	/** @clxh: CLH time in microseconds */
	u32 clxh;
	/** @urgmode: Urgent HW throttle mode of operation */
	u32 urgmode;
	/** @lkgen: Enable leakage current estimate */
	u32 lkgen;
	/** @currbudget: Current budget */
	u32 currbudget;
} __packed;

/* H2F */
struct hfi_clx_table_v2_cmd {
	/** @hdr: HFI header message */
	u32 hdr;
	/** @version: Version identifier for the format used for domains */
	u32 version;
	/** @domain: GFX and MXC domain information */
	struct clx_domain_v2 domain[CLX_DOMAINS_V2];
} __packed;

/* H2F */
struct hfi_test_cmd {
	u32 hdr;
	u32 data;
} __packed;

/* H2F */
struct hfi_start_cmd {
	u32 hdr;
} __packed;

/* H2F */
struct hfi_feature_ctrl_cmd {
	u32 hdr;
	u32 feature;
	u32 enable;
	u32 data;
} __packed;

/* H2F */
struct hfi_get_value_cmd {
	u32 hdr;
	u32 type;
	u32 subtype;
} __packed;

/* Internal */
struct hfi_get_value_req {
	struct hfi_get_value_cmd cmd;
	u32 data[16];
} __packed;

/* F2H */
struct hfi_get_value_reply_cmd {
	u32 hdr;
	u32 req_hdr;
	u32 data[16];
} __packed;

/* H2F */
struct hfi_set_value_cmd {
	u32 hdr;
	u32 type;
	u32 subtype;
	u32 data;
} __packed;

/* H2F */
struct hfi_core_fw_start_cmd {
	u32 hdr;
	u32 handle;
} __packed;

struct hfi_mem_alloc_desc_legacy {
	u64 gpu_addr;
	u32 flags;
	u32 mem_kind;
	u32 host_mem_handle;
	u32 gmu_mem_handle;
	u32 gmu_addr;
	u32 size; /* Bytes */
} __packed;

struct hfi_mem_alloc_desc {
	u64 gpu_addr;
	u32 flags;
	u32 mem_kind;
	u32 host_mem_handle;
	u32 gmu_mem_handle;
	u32 gmu_addr;
	u32 size; /* Bytes */
	/**
	 * @align: bits[0:7] specify alignment requirement of the GMU VA specified as a power of
	 * two. bits[8:15] specify alignment requirement for the size of the GMU mapping. For
	 * example, a decimal value of 20 = (1 << 20) = 1 MB alignment
	 */
	u32 align;
} __packed;
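
/*
 * Worked example (illustrative only): @align packs two log2 values. An
 * align of 0x0c14 requests a (1 << 0x14) = 1 MB aligned GMU VA
 * (bits [7:0] = 20) and a (1 << 0x0c) = 4 KB aligned mapping size
 * (bits [15:8] = 12). See hfi_get_gmu_va_alignment() and
 * hfi_get_gmu_sz_alignment() near the end of this file for the decode.
 */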
struct hfi_mem_alloc_entry {
	struct hfi_mem_alloc_desc desc;
	struct kgsl_memdesc *md;
};

/* F2H */
struct hfi_mem_alloc_cmd_legacy {
	u32 hdr;
	u32 reserved; /* Padding to ensure alignment of 'desc' below */
	struct hfi_mem_alloc_desc_legacy desc;
} __packed;

struct hfi_mem_alloc_cmd {
	u32 hdr;
	u32 version;
	struct hfi_mem_alloc_desc desc;
} __packed;

/* H2F */
struct hfi_mem_alloc_reply_cmd {
	u32 hdr;
	u32 req_hdr;
	struct hfi_mem_alloc_desc desc;
} __packed;

/* H2F */
struct hfi_gx_bw_perf_vote_cmd {
	u32 hdr;
	u32 ack_type;
	u32 freq;
	u32 bw;
} __packed;

/* H2F */
struct hfi_fw_halt_cmd {
	u32 hdr;
	u32 en_halt;
} __packed;

/* H2F */
struct hfi_prep_slumber_cmd {
	u32 hdr;
	u32 bw;
	u32 freq;
} __packed;

/* F2H */
struct hfi_err_cmd {
	u32 hdr;
	u32 error_code;
	u32 data[16];
} __packed;

/* F2H */
struct hfi_debug_cmd {
	u32 hdr;
	u32 type;
	u32 timestamp;
	u32 data;
} __packed;

/* F2H */
struct hfi_trace_cmd {
	u32 hdr;
	u32 version;
	u64 identifier;
} __packed;

/* Trace packet definition */
struct gmu_trace_packet {
	u32 hdr;
	u32 trace_id;
	u64 ticks;
	u32 payload[];
} __packed;

/* F2H */
struct hfi_gmu_cntr_register_cmd {
	u32 hdr;
	u32 group_id;
	u32 countable;
} __packed;

/* H2F */
struct hfi_gmu_cntr_register_reply_cmd {
	u32 hdr;
	u32 req_hdr;
	u32 group_id;
	u32 countable;
	u32 cntr_lo;
	u32 cntr_hi;
} __packed;

/* F2H */
struct hfi_gmu_cntr_release_cmd {
	u32 hdr;
	u32 group_id;
	u32 countable;
} __packed;

/* H2F */
struct hfi_register_ctxt_cmd {
	u32 hdr;
	u32 ctxt_id;
	u32 flags;
	u64 pt_addr;
	u32 ctxt_idr;
	u32 ctxt_bank;
} __packed;

/* H2F */
struct hfi_unregister_ctxt_cmd {
	u32 hdr;
	u32 ctxt_id;
	u32 ts;
} __packed;

struct hfi_issue_ib {
	u64 addr;
	u32 size;
} __packed;

/* H2F */
/* The length of *buf will be embedded in the hdr */
struct hfi_issue_cmd_raw_cmd {
	u32 hdr;
	u32 *buf;
} __packed;

/* Internal */
struct hfi_issue_cmd_raw_req {
	u32 queue;
	u32 ctxt_id;
	u32 len;
	u32 *buf;
} __packed;

/* H2F */
struct hfi_ts_notify_cmd {
	u32 hdr;
	u32 ctxt_id;
	u32 ts;
} __packed;

#define CMDBATCH_SUCCESS 0
#define CMDBATCH_RETIRED 1
#define CMDBATCH_ERROR 2
#define CMDBATCH_SKIP 3

#define CMDBATCH_PROFILING BIT(4)
#define CMDBATCH_EOF BIT(8)
#define CMDBATCH_INDIRECT BIT(9)
#define CMDBATCH_RECURRING_START BIT(18)
#define CMDBATCH_RECURRING_STOP BIT(19)

/* This indicates that the SYNCOBJ is kgsl output fence */
#define GMU_SYNCOBJ_FLAG_KGSL_FENCE_BIT 0
/* This indicates that the SYNCOBJ is signaled */
#define GMU_SYNCOBJ_FLAG_SIGNALED_BIT 1
/* This indicates that the SYNCOBJ's software status is queried */
#define GMU_SYNCOBJ_FLAG_QUERY_SW_STATUS_BIT 2
/* This indicates that the SYNCOBJ's software status is signaled */
#define GMU_SYNCOBJ_FLAG_SW_STATUS_SIGNALED_BIT 3
/* This indicates that the SYNCOBJ's software status is pending */
#define GMU_SYNCOBJ_FLAG_SW_STATUS_PENDING_BIT 4

#define GMU_SYNCOBJ_FLAGS \
	{ BIT(GMU_SYNCOBJ_FLAG_KGSL_FENCE_BIT), "KGSL"}, \
	{ BIT(GMU_SYNCOBJ_FLAG_SIGNALED_BIT), "SIGNALED"}, \
	{ BIT(GMU_SYNCOBJ_FLAG_QUERY_SW_STATUS_BIT), "QUERIED"}, \
	{ BIT(GMU_SYNCOBJ_FLAG_SW_STATUS_SIGNALED_BIT), "SW_SIGNALED"}, \
	{ BIT(GMU_SYNCOBJ_FLAG_SW_STATUS_PENDING_BIT), "SW_PENDING"}
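
/*
 * The { mask, "name" } pairs above follow the show-flags convention used
 * by kernel trace events; presumably a tracepoint prints a syncobj's
 * flags with something like the sketch below (TP_printk context
 * assumed, not shown in this header):
 *
 *	__print_flags(flags, "|", GMU_SYNCOBJ_FLAGS)
 */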
/* F2H */
struct hfi_ts_retire_cmd {
	u32 hdr;
	u32 ctxt_id;
	u32 ts;
	u32 type;
	u64 submitted_to_rb;
	u64 sop;
	u64 eop;
	u64 retired_on_gmu;
	u64 active;
	u32 version;
} __packed;

/* H2F */
struct hfi_context_pointers_cmd {
	u32 hdr;
	u32 ctxt_id;
	u64 sop_addr;
	u64 eop_addr;
	u64 user_ctxt_record_addr;
	u32 version;
	u32 gmu_context_queue_addr;
} __packed;

/* H2F */
struct hfi_context_rule_cmd {
	u32 hdr;
	u32 ctxt_id;
	u32 type;
	u32 status;
} __packed;

struct fault_info {
	u32 ctxt_id;
	u32 policy;
	u32 ts;
} __packed;

/* F2H */
struct hfi_context_bad_cmd {
	u32 hdr;
	u32 version;
	struct fault_info gc;
	struct fault_info lpac;
	u32 error;
	u32 payload[];
} __packed;

/* F2H */
struct hfi_context_bad_cmd_legacy {
	u32 hdr;
	u32 ctxt_id;
	u32 policy;
	u32 ts;
	u32 error;
	u32 payload[];
} __packed;

/* H2F */
struct hfi_context_bad_reply_cmd {
	u32 hdr;
	u32 req_hdr;
} __packed;

/* H2F */
struct hfi_submit_cmd {
	u32 hdr;
	u32 ctxt_id;
	u32 flags;
	u32 ts;
	u32 profile_gpuaddr_lo;
	u32 profile_gpuaddr_hi;
	u32 numibs;
	u32 big_ib_gmu_va;
} __packed;

struct hfi_syncobj {
	u64 ctxt_id;
	u64 seq_no;
	u64 flags;
} __packed;

struct hfi_submit_syncobj {
	u32 hdr;
	u32 version;
	u32 flags;
	u32 timestamp;
	u32 num_syncobj;
} __packed;

struct hfi_log_block {
	u32 hdr;
	u32 version;
	u32 start_index;
	u32 stop_index;
} __packed;

enum hfi_warmboot_cmd_type {
	HFI_WARMBOOT_SET_SCRATCH = 0,
	HFI_WARMBOOT_EXEC_SCRATCH,
	HFI_WARMBOOT_QUERY_SCRATCH,
};

struct hfi_warmboot_scratch_cmd {
	/** @hdr: Header for the scratch command packet */
	u32 hdr;
	/** @version: Version of the scratch command packet */
	u32 version;
	/** @flags: Set, Execute or Query scratch flag */
	u32 flags;
	/** @scratch_addr: Address of the scratch */
	u32 scratch_addr;
	/** @scratch_size: Size of the scratch in bytes */
	u32 scratch_size;
} __packed;

/* Request GMU to add this fence to TxQueue without checking whether this is retired or not */
#define HW_FENCE_FLAG_SKIP_MEMSTORE 0x1

struct hfi_hw_fence_info {
	/** @hdr: Header for the fence info packet */
	u32 hdr;
	/** @version: Version of the fence info packet */
	u32 version;
	/** @gmu_ctxt_id: GMU Context id to which this fence belongs */
	u32 gmu_ctxt_id;
	/** @error: Any error code associated with this fence */
	u32 error;
	/** @ctxt_id: Context id for which hw fence is to be triggered */
	u64 ctxt_id;
	/** @ts: Timestamp for which hw fence is to be triggered */
	u64 ts;
	/** @flags: Flags on how to handle this hfi packet */
	u64 flags;
	/** @hash_index: Index of the hw fence in hw fence table */
	u64 hash_index;
} __packed;
/* The software fence corresponding to the queried hardware fence has not signaled */
#define ADRENO_HW_FENCE_SW_STATUS_PENDING BIT(0)
/* The software fence corresponding to the queried hardware fence has signaled */
#define ADRENO_HW_FENCE_SW_STATUS_SIGNALED BIT(1)

struct hfi_syncobj_query {
	/**
	 * @query_bitmask: Bitmask representing the sync object descriptors to be queried. For
	 * example, to query the second sync object descriptor (index=1) in a sync object,
	 * bit(1) should be set in this bitmask.
	 */
	u32 query_bitmask;
} __packed;

#define MAX_SYNCOBJ_QUERY_BITS 128
#define BITS_PER_SYNCOBJ_QUERY 32
#define MAX_SYNCOBJ_QUERY_DWORDS (MAX_SYNCOBJ_QUERY_BITS / BITS_PER_SYNCOBJ_QUERY)

struct hfi_syncobj_query_cmd {
	/** @hdr: Header for the fence info packet */
	u32 hdr;
	/** @version: Version of the fence info packet */
	u32 version;
	/** @gmu_ctxt_id: GMU Context id to which this SYNC object belongs */
	u32 gmu_ctxt_id;
	/** @sync_obj_ts: Timestamp of this SYNC object */
	u32 sync_obj_ts;
	/** @queries: Array of query bitmasks */
	struct hfi_syncobj_query queries[MAX_SYNCOBJ_QUERY_DWORDS];
} __packed;
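
/*
 * Example (illustrative only): descriptor i maps to bit (i % 32) of
 * queries[i / 32], so flagging descriptors 1 and 40 for a query:
 *
 *	struct hfi_syncobj_query_cmd cmd = { 0 };
 *
 *	cmd.queries[1 / BITS_PER_SYNCOBJ_QUERY].query_bitmask |=
 *		BIT(1 % BITS_PER_SYNCOBJ_QUERY);
 *	cmd.queries[40 / BITS_PER_SYNCOBJ_QUERY].query_bitmask |=
 *		BIT(40 % BITS_PER_SYNCOBJ_QUERY);
 */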
/**
 * struct pending_cmd - data structure to track outstanding HFI
 * command messages
 */
struct pending_cmd {
	/** @sent_hdr: Header of the un-ack'd hfi packet */
	u32 sent_hdr;
	/** @results: Array to store the ack packet */
	u32 results[MAX_RCVD_SIZE];
	/** @complete: Completion to signal hfi ack has been received */
	struct completion complete;
	/** @node: to add it to the list of hfi packets waiting for ack */
	struct list_head node;
};

static inline int _CMD_MSG_HDR(u32 *hdr, int id, size_t size)
{
	if (WARN_ON(size > HFI_MAX_MSG_SIZE))
		return -EMSGSIZE;

	*hdr = CREATE_MSG_HDR(id, HFI_MSG_CMD);
	return 0;
}

#define CMD_MSG_HDR(cmd, id) \
	_CMD_MSG_HDR(&(cmd).hdr, id, sizeof(cmd))

#define RECORD_MSG_HDR(hdr) \
	((hdr) | HFI_MSG_RECORD)

#define CLEAR_RECORD_MSG_HDR(hdr) \
	((hdr) & (~(HFI_MSG_RECORD | HFI_MSG_NOP)))

#define RECORD_NOP_MSG_HDR(hdr) \
	((hdr) | (HFI_MSG_RECORD | HFI_MSG_NOP))

/* Maximum number of IBs in a submission */
#define HWSCHED_MAX_DISPATCH_NUMIBS \
	((HFI_MAX_MSG_SIZE - sizeof(struct hfi_submit_cmd)) \
		/ sizeof(struct hfi_issue_ib))
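
/*
 * Worked example (illustrative): with the packed layouts above,
 * sizeof(struct hfi_submit_cmd) is 32 bytes and sizeof(struct
 * hfi_issue_ib) is 12 bytes, so with HFI_MAX_MSG_SIZE = 1024 this
 * evaluates to (1024 - 32) / 12 = 82 IBs per submission.
 */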
/**
 * struct payload_section - Container of key-value pairs
 *
 * There may be a variable number of payload sections appended
 * to the context bad HFI message. Each payload section contains
 * a variable number of key-value pairs, both key and value being
 * a single dword each.
 */
struct payload_section {
	/** @type: Type of the payload */
	u16 type;
	/** @dwords: Number of dwords in the data array. */
	u16 dwords;
	/** @data: A sequence of key-value pairs. Each pair is 2 dwords. */
	u32 data[];
} __packed;
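
/*
 * Example (illustrative only; "payload" and "bytes" are caller-supplied
 * stand-ins, not names from this file): sections are variable length,
 * so a consumer walks them by skipping @dwords data words each step:
 *
 *	u32 offset = 0;
 *
 *	while (offset < bytes) {
 *		struct payload_section *p = (void *)(payload + offset);
 *
 *		// p->data[0] is the first key, p->data[1] its value
 *		offset += sizeof(*p) + (p->dwords << 2);
 *	}
 */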
/* IDs for context bad hfi payloads */
#define PAYLOAD_FAULT_REGS 1
#define PAYLOAD_RB 2
#define PAYLOAD_PREEMPT_TIMEOUT 3

/* Keys for PAYLOAD_FAULT_REGS type payload */
#define KEY_CP_OPCODE_ERROR 1
#define KEY_CP_PROTECTED_ERROR 2
#define KEY_CP_HW_FAULT 3
#define KEY_CP_BV_OPCODE_ERROR 4
#define KEY_CP_BV_PROTECTED_ERROR 5
#define KEY_CP_BV_HW_FAULT 6
#define KEY_CP_LPAC_OPCODE_ERROR 7
#define KEY_CP_LPAC_PROTECTED_ERROR 8
#define KEY_CP_LPAC_HW_FAULT 9
#define KEY_SWFUSE_VIOLATION_FAULT 10
#define KEY_AQE0_OPCODE_ERROR 11
#define KEY_AQE0_HW_FAULT 12
#define KEY_AQE1_OPCODE_ERROR 13
#define KEY_AQE1_HW_FAULT 14
#define KEY_CP_AHB_ERROR 30
#define KEY_TSB_WRITE_ERROR 31

/* Keys for PAYLOAD_RB type payload */
#define KEY_RB_ID 1
#define KEY_RB_RPTR 2
#define KEY_RB_WPTR 3
#define KEY_RB_SIZEDWORDS 4
#define KEY_RB_QUEUED_TS 5
#define KEY_RB_RETIRED_TS 6
#define KEY_RB_GPUADDR_LO 7
#define KEY_RB_GPUADDR_HI 8

/* Keys for PAYLOAD_PREEMPT_TIMEOUT type payload */
#define KEY_PREEMPT_TIMEOUT_CUR_RB_ID 1
#define KEY_PREEMPT_TIMEOUT_NEXT_RB_ID 2

/* Types of errors that trigger context bad HFI */

/* GPU encountered a CP HW error */
#define GMU_CP_HW_ERROR 600
/* GPU encountered a GPU Hang interrupt */
#define GMU_GPU_HW_HANG 601
/* Preemption didn't complete in given time */
#define GMU_GPU_PREEMPT_TIMEOUT 602
/* Fault due to Long IB timeout */
#define GMU_GPU_SW_HANG 603
/* GPU encountered a bad opcode */
#define GMU_CP_OPCODE_ERROR 604
/* GPU encountered protected mode error */
#define GMU_CP_PROTECTED_ERROR 605
/* GPU encountered an illegal instruction */
#define GMU_CP_ILLEGAL_INST_ERROR 606
/* GPU encountered a CP ucode error */
#define GMU_CP_UCODE_ERROR 607
/* GPU encountered a CP hw fault error */
#define GMU_CP_HW_FAULT_ERROR 608
/* GPU encountered a GPC error */
#define GMU_CP_GPC_ERROR 609
/* GPU BV encountered a bad opcode */
#define GMU_CP_BV_OPCODE_ERROR 610
/* GPU BV encountered protected mode error */
#define GMU_CP_BV_PROTECTED_ERROR 611
/* GPU BV encountered a CP hw fault error */
#define GMU_CP_BV_HW_FAULT_ERROR 612
/* GPU BV encountered a CP ucode error */
#define GMU_CP_BV_UCODE_ERROR 613
/* GPU BV encountered an illegal instruction */
#define GMU_CP_BV_ILLEGAL_INST_ERROR 614
/* GPU encountered a bad LPAC opcode */
#define GMU_CP_LPAC_OPCODE_ERROR 615
/* GPU LPAC encountered a CP ucode error */
#define GMU_CP_LPAC_UCODE_ERROR 616
/* GPU LPAC encountered a CP hw fault error */
#define GMU_CP_LPAC_HW_FAULT_ERROR 617
/* GPU LPAC encountered protected mode error */
#define GMU_CP_LPAC_PROTECTED_ERROR 618
/* GPU LPAC encountered an illegal instruction */
#define GMU_CP_LPAC_ILLEGAL_INST_ERROR 619
/* Fault due to LPAC Long IB timeout */
#define GMU_GPU_LPAC_SW_HANG 620
/* Fault due to software fuse violation interrupt */
#define GMU_GPU_SW_FUSE_VIOLATION 621

/* AQE related error codes */
#define GMU_GPU_AQE0_OPCODE_ERRROR 622
#define GMU_GPU_AQE0_UCODE_ERROR 623
#define GMU_GPU_AQE0_HW_FAULT_ERROR 624
#define GMU_GPU_AQE0_ILLEGAL_INST_ERROR 625
#define GMU_GPU_AQE1_OPCODE_ERRROR 626
#define GMU_GPU_AQE1_UCODE_ERROR 627
#define GMU_GPU_AQE1_HW_FAULT_ERROR 628
#define GMU_GPU_AQE1_ILLEGAL_INST_ERROR 629

/* GMU encountered a sync object which is signaled via software but not via hardware */
#define GMU_SYNCOBJ_TIMEOUT_ERROR 630

/* Non fatal GPU error codes */
#define GMU_CP_AHB_ERROR 650
#define GMU_ATB_ASYNC_FIFO_OVERFLOW 651
#define GMU_RBBM_ATB_BUF_OVERFLOW 652
#define GMU_UCHE_OOB_ACCESS 653
#define GMU_UCHE_TRAP_INTR 654
#define GMU_TSB_WRITE_ERROR 655

/* GPU encountered an unknown CP error */
#define GMU_CP_UNKNOWN_ERROR 700
/**
 * hfi_update_read_idx - Update the read index of an hfi queue
 * @hdr: Pointer to the hfi queue header
 * @index: New read index
 *
 * This function makes sure that kgsl has consumed f2h packets
 * before GMU sees the updated read index. This avoids a corner
 * case where GMU might over-write f2h packets that have not yet
 * been consumed by kgsl.
 */
static inline void hfi_update_read_idx(struct hfi_queue_header *hdr, u32 index)
{
	/*
	 * This is to make sure packets are consumed before gmu sees the updated
	 * read index
	 */
	mb();

	hdr->read_index = index;
}

/**
 * hfi_update_write_idx - Update the write index of a GMU queue
 * @write_idx: Pointer to the write index
 * @index: New write index
 *
 * This function makes sure that the h2f packets are written out
 * to memory before GMU sees the updated write index. This avoids
 * corner cases where GMU might fetch stale entries that can happen
 * if the write index is updated before new packets have been written
 * out to memory.
 */
static inline void hfi_update_write_idx(volatile u32 *write_idx, u32 index)
{
	/*
	 * This is to make sure packets are written out before gmu sees the
	 * updated write index
	 */
	wmb();

	*write_idx = index;

	/*
	 * Memory barrier to make sure write index is written before an
	 * interrupt is raised
	 */
	wmb();
}
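
/*
 * Usage sketch (illustrative only; queue_memcpy() and
 * ring_gmu_doorbell() are hypothetical stand-ins, not functions from
 * this driver): a typical h2f submit copies the packet into the queue,
 * publishes the new write index with the barrier helper above, then
 * rings the GMU doorbell. queue_size is assumed to be in dwords here.
 *
 *	queue_memcpy(queue, hdr->write_index, cmd, size_dwords);
 *	hfi_update_write_idx(&hdr->write_index,
 *		(hdr->write_index + size_dwords) % hdr->queue_size);
 *	ring_gmu_doorbell(gmu);
 */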
/**
 * hfi_get_mem_alloc_desc - Get the descriptor from F2H_MSG_MEM_ALLOC packet
 * @rcvd: Pointer to the F2H_MSG_MEM_ALLOC packet
 * @out: Pointer to copy the descriptor data to
 *
 * This function checks for the F2H_MSG_MEM_ALLOC packet version and based on that gets the
 * descriptor data from the packet.
 */
static inline void hfi_get_mem_alloc_desc(void *rcvd, struct hfi_mem_alloc_desc *out)
{
	struct hfi_mem_alloc_cmd_legacy *in_legacy = (struct hfi_mem_alloc_cmd_legacy *)rcvd;
	struct hfi_mem_alloc_cmd *in = (struct hfi_mem_alloc_cmd *)rcvd;

	if (in->version > 0)
		memcpy(out, &in->desc, sizeof(in->desc));
	else
		memcpy(out, &in_legacy->desc, sizeof(in_legacy->desc));
}

/**
 * hfi_get_gmu_va_alignment - Get the alignment (in bytes) for a GMU VA
 * @align: Alignment specified as a power of two (2^n) in bits[0:7]
 *
 * This function derives the GMU VA alignment in bytes from bits[0:7] in the passed in value, which
 * is specified in terms of power of two (2^n). For example, va_align = 20 means (1 << 20) = 1MB
 * alignment. The minimum alignment (in bytes) is SZ_4K i.e. anything less than (or equal to) a
 * va_align value of ilog2(SZ_4K) will default to SZ_4K alignment.
 */
static inline u32 hfi_get_gmu_va_alignment(u32 align)
{
	u32 va_align = FIELD_GET(GENMASK(7, 0), align);

	return (va_align > ilog2(SZ_4K)) ? (1 << va_align) : SZ_4K;
}

/**
 * hfi_get_gmu_sz_alignment - Get the alignment (in bytes) for GMU mapping size
 * @align: Alignment specified as a power of two (2^n) in bits[8:15]
 *
 * This function derives the GMU VA size alignment in bytes from bits[8:15] in the passed in value,
 * which is specified in terms of power of two (2^n). For example, sz_align = 20 means
 * (1 << 20) = 1MB alignment. The minimum alignment (in bytes) is SZ_4K i.e. anything less
 * than (or equal to) a sz_align value of ilog2(SZ_4K) will default to SZ_4K alignment.
 */
static inline u32 hfi_get_gmu_sz_alignment(u32 align)
{
	u32 sz_align = FIELD_GET(GENMASK(15, 8), align);

	return (sz_align > ilog2(SZ_4K)) ? (1 << sz_align) : SZ_4K;
}
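
/*
 * Example (illustrative): continuing the @align example from struct
 * hfi_mem_alloc_desc, align = 0x0c14 decodes as
 *
 *	hfi_get_gmu_va_alignment(0x0c14) == SZ_1M  (bits [7:0] = 20)
 *	hfi_get_gmu_sz_alignment(0x0c14) == SZ_4K  (bits [15:8] = 12)
 *
 * since a field of 12 (== ilog2(SZ_4K)) or less falls back to SZ_4K.
 */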
/**
 * adreno_hwsched_wait_ack_completion - Wait for HFI ack asynchronously
 * @adreno_dev: Pointer to the adreno device
 * @dev: Pointer to the device structure
 * @ack: Pointer to the pending ack
 * @process_msgq: Function pointer to the msgq processing function
 *
 * This function waits for the completion structure, which gets signaled asynchronously. In case
 * there is a timeout, process the msgq one last time. If the ack is present, log an error and move
 * on. If the ack isn't present, log an error, take a snapshot and return -ETIMEDOUT.
 *
 * Return: 0 on success and -ETIMEDOUT on failure
 */
int adreno_hwsched_wait_ack_completion(struct adreno_device *adreno_dev,
	struct device *dev, struct pending_cmd *ack,
	void (*process_msgq)(struct adreno_device *adreno_dev));

/**
 * adreno_hwsched_ctxt_unregister_wait_completion - Wait for HFI ack for context unregister
 * @adreno_dev: Pointer to the adreno device
 * @dev: Pointer to the device structure
 * @ack: Pointer to the pending ack
 * @process_msgq: Function pointer to the msgq processing function
 * @cmd: Pointer to the hfi packet header and data
 *
 * This function waits for the completion structure for the context unregister hfi ack,
 * which gets signaled asynchronously. In case there is a timeout, process the msgq
 * one last time. If the ack is present, log an error and move on. If the ack isn't
 * present, log an error and return -ETIMEDOUT.
 *
 * Return: 0 on success and -ETIMEDOUT on failure
 */
int adreno_hwsched_ctxt_unregister_wait_completion(
	struct adreno_device *adreno_dev,
	struct device *dev, struct pending_cmd *ack,
	void (*process_msgq)(struct adreno_device *adreno_dev),
	struct hfi_unregister_ctxt_cmd *cmd);
/**
 * hfi_get_minidump_string - Get the va-minidump string from entry
 * @mem_kind: mem_kind type
 * @hfi_minidump_str: Pointer to the output string
 * @size: Max size of the hfi_minidump_str
 * @rb_id: Pointer to the rb_id count
 *
 * This function returns 0 for a valid mem_kind and copies the VA-MINIDUMP string to
 * hfi_minidump_str, otherwise it returns an error.
 */
static inline int hfi_get_minidump_string(u32 mem_kind, char *hfi_minidump_str,
		size_t size, u32 *rb_id)
{
	/* Extend this if VA minidump needs more hfi alloc entries */
	switch (mem_kind) {
	case HFI_MEMKIND_RB:
		snprintf(hfi_minidump_str, size, KGSL_GMU_RB_ENTRY"_%d", (*rb_id)++);
		break;
	case HFI_MEMKIND_SCRATCH:
		snprintf(hfi_minidump_str, size, KGSL_SCRATCH_ENTRY);
		break;
	case HFI_MEMKIND_PROFILE:
		snprintf(hfi_minidump_str, size, KGSL_GMU_KERNEL_PROF_ENTRY);
		break;
	case HFI_MEMKIND_USER_PROFILE_IBS:
		snprintf(hfi_minidump_str, size, KGSL_GMU_USER_PROF_ENTRY);
		break;
	case HFI_MEMKIND_CMD_BUFFER:
		snprintf(hfi_minidump_str, size, KGSL_GMU_CMD_BUFFER_ENTRY);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

#endif