  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (c) 2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #ifndef _ADRENO_GEN8_HWSCHED_HFI_H_
  7. #define _ADRENO_GEN8_HWSCHED_HFI_H_
  8. /* Maximum number of IBs in a submission */
  9. #define HWSCHED_MAX_NUMIBS \
  10. ((HFI_MAX_MSG_SIZE - offsetof(struct hfi_issue_cmd_cmd, ibs)) \
  11. / sizeof(struct hfi_issue_ib))
  12. /*
  13. * This is used to put userspace threads to sleep when hardware fence unack count reaches a
  14. * threshold. This bit is cleared in two scenarios:
  15. * 1. If the hardware fence unack count drops to a desired threshold.
  16. * 2. If there is a GMU/GPU fault. Because we don't want the threads to keep sleeping through fault
  17. * recovery, which can easily take 100s of milliseconds to complete.
  18. */
  19. #define GEN8_HWSCHED_HW_FENCE_SLEEP_BIT 0x0
  20. /*
  21. * This is used to avoid creating any more hardware fences until the hardware fence unack count
  22. * drops to a desired threshold. This bit is required in cases where GEN8_HWSCHED_HW_FENCE_SLEEP_BIT
  23. * will be cleared, but we still want to avoid creating any more hardware fences. For example, if
  24. * hardware fence unack count reaches a maximum threshold, both GEN8_HWSCHED_HW_FENCE_SLEEP_BIT and
  25. * GEN8_HWSCHED_HW_FENCE_MAX_BIT will be set. Say, a GMU/GPU fault happens and
  26. * GEN8_HWSCHED_HW_FENCE_SLEEP_BIT will be cleared to wake up any sleeping threads. But,
  27. * GEN8_HWSCHED_HW_FENCE_MAX_BIT will remain set to avoid creating any new hardware fences until
  28. * recovery is complete and deferred drawctxt (if any) is handled.
  29. */
  30. #define GEN8_HWSCHED_HW_FENCE_MAX_BIT 0x1
  31. /*
  32. * This is used to avoid creating any more hardware fences until concurrent reset/recovery completes
  33. */
  34. #define GEN8_HWSCHED_HW_FENCE_ABORT_BIT 0x2
/**
 * struct gen8_hwsched_hfi - HFI state for the GEN8 hardware scheduler
 *
 * Holds the host-side bookkeeping for HFI communication with the GMU:
 * un-ACKed packet tracking, GMU buffers for oversized IBs, and the
 * hardware-fence throttling state (see the GEN8_HWSCHED_HW_FENCE_* bits).
 */
struct gen8_hwsched_hfi {
	/** @mem_alloc_table: Table of HFI memory allocation entries */
	struct hfi_mem_alloc_entry mem_alloc_table[32];
	/** @mem_alloc_entries: Number of valid entries in @mem_alloc_table */
	u32 mem_alloc_entries;
	/** @irq_mask: Store the hfi interrupt mask */
	u32 irq_mask;
	/** @msglock: To protect the list of un-ACKed hfi packets */
	rwlock_t msglock;
	/** @msglist: List of un-ACKed hfi packets */
	struct list_head msglist;
	/** @f2h_task: Task for processing gmu fw to host packets */
	struct task_struct *f2h_task;
	/** @f2h_wq: Waitqueue for the f2h_task */
	wait_queue_head_t f2h_wq;
	/** @big_ib: GMU buffer to hold big IBs */
	struct kgsl_memdesc *big_ib;
	/** @big_ib_recurring: GMU buffer to hold big recurring IBs */
	struct kgsl_memdesc *big_ib_recurring;
	/** @msgq_mutex: Mutex for accessing the msgq */
	struct mutex msgq_mutex;
	struct {
		/** @lock: Spinlock for managing hardware fences */
		spinlock_t lock;
		/**
		 * @unack_count: Number of hardware fences sent to GMU but haven't yet been ack'd
		 * by GMU
		 */
		u32 unack_count;
		/**
		 * @unack_wq: Waitqueue to wait on till number of unacked hardware fences drops to
		 * a desired threshold
		 */
		wait_queue_head_t unack_wq;
		/**
		 * @defer_drawctxt: Drawctxt to send hardware fences from as soon as unacked
		 * hardware fences drops to a desired threshold
		 */
		struct adreno_context *defer_drawctxt;
		/**
		 * @defer_ts: The timestamp of the hardware fence which got deferred
		 */
		u32 defer_ts;
		/**
		 * @flags: Flags to control the creation of new hardware fences
		 * (GEN8_HWSCHED_HW_FENCE_SLEEP_BIT/MAX_BIT/ABORT_BIT)
		 */
		unsigned long flags;
		/** @seqnum: Sequence number for hardware fence packet header */
		atomic_t seqnum;
	} hw_fence;
	/**
	 * @hw_fence_timer: Timer to trigger fault if unack'd hardware fence count doesn't drop
	 * to a desired threshold in given amount of time
	 */
	struct timer_list hw_fence_timer;
	/**
	 * @hw_fence_ws: Work struct that gets scheduled when hw_fence_timer expires
	 */
	struct work_struct hw_fence_ws;
	/** @detached_hw_fence_list: List of hardware fences belonging to detached contexts */
	struct list_head detached_hw_fence_list;
	/** @defer_hw_fence_work: The work structure to send deferred hardware fences to GMU */
	struct kthread_work defer_hw_fence_work;
};
  97. struct kgsl_drawobj_cmd;
  98. /**
  99. * gen8_hwsched_hfi_probe - Probe hwsched hfi resources
  100. * @adreno_dev: Pointer to adreno device structure
  101. *
  102. * Return: 0 on success and negative error on failure.
  103. */
  104. int gen8_hwsched_hfi_probe(struct adreno_device *adreno_dev);
  105. /**
  106. * gen8_hwsched_hfi_remove - Release hwsched hfi resources
  107. * @adreno_dev: Pointer to adreno device structure
  108. */
  109. void gen8_hwsched_hfi_remove(struct adreno_device *adreno_dev);
  110. /**
  111. * gen8_hwsched_hfi_init - Initialize hfi resources
  112. * @adreno_dev: Pointer to adreno device structure
  113. *
  114. * This function is used to initialize hfi resources
  115. * once before the very first gmu boot
  116. *
  117. * Return: 0 on success and negative error on failure.
  118. */
  119. int gen8_hwsched_hfi_init(struct adreno_device *adreno_dev);
  120. /**
  121. * gen8_hwsched_hfi_start - Start hfi resources
  122. * @adreno_dev: Pointer to adreno device structure
  123. *
  124. * Send the various hfi packets before booting the gpu
  125. *
  126. * Return: 0 on success and negative error on failure.
  127. */
  128. int gen8_hwsched_hfi_start(struct adreno_device *adreno_dev);
  129. /**
  130. * gen8_hwsched_hfi_stop - Stop the hfi resources
  131. * @adreno_dev: Pointer to the adreno device
  132. *
  133. * This function does the hfi cleanup when powering down the gmu
  134. */
  135. void gen8_hwsched_hfi_stop(struct adreno_device *adreno_dev);
  136. /**
* gen8_hwsched_cp_init - Send CP_INIT via HFI
  138. * @adreno_dev: Pointer to adreno device structure
  139. *
  140. * This function is used to send CP INIT packet and bring
  141. * GPU out of secure mode using hfi raw packets.
  142. *
  143. * Return: 0 on success and negative error on failure.
  144. */
  145. int gen8_hwsched_cp_init(struct adreno_device *adreno_dev);
  146. /**
  147. * gen8_hfi_send_cmd_async - Send an hfi packet
  148. * @adreno_dev: Pointer to adreno device structure
  149. * @data: Data to be sent in the hfi packet
  150. * @size_bytes: Size of the packet in bytes
  151. *
  152. * Send data in the form of an HFI packet to gmu and wait for
* its ack asynchronously
  154. *
  155. * Return: 0 on success and negative error on failure.
  156. */
  157. int gen8_hfi_send_cmd_async(struct adreno_device *adreno_dev, void *data, u32 size_bytes);
  158. /**
  159. * gen8_hwsched_submit_drawobj - Dispatch IBs to dispatch queues
  160. * @adreno_dev: Pointer to adreno device structure
  161. * @drawobj: The command draw object which needs to be submitted
  162. *
  163. * This function is used to register the context if needed and submit
  164. * IBs to the hfi dispatch queues.
  165. * Return: 0 on success and negative error on failure
  166. */
  167. int gen8_hwsched_submit_drawobj(struct adreno_device *adreno_dev,
  168. struct kgsl_drawobj *drawobj);
  169. /**
  170. * gen8_hwsched_context_detach - Unregister a context with GMU
  171. * @drawctxt: Pointer to the adreno context
  172. *
  173. * This function sends context unregister HFI and waits for the ack
  174. * to ensure all submissions from this context have retired
  175. */
  176. void gen8_hwsched_context_detach(struct adreno_context *drawctxt);
  177. /* Helper function to get to gen8 hwsched hfi device from adreno device */
  178. struct gen8_hwsched_hfi *to_gen8_hwsched_hfi(struct adreno_device *adreno_dev);
  179. /**
  180. * gen8_hwsched_preempt_count_get - Get preemption count from GMU
  181. * @adreno_dev: Pointer to adreno device
  182. *
  183. * This function sends a GET_VALUE HFI packet to get the number of
  184. * preemptions completed since last SLUMBER exit.
  185. *
  186. * Return: Preemption count
  187. */
  188. u32 gen8_hwsched_preempt_count_get(struct adreno_device *adreno_dev);
  189. /**
  190. * gen8_hwsched_parse_payload - Parse payload to look up a key
  191. * @payload: Pointer to a payload section
* @key: The key whose value is to be looked up
  193. *
  194. * This function parses the payload data which is a sequence
  195. * of key-value pairs.
  196. *
  197. * Return: The value of the key or 0 if key is not found
  198. */
  199. u32 gen8_hwsched_parse_payload(struct payload_section *payload, u32 key);
  200. /**
  201. * gen8_hwsched_lpac_cp_init - Send CP_INIT to LPAC via HFI
  202. * @adreno_dev: Pointer to adreno device structure
  203. *
  204. * This function is used to send CP INIT packet to LPAC and
  205. * enable submission to LPAC queue.
  206. *
  207. * Return: 0 on success and negative error on failure.
  208. */
  209. int gen8_hwsched_lpac_cp_init(struct adreno_device *adreno_dev);
  210. /**
  211. * gen8_hfi_send_lpac_feature_ctrl - Send the lpac feature hfi packet
  212. * @adreno_dev: Pointer to the adreno device
  213. *
  214. * Return: 0 on success or negative error on failure
  215. */
  216. int gen8_hfi_send_lpac_feature_ctrl(struct adreno_device *adreno_dev);
  217. /**
  218. * gen8_hwsched_context_destroy - Destroy any hwsched related resources during context destruction
  219. * @adreno_dev: Pointer to adreno device
  220. * @drawctxt: Pointer to the adreno context
  221. *
* This function destroys any hwsched related resources when this context is destroyed
  223. */
  224. void gen8_hwsched_context_destroy(struct adreno_device *adreno_dev,
  225. struct adreno_context *drawctxt);
  226. /**
  227. * gen8_hwsched_hfi_get_value - Send GET_VALUE packet to GMU to get the value of a property
  228. * @adreno_dev: Pointer to adreno device
  229. * @prop: property to get from GMU
  230. *
* This function sends a GET_VALUE HFI packet to query the value of a property
  232. *
  233. * Return: On success, return the value in the GMU response. On failure, return 0
  234. */
  235. u32 gen8_hwsched_hfi_get_value(struct adreno_device *adreno_dev, u32 prop);
  236. /**
  237. * gen8_send_hw_fence_hfi_wait_ack - Send hardware fence info to GMU
  238. * @adreno_dev: Pointer to adreno device
  239. * @entry: Pointer to the adreno hardware fence entry
  240. * @flags: Flags for this hardware fence
  241. *
  242. * Send the hardware fence info to the GMU and wait for the ack
  243. *
  244. * Return: 0 on success or negative error on failure
  245. */
  246. int gen8_send_hw_fence_hfi_wait_ack(struct adreno_device *adreno_dev,
  247. struct adreno_hw_fence_entry *entry, u64 flags);
  248. /**
  249. * gen8_hwsched_create_hw_fence - Create a hardware fence
  250. * @adreno_dev: Pointer to adreno device
  251. * @kfence: Pointer to the kgsl fence
  252. *
  253. * Create a hardware fence, set up hardware fence info and send it to GMU if required
  254. */
  255. void gen8_hwsched_create_hw_fence(struct adreno_device *adreno_dev,
  256. struct kgsl_sync_fence *kfence);
  257. /**
  258. * gen8_hwsched_drain_context_hw_fences - Drain context's hardware fences via GMU
  259. * @adreno_dev: Pointer to adreno device
  260. * @drawctxt: Pointer to the adreno context which is to be flushed
  261. *
  262. * Trigger hardware fences that were never dispatched to GMU
  263. *
  264. * Return: Zero on success or negative error on failure
  265. */
  266. int gen8_hwsched_drain_context_hw_fences(struct adreno_device *adreno_dev,
  267. struct adreno_context *drawctxt);
  268. /**
  269. * gen8_hwsched_check_context_inflight_hw_fences - Check whether all hardware fences
  270. * from a context have been sent to the TxQueue or not
  271. * @adreno_dev: Pointer to adreno device
  272. * @drawctxt: Pointer to the adreno context which is to be flushed
  273. *
  274. * Check if all hardware fences from this context have been sent to the
  275. * TxQueue. If not, log an error and return error code.
  276. *
  277. * Return: Zero on success or negative error on failure
  278. */
  279. int gen8_hwsched_check_context_inflight_hw_fences(struct adreno_device *adreno_dev,
  280. struct adreno_context *drawctxt);
  281. /**
  282. * gen8_remove_hw_fence_entry - Remove hardware fence entry
  283. * @adreno_dev: pointer to the adreno device
  284. * @entry: Pointer to the hardware fence entry
  285. */
  286. void gen8_remove_hw_fence_entry(struct adreno_device *adreno_dev,
  287. struct adreno_hw_fence_entry *entry);
  288. /**
  289. * gen8_trigger_hw_fence_cpu - Trigger hardware fence from cpu
  290. * @adreno_dev: pointer to the adreno device
  291. * @fence: hardware fence entry to be triggered
  292. *
  293. * Trigger the hardware fence by sending it to GMU's TxQueue and raise the
  294. * interrupt from GMU to APPS
  295. */
  296. void gen8_trigger_hw_fence_cpu(struct adreno_device *adreno_dev,
  297. struct adreno_hw_fence_entry *fence);
  298. /**
  299. * gen8_hwsched_disable_hw_fence_throttle - Disable hardware fence throttling after reset
  300. * @adreno_dev: pointer to the adreno device
  301. *
  302. * After device reset, clear hardware fence related data structures and send any hardware fences
  303. * that got deferred (prior to reset) and re-open the gates for hardware fence creation
  304. *
  305. * Return: Zero on success or negative error on failure
  306. */
  307. int gen8_hwsched_disable_hw_fence_throttle(struct adreno_device *adreno_dev);
  308. /**
  309. * gen8_hwsched_process_msgq - Process msgq
  310. * @adreno_dev: pointer to the adreno device
  311. *
  312. * This function grabs the msgq mutex and processes msgq for any outstanding hfi packets
  313. */
  314. void gen8_hwsched_process_msgq(struct adreno_device *adreno_dev);
  315. /**
  316. * gen8_hwsched_boot_gpu - Send the command to boot GPU
  317. * @adreno_dev: Pointer to adreno device
  318. *
* Send the hfi to boot GPU, and check the ack. In case of a failure,
  320. * get a snapshot and capture registers of interest.
  321. *
  322. * Return: Zero on success or negative error on failure
  323. */
  324. int gen8_hwsched_boot_gpu(struct adreno_device *adreno_dev);
  325. #endif