/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _ADRENO_HWSCHED_H_
#define _ADRENO_HWSCHED_H_

#include <linux/soc/qcom/msm_hw_fence.h>

#include "kgsl_sync.h"

/* This structure represents an inflight command object */
struct cmd_list_obj {
	/** @drawobj: Handle to the draw object */
	struct kgsl_drawobj *drawobj;
	/** @node: List node to put it in the list of inflight commands */
	struct list_head node;
};

/**
 * struct adreno_hw_fence_entry - A structure to store a hardware fence and its context
 */
struct adreno_hw_fence_entry {
	/** @cmd: H2F_MSG_HW_FENCE_INFO packet for this hardware fence */
	struct hfi_hw_fence_info cmd;
	/** @kfence: Pointer to the kgsl fence */
	struct kgsl_sync_fence *kfence;
	/** @drawctxt: Pointer to the context */
	struct adreno_context *drawctxt;
	/** @node: List node to add it to a list */
	struct list_head node;
	/** @reset_node: List node to add it to the post-reset list of hardware fences */
	struct list_head reset_node;
};

/**
 * struct adreno_hwsched_ops - Function table to hook hw scheduler operations
 * to target specific routines
 */
struct adreno_hwsched_ops {
	/**
	 * @submit_drawobj: Target specific function to submit IBs to hardware
	 */
	int (*submit_drawobj)(struct adreno_device *adreno_dev,
		struct kgsl_drawobj *drawobj);
	/**
	 * @preempt_count: Target specific function to get the preemption count
	 */
	u32 (*preempt_count)(struct adreno_device *adreno_dev);
	/**
	 * @create_hw_fence: Target specific function to create a hardware fence
	 */
	void (*create_hw_fence)(struct adreno_device *adreno_dev,
		struct kgsl_sync_fence *kfence);
};
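
/*
 * Illustrative sketch only: a target specific back end is expected to supply
 * its own callbacks and hand the table to adreno_hwsched_init(). The gen7_*
 * names below are hypothetical placeholders, not identifiers from this header.
 *
 *	static const struct adreno_hwsched_ops gen7_hwsched_ops = {
 *		.submit_drawobj = gen7_hwsched_submit_drawobj,
 *		.preempt_count = gen7_hwsched_preempt_count,
 *		.create_hw_fence = gen7_hwsched_create_hw_fence,
 *	};
 */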

/**
 * struct adreno_hw_fence - Container for the hardware fence instance
 */
struct adreno_hw_fence {
	/** @handle: Handle for hardware fences */
	void *handle;
	/** @mem_descriptor: Memory descriptor for hardware fences */
	struct msm_hw_fence_mem_addr mem_descriptor;
	/** @memdesc: Kgsl memory descriptor for the hardware fence queue */
	struct kgsl_memdesc memdesc;
};

/**
 * struct adreno_hwsched - Container for the hardware scheduler
 */
struct adreno_hwsched {
	/** @mutex: Mutex needed to run the dispatcher function */
	struct mutex mutex;
	/** @flags: Container for the dispatcher internal flags */
	unsigned long flags;
	/** @inflight: Number of active submissions to the dispatch queues */
	u32 inflight;
	/** @jobs: Array of dispatch job lists for each priority level */
	struct llist_head jobs[16];
	/** @requeue: Array of lists for dispatch jobs that got requeued */
	struct llist_head requeue[16];
	/** @work: The work structure to execute the dispatcher function */
	struct kthread_work work;
	/** @cmd_list: List of objects submitted to dispatch queues */
	struct list_head cmd_list;
	/** @fault: Atomic to record a fault */
	atomic_t fault;
	/** @worker: The kthread worker that runs @work */
	struct kthread_worker *worker;
	/** @hwsched_ops: Container for target specific hwsched ops */
	const struct adreno_hwsched_ops *hwsched_ops;
	/** @ctxt_bad: Container for the context bad hfi packet */
	void *ctxt_bad;
	/** @idle_gate: Gate to wait on for the hw scheduler to idle */
	struct completion idle_gate;
	/** @big_cmdobj: Points to the big IB that is inflight */
	struct kgsl_drawobj_cmd *big_cmdobj;
	/** @recurring_cmdobj: Recurring command object sent to the GMU */
	struct kgsl_drawobj_cmd *recurring_cmdobj;
	/** @lsr_timer: Timer struct to schedule LSR work */
	struct timer_list lsr_timer;
	/** @lsr_check_ws: LSR work to update power stats */
	struct work_struct lsr_check_ws;
	/** @hw_fence: Container for the hw fence instance */
	struct adreno_hw_fence hw_fence;
	/** @hw_fence_cache: kmem cache for storing hardware output fences */
	struct kmem_cache *hw_fence_cache;
	/** @hw_fence_count: Number of hardware fences that haven't yet been sent to the Tx Queue */
	atomic_t hw_fence_count;
	/**
	 * @submission_seqnum: Sequence number for sending submissions to GMU
	 * context queues or dispatch queues
	 */
	atomic_t submission_seqnum;
	/** @global_ctxtq: Memory descriptor for the global context queue */
	struct kgsl_memdesc global_ctxtq;
	/** @global_ctxt_gmu_registered: Whether the global context is registered with the GMU */
	bool global_ctxt_gmu_registered;
};

/*
 * This value is based on the maximum number of IBs that can fit
 * in the ringbuffer.
 */
#define HWSCHED_MAX_IBS 2000

enum adreno_hwsched_flags {
	ADRENO_HWSCHED_POWER = 0,
	ADRENO_HWSCHED_ACTIVE,
	ADRENO_HWSCHED_CTX_BAD_LEGACY,
	ADRENO_HWSCHED_CONTEXT_QUEUE,
	ADRENO_HWSCHED_HW_FENCE,
};

/**
 * adreno_hwsched_trigger - Function to schedule the hwsched thread
 * @adreno_dev: A handle to adreno device
 *
 * Schedule the hw dispatcher for retiring and submitting command objects
 */
void adreno_hwsched_trigger(struct adreno_device *adreno_dev);

/**
 * adreno_hwsched_start() - Activate the hwsched dispatcher
 * @adreno_dev: pointer to the adreno device
 *
 * Enable the dispatcher thread to execute
 */
void adreno_hwsched_start(struct adreno_device *adreno_dev);

/**
 * adreno_hwsched_init() - Initialize the hwsched
 * @adreno_dev: pointer to the adreno device
 * @hwsched_ops: Pointer to target specific hwsched ops
 *
 * Set up the hwsched resources.
 *
 * Return: 0 on success or negative on failure.
 */
int adreno_hwsched_init(struct adreno_device *adreno_dev,
	const struct adreno_hwsched_ops *hwsched_ops);
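
/*
 * Illustrative sketch only (not lifted from a real probe path): a target
 * driver initializes the hw scheduler once and activates the dispatcher
 * thread when the device is brought up. gen7_hwsched_ops is the hypothetical
 * ops table from the example above.
 *
 *	ret = adreno_hwsched_init(adreno_dev, &gen7_hwsched_ops);
 *	if (ret)
 *		return ret;
 *
 *	adreno_hwsched_start(adreno_dev);
 */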

/**
 * adreno_hwsched_fault - Set hwsched fault to request recovery
 * @adreno_dev: A handle to adreno device
 * @fault: The type of fault
 */
void adreno_hwsched_fault(struct adreno_device *adreno_dev, u32 fault);

/**
 * adreno_hwsched_clear_fault() - Clear the hwsched fault
 * @adreno_dev: A pointer to an adreno_device structure
 *
 * Clear the hwsched fault status for the adreno device
 */
void adreno_hwsched_clear_fault(struct adreno_device *adreno_dev);

/**
 * adreno_hwsched_parse_fault_cmdobj - Parse the faulty submission
 * @adreno_dev: pointer to the adreno device
 * @snapshot: Pointer to the snapshot structure
 *
 * Walk the list of active submissions to find the one that faulted and
 * parse it so that the relevant command buffers can be added to the snapshot
 */
void adreno_hwsched_parse_fault_cmdobj(struct adreno_device *adreno_dev,
	struct kgsl_snapshot *snapshot);

void adreno_hwsched_flush(struct adreno_device *adreno_dev);

/**
 * adreno_hwsched_unregister_contexts - Reset the context gmu_registered bit
 * @adreno_dev: pointer to the adreno device
 *
 * Walk the list of contexts and reset the gmu_registered bit for all
 * contexts
 */
void adreno_hwsched_unregister_contexts(struct adreno_device *adreno_dev);

/**
 * adreno_hwsched_idle - Wait for the dispatcher and hardware to become idle
 * @adreno_dev: A handle to adreno device
 *
 * Return: 0 on success or negative error on failure
 */
int adreno_hwsched_idle(struct adreno_device *adreno_dev);

/**
 * adreno_hwsched_retire_cmdobj - Retire a command object
 * @hwsched: Pointer to the hwsched container
 * @cmdobj: Pointer to the command object to retire
 */
void adreno_hwsched_retire_cmdobj(struct adreno_hwsched *hwsched,
	struct kgsl_drawobj_cmd *cmdobj);

/**
 * adreno_hwsched_context_queue_enabled - Check whether context queues are in use
 * @adreno_dev: pointer to the adreno device
 *
 * Return: True if the context queue feature is enabled
 */
bool adreno_hwsched_context_queue_enabled(struct adreno_device *adreno_dev);

/**
 * adreno_hwsched_register_hw_fence - Register GPU as a hardware fence client
 * @adreno_dev: pointer to the adreno device
 *
 * Register with the hardware fence driver to be able to trigger and wait
 * for hardware fences. Also, set up the memory descriptor for mapping the
 * client queue to the GMU.
 */
void adreno_hwsched_register_hw_fence(struct adreno_device *adreno_dev);

/**
 * adreno_hwsched_deregister_hw_fence - Deregister GPU as a hardware fence client
 * @adreno_dev: pointer to the adreno device
 *
 * Deregister with the hardware fence driver and free up any resources allocated
 * as part of registering with the hardware fence driver.
 */
void adreno_hwsched_deregister_hw_fence(struct adreno_device *adreno_dev);

/**
 * adreno_hwsched_replay - Resubmit inflight cmdbatches after GPU reset
 * @adreno_dev: pointer to the adreno device
 *
 * Resubmit all cmdbatches to the GMU after device reset
 */
void adreno_hwsched_replay(struct adreno_device *adreno_dev);
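
/*
 * Illustrative recovery sketch: the exact sequence is target and driver
 * specific, this only shows how the helpers in this header are intended to
 * fit together. A recovery path that sees a pending fault would typically
 * snapshot and reset the hardware, drop the stale GMU context registrations,
 * resubmit the inflight command batches and then clear the fault.
 *
 *	if (adreno_hwsched_gpu_fault(adreno_dev)) {
 *		... snapshot and reset the GPU and GMU ...
 *		adreno_hwsched_unregister_contexts(adreno_dev);
 *		adreno_hwsched_replay(adreno_dev);
 *		adreno_hwsched_clear_fault(adreno_dev);
 *	}
 */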

/**
 * adreno_hwsched_parse_payload - Parse a payload to look up a key
 * @payload: Pointer to a payload section
 * @key: The key whose value is to be looked up
 *
 * This function parses the payload data, which is a sequence
 * of key-value pairs.
 *
 * Return: The value of the key, or 0 if the key is not found
 */
u32 adreno_hwsched_parse_payload(struct payload_section *payload, u32 key);
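
/*
 * Illustrative sketch only: the payload pointer and HFI_KEY_EXAMPLE below are
 * placeholders, not identifiers from this driver. Because a missing key reads
 * back as 0, callers that must distinguish "key absent" from "value is zero"
 * should only rely on keys whose legal values are non-zero.
 *
 *	u32 val = adreno_hwsched_parse_payload(payload, HFI_KEY_EXAMPLE);
 *	if (!val)
 *		return; // key not found (or its value is genuinely zero)
 */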

/**
 * adreno_hwsched_gpu_fault - Get the hwsched gpu fault info
 * @adreno_dev: pointer to the adreno device
 *
 * Return: Zero if there is no hwsched fault, otherwise a non-zero fault value
 */
u32 adreno_hwsched_gpu_fault(struct adreno_device *adreno_dev);

/**
 * adreno_hwsched_log_nonfatal_gpu_fault - Log a non-fatal GPU error from a context bad hfi packet
 * @adreno_dev: pointer to the adreno device
 * @dev: Pointer to the struct device for the GMU platform device
 * @error: Type of error reported by the context bad HFI packet
 *
 * This function parses the context bad hfi packet and logs the error information.
 *
 * Return: True for a non-fatal error code, false otherwise.
 */
bool adreno_hwsched_log_nonfatal_gpu_fault(struct adreno_device *adreno_dev,
	struct device *dev, u32 error);

#endif /* _ADRENO_HWSCHED_H_ */