  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (c) 2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #ifndef _LINUX_SCHED_WALT_H
  7. #define _LINUX_SCHED_WALT_H
  8. #include <linux/types.h>
  9. #include <linux/spinlock_types.h>
  10. #include <linux/cpumask.h>
/*
 * Clients that can hold a CPU pause. Values are distinct bits so that
 * several clients may pause the same CPU independently and the CPU only
 * resumes once every client has released it.
 */
enum pause_client {
	PAUSE_CORE_CTL = 0x01,	/* core-control governor (see core_ctl_* API below) */
	PAUSE_THERMAL = 0x02,	/* thermal mitigation -- TODO confirm */
	PAUSE_HYP = 0x04,	/* presumably hypervisor-driven pause; verify against callers */
	PAUSE_SBT = 0x08,	/* meaning of SBT not evident from this file -- confirm */
};
  17. #define NO_BOOST 0
  18. #define FULL_THROTTLE_BOOST 1
  19. #define CONSERVATIVE_BOOST 2
  20. #define RESTRAINED_BOOST 3
  21. #define STORAGE_BOOST 4
  22. #define FULL_THROTTLE_BOOST_DISABLE -1
  23. #define CONSERVATIVE_BOOST_DISABLE -2
  24. #define RESTRAINED_BOOST_DISABLE -3
  25. #define STORAGE_BOOST_DISABLE -4
  26. #define MAX_NUM_BOOST_TYPE (STORAGE_BOOST+1)
#if IS_ENABLED(CONFIG_SCHED_WALT)
/* Compile-time topology bounds for the arrays below. */
#define MAX_CPUS_PER_CLUSTER 6
#define MAX_CLUSTERS 4
/*
 * Payload delivered to core_ctl notifier callbacks (registered via
 * core_ctl_notifier_register()). All values are percentages except nr_big.
 */
struct core_ctl_notif_data {
	unsigned int nr_big;			/* count of "big" CPUs -- TODO confirm exact meaning */
	unsigned int coloc_load_pct;		/* colocated-group load %, presumably; verify against producer */
	unsigned int ta_util_pct[MAX_CLUSTERS];	/* per-cluster utilization %; "ta" likely top-app -- confirm */
	unsigned int cur_cap_pct[MAX_CLUSTERS];	/* per-cluster current capacity % */
};
/*
 * Per-task boost levels consumed by set_task_boost(). Ordered by
 * increasing aggressiveness; TASK_BOOST_END is a sentinel, not a level.
 */
enum task_boost_type {
	TASK_BOOST_NONE = 0,		/* no placement boost */
	TASK_BOOST_ON_MID,		/* prefer mid-capacity CPUs -- TODO confirm */
	TASK_BOOST_ON_MAX,		/* prefer max-capacity CPUs -- TODO confirm */
	TASK_BOOST_STRICT_MAX,		/* presumably restrict to max-capacity CPUs; verify */
	TASK_BOOST_END,			/* sentinel: number of boost levels */
};
/* Sizing constants for the per-task WALT accounting arrays below. */
#define WALT_NR_CPUS 8		/* max CPUs tracked in curr/prev_window_cpu[] */
#define RAVG_HIST_SIZE 5	/* windows of demand history kept in sum_history[] */
/* wts->bucket_bitmask needs to be updated if NUM_BUSY_BUCKETS > 16 */
#define NUM_BUSY_BUCKETS 16
#define NUM_BUSY_BUCKETS_SHIFT 4	/* log2(NUM_BUSY_BUCKETS) */
/*
 * A group of related threads whose load WALT aggregates together
 * (colocation). Tasks join via walt_task_struct::grp / grp_list.
 */
struct walt_related_thread_group {
	int id;				/* group identifier */
	raw_spinlock_t lock;		/* presumably protects 'tasks' membership -- confirm */
	struct list_head tasks;		/* member tasks, linked via wts->grp_list */
	struct list_head list;		/* linkage into a global group list -- TODO confirm */
	bool skip_min;			/* skip minimum-capacity CPUs/cluster for this group -- confirm */
	struct rcu_head rcu;		/* deferred free; 'grp' pointers are __rcu-protected */
	u64 last_update;		/* timestamp of last group load update -- confirm units */
	u64 downmigrate_ts;		/* timestamp related to downmigration decisions -- confirm */
	u64 start_ktime_ts;		/* group start time (ktime) -- confirm */
};
/*
 * Per-task WALT (Window Assisted Load Tracking) state. An instance lives
 * inside task_struct::android_vendor_data1; wts_to_ts() recovers the
 * owning task_struct from a pointer to this struct.
 */
struct walt_task_struct {
	/*
	 * 'mark_start' marks the beginning of an event (task waking up, task
	 * starting to execute, task being preempted) within a window
	 *
	 * 'sum' represents how runnable a task has been within current
	 * window. It incorporates both running time and wait time and is
	 * frequency scaled.
	 *
	 * 'sum_history' keeps track of history of 'sum' seen over previous
	 * RAVG_HIST_SIZE windows. Windows where task was entirely sleeping are
	 * ignored.
	 *
	 * 'demand' represents maximum sum seen over previous
	 * sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
	 * demand for tasks.
	 *
	 * 'curr_window_cpu' represents task's contribution to cpu busy time on
	 * various CPUs in the current window
	 *
	 * 'prev_window_cpu' represents task's contribution to cpu busy time on
	 * various CPUs in the previous window
	 *
	 * 'curr_window' represents the sum of all entries in curr_window_cpu
	 *
	 * 'prev_window' represents the sum of all entries in prev_window_cpu
	 *
	 * 'pred_demand_scaled' represents task's current predicted cpu busy time
	 * in terms of 1024 units
	 *
	 * 'busy_buckets' groups historical busy time into different buckets
	 * used for prediction
	 *
	 * 'demand_scaled' represents task's demand scaled to 1024
	 *
	 * 'prev_on_rq' tracks enqueue/dequeue of a task for error conditions
	 * 0 = nothing, 1 = enqueued, 2 = dequeued
	 */
	u32 flags;				/* bitmask of enum walt_flags bits (see below) */
	u64 mark_start;
	u64 window_start;			/* start of the current window -- TODO confirm units (ns?) */
	u32 sum, demand;
	u32 coloc_demand;			/* demand variant used for colocation, presumably -- confirm */
	u32 sum_history[RAVG_HIST_SIZE];
	u16 sum_history_util[RAVG_HIST_SIZE];	/* util-scaled companion of sum_history -- TODO confirm */
	u32 curr_window_cpu[WALT_NR_CPUS];
	u32 prev_window_cpu[WALT_NR_CPUS];
	u32 curr_window, prev_window;
	u8 busy_buckets[NUM_BUSY_BUCKETS];
	u16 bucket_bitmask;			/* one bit per non-empty busy bucket (hence u16 for 16 buckets) */
	u16 demand_scaled;
	u16 pred_demand_scaled;
	u64 active_time;			/* cumulative active time -- confirm semantics */
	u64 last_win_size;			/* size of the last window seen by this task -- confirm */
	int boost;				/* current enum task_boost_type value, presumably */
	bool wake_up_idle;			/* hint: allow waking this task onto idle CPUs (see accessors below) */
	bool misfit;				/* task too big for its current CPU -- TODO confirm */
	bool rtg_high_prio;			/* high-priority member of a related thread group -- confirm */
	u8 low_latency;				/* low-latency classification flags -- confirm */
	u64 boost_period;			/* duration for set_task_boost() -- confirm units */
	u64 boost_expires;			/* timestamp when the task boost lapses */
	u64 last_sleep_ts;			/* timestamp of last sleep -- confirm clock source */
	u32 init_load_pct;			/* initial load percentage for new tasks -- confirm */
	u32 unfilter;				/* NOTE(review): semantics not evident from this file */
	u64 last_wake_ts;			/* timestamp of last wakeup */
	u64 last_enqueued_ts;			/* timestamp of last enqueue */
	struct walt_related_thread_group __rcu *grp;	/* RCU-protected group membership, may be NULL */
	struct list_head grp_list;		/* linkage into grp->tasks */
	u64 cpu_cycles;				/* cycle counter snapshot -- confirm */
	bool iowaited;				/* task recently waited on I/O -- confirm */
	int prev_on_rq;				/* 0 = nothing, 1 = enqueued, 2 = dequeued (see header comment) */
	int prev_on_rq_cpu;			/* CPU of the last enqueue/dequeue recorded in prev_on_rq */
	struct list_head mvp_list;		/* MVP (most valuable process?) queue linkage -- confirm */
	u64 sum_exec_snapshot_for_slice;	/* exec-time snapshot for slice accounting -- confirm */
	u64 sum_exec_snapshot_for_total;	/* exec-time snapshot for total accounting -- confirm */
	u64 total_exec;				/* accumulated execution time -- confirm units */
	int mvp_prio;				/* priority within the MVP mechanism -- confirm */
	int cidx;				/* index, presumably into curr/prev window bookkeeping -- confirm */
	int load_boost;				/* load boost setting; pairs with boosted_task_load */
	int64_t boosted_task_load;		/* signed boosted load contribution */
	int prev_cpu;				/* CPU the task ran on previously */
	int new_cpu;				/* CPU selected for migration -- confirm */
	u8 enqueue_after_migration;		/* flag set around migration enqueue -- confirm */
	u8 hung_detect_status;			/* hung-task detection state -- confirm */
	int pipeline_cpu;			/* CPU pinned for pipeline tasks, -1 when unset? -- TODO confirm */
	cpumask_t reduce_mask;			/* CPUs with reduced eligibility for this task -- confirm */
	u64 mark_start_birth_ts;		/* mark_start at task birth -- confirm */
	u8 high_util_history;			/* compact history of high-utilization windows -- confirm */
};
/*
 * Bit indices for walt_task_struct::flags: each enumerator below is a
 * bit offset into wts->flags, not a mask value.
 */
enum walt_flags {
	WALT_INIT,		/* presumably: per-task WALT state initialized -- confirm */
	MAX_WALT_FLAGS		/* sentinel: number of defined flag bits */
};
/*
 * wts_to_ts(wts) - recover the task_struct that owns @wts.
 *
 * The walt_task_struct is embedded in task_struct::android_vendor_data1,
 * so subtracting that member's offset from the wts pointer yields the
 * enclosing task_struct (the container_of pattern, spelled out).
 */
#define wts_to_ts(wts) ({ \
	void *__mptr = (void *)(wts); \
	((struct task_struct *)(__mptr - \
		offsetof(struct task_struct, android_vendor_data1))); })
  161. static inline bool sched_get_wake_up_idle(struct task_struct *p)
  162. {
  163. struct walt_task_struct *wts = (struct walt_task_struct *) p->android_vendor_data1;
  164. return wts->wake_up_idle;
  165. }
  166. static inline int sched_set_wake_up_idle(struct task_struct *p, bool wake_up_idle)
  167. {
  168. struct walt_task_struct *wts = (struct walt_task_struct *) p->android_vendor_data1;
  169. wts->wake_up_idle = wake_up_idle;
  170. return 0;
  171. }
  172. static inline void set_wake_up_idle(bool wake_up_idle)
  173. {
  174. struct walt_task_struct *wts = (struct walt_task_struct *) current->android_vendor_data1;
  175. wts->wake_up_idle = wake_up_idle;
  176. }
/* Remaining time @cpu must stay out of low-power modes; result via @timeout -- confirm semantics */
extern int sched_lpm_disallowed_time(int cpu, u64 *timeout);
/* Boost the current task at @boost level (enum task_boost_type) for @period -- confirm target task */
extern int set_task_boost(int boost, u64 period);
struct notifier_block;	/* forward declaration: only the pointer type is needed here */
extern void core_ctl_notifier_register(struct notifier_block *n);
extern void core_ctl_notifier_unregister(struct notifier_block *n);
extern int core_ctl_set_boost(bool boost);
/* Mark/unmark CPUs as "taken" (excluded from normal placement, presumably -- confirm) */
extern int walt_set_cpus_taken(struct cpumask *set);
extern int walt_unset_cpus_taken(struct cpumask *unset);
extern cpumask_t walt_get_cpus_taken(void);
extern int walt_get_cpus_in_state1(struct cpumask *cpus);
/* Pause/resume the CPUs in @cpus on behalf of @client (pauses stack per client) */
extern int walt_pause_cpus(struct cpumask *cpus, enum pause_client client);
extern int walt_resume_cpus(struct cpumask *cpus, enum pause_client client);
extern int walt_partial_pause_cpus(struct cpumask *cpus, enum pause_client client);
extern int walt_partial_resume_cpus(struct cpumask *cpus, enum pause_client client);
/* Select a system-wide boost mode: NO_BOOST..STORAGE_BOOST or a *_DISABLE value */
extern int sched_set_boost(int type);
  192. #else
/* !CONFIG_SCHED_WALT stub: no LPM restriction ever applies. */
/* NOTE(review): relies on the includer providing INT_MAX (<linux/limits.h>) -- confirm */
static inline int sched_lpm_disallowed_time(int cpu, u64 *timeout)
{
	return INT_MAX;
}
/* !CONFIG_SCHED_WALT stub: task boosting unavailable, report success. */
static inline int set_task_boost(int boost, u64 period)
{
	return 0;
}
/* !CONFIG_SCHED_WALT stub: hint never set. */
static inline bool sched_get_wake_up_idle(struct task_struct *p)
{
	return false;
}
/* !CONFIG_SCHED_WALT stub: silently accept and discard the hint. */
static inline int sched_set_wake_up_idle(struct task_struct *p, bool wake_up_idle)
{
	return 0;
}
/* !CONFIG_SCHED_WALT stub: no-op. */
static inline void set_wake_up_idle(bool wake_up_idle)
{
}
/* !CONFIG_SCHED_WALT stub: core control absent, report success. */
static inline int core_ctl_set_boost(bool boost)
{
	return 0;
}
/* !CONFIG_SCHED_WALT stubs: nothing to (un)register. */
static inline void core_ctl_notifier_register(struct notifier_block *n)
{
}
static inline void core_ctl_notifier_unregister(struct notifier_block *n)
{
}
/* !CONFIG_SCHED_WALT stubs: CPU pause/resume unavailable, report success. */
static inline int walt_pause_cpus(struct cpumask *cpus, enum pause_client client)
{
	return 0;
}
static inline int walt_resume_cpus(struct cpumask *cpus, enum pause_client client)
{
	return 0;
}
  230. inline int walt_partial_pause_cpus(struct cpumask *cpus, enum pause_client client)
  231. {
  232. return 0;
  233. }
  234. inline int walt_partial_resume_cpus(struct cpumask *cpus, enum pause_client client)
  235. {
  236. return 0;
  237. }
  238. static inline void walt_set_cpus_taken(struct cpumask *set)
  239. {
  240. }
  241. static inline void walt_unset_cpus_taken(struct cpumask *unset)
  242. {
  243. }
/* !CONFIG_SCHED_WALT stub: no CPU is ever "taken" -- return an empty mask. */
static inline cpumask_t walt_get_cpus_taken(void)
{
	cpumask_t t = { CPU_BITS_NONE };
	return t;
}
/* !CONFIG_SCHED_WALT stub: boost selection unsupported. */
/* NOTE(review): relies on the includer providing EINVAL (<linux/errno.h>) -- confirm */
static inline int sched_set_boost(int type)
{
	return -EINVAL;
}
  253. #endif
  254. #endif /* _LINUX_SCHED_WALT_H */