
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is based on schedutil governor but modified to work with
 * WALT.
 *
 * Copyright (C) 2016, Intel Corporation
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kthread.h>
#include <trace/events/power.h>

#include "walt.h"
#include "trace.h"

struct waltgov_tunables {
	struct gov_attr_set	attr_set;
	unsigned int		up_rate_limit_us;
	unsigned int		down_rate_limit_us;
	unsigned int		hispeed_load;
	unsigned int		hispeed_freq;
	unsigned int		rtg_boost_freq;
	unsigned int		adaptive_low_freq;
	unsigned int		adaptive_high_freq;
	unsigned int		adaptive_low_freq_kernel;
	unsigned int		adaptive_high_freq_kernel;
	unsigned int		target_load_thresh;
	unsigned int		target_load_shift;
#if IS_ENABLED(CONFIG_SCHED_POWER_OPTIMIZE)
	unsigned int		up_delay_freq;
#endif
	bool			pl;
	int			boost;
};

struct waltgov_policy {
	struct cpufreq_policy	*policy;
	u64			last_ws;
	u64			curr_cycles;
	u64			last_cyc_update_time;
	unsigned long		avg_cap;
	struct waltgov_tunables	*tunables;
	struct list_head	tunables_hook;
	unsigned long		hispeed_util;
	unsigned long		rtg_boost_util;
	unsigned long		max;
	raw_spinlock_t		update_lock;
	u64			last_freq_update_time;
	s64			min_rate_limit_ns;
	s64			up_rate_delay_ns;
	s64			down_rate_delay_ns;
	unsigned int		next_freq;
	unsigned int		cached_raw_freq;
	unsigned int		driving_cpu;

	/* The next fields are only needed if fast switch cannot be used: */
	struct irq_work		irq_work;
	struct kthread_work	work;
	struct mutex		work_lock;
	struct kthread_worker	worker;
	struct task_struct	*thread;

	bool			limits_changed;
	bool			need_freq_update;
	bool			thermal_isolated;
#if IS_ENABLED(CONFIG_SCHED_POWER_OPTIMIZE)
	bool			force_up_delay;
#endif
};

struct waltgov_cpu {
	struct waltgov_callback	cb;
	struct waltgov_policy	*wg_policy;
	unsigned int		cpu;
	struct walt_cpu_load	walt_load;
	unsigned long		util;
	unsigned long		max;
	unsigned int		flags;
	unsigned int		reasons;
};

DEFINE_PER_CPU(struct waltgov_callback *, waltgov_cb_data);
static DEFINE_PER_CPU(struct waltgov_cpu, waltgov_cpu);
static DEFINE_PER_CPU(struct waltgov_tunables *, cached_tunables);

/************************ Governor internals ***********************/
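
/*
 * Gate frequency re-evaluation: always allow it when the policy limits
 * have changed, otherwise require that at least min_rate_limit_ns (the
 * smaller of the up/down rate limits) has elapsed since the last update.
 */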
static bool waltgov_should_update_freq(struct waltgov_policy *wg_policy, u64 time)
{
	s64 delta_ns;

	if (unlikely(wg_policy->limits_changed)) {
		wg_policy->limits_changed = false;
		wg_policy->need_freq_update = true;
		return true;
	}

	/*
	 * No need to recalculate next freq for min_rate_limit_us
	 * at least. However we might still decide to further rate
	 * limit once frequency change direction is decided, according
	 * to the separate rate limits.
	 */
	delta_ns = time - wg_policy->last_freq_update_time;
	return delta_ns >= wg_policy->min_rate_limit_ns;
}

static bool waltgov_up_down_rate_limit(struct waltgov_policy *wg_policy, u64 time,
				       unsigned int next_freq)
{
	s64 delta_ns;

	delta_ns = time - wg_policy->last_freq_update_time;

	if (next_freq > wg_policy->next_freq &&
#if IS_ENABLED(CONFIG_SCHED_POWER_OPTIMIZE)
	    next_freq > wg_policy->tunables->up_delay_freq &&
#endif
	    delta_ns < wg_policy->up_rate_delay_ns)
		return true;

	if (next_freq < wg_policy->next_freq &&
	    delta_ns < wg_policy->down_rate_delay_ns)
		return true;

	return false;
}

static void __waltgov_update_next_freq(struct waltgov_policy *wg_policy,
		u64 time, unsigned int next_freq, unsigned int raw_freq)
{
	wg_policy->cached_raw_freq = raw_freq;
	wg_policy->next_freq = next_freq;
	wg_policy->last_freq_update_time = time;
#if IS_ENABLED(CONFIG_SCHED_POWER_OPTIMIZE)
	wg_policy->force_up_delay = next_freq < wg_policy->tunables->up_delay_freq;
#endif
}

static bool waltgov_update_next_freq(struct waltgov_policy *wg_policy, u64 time,
				     unsigned int next_freq,
				     unsigned int raw_freq)
{
	if (wg_policy->next_freq == next_freq)
		return false;

	if (waltgov_up_down_rate_limit(wg_policy, time, next_freq)) {
		wg_policy->cached_raw_freq = 0;
		return false;
	}

	__waltgov_update_next_freq(wg_policy, time, next_freq, raw_freq);

	return true;
}

static unsigned long freq_to_util(struct waltgov_policy *wg_policy,
				  unsigned int freq)
{
	return mult_frac(wg_policy->max, freq,
			 wg_policy->policy->cpuinfo.max_freq);
}

#define KHZ 1000
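
/*
 * Accumulate the cycles spent at prev_freq since the last accounting
 * point, clamped to the end of the current WALT window: with prev_freq
 * in KHz and the delta in ns, cycles = freq * delta / (NSEC_PER_SEC / KHZ).
 */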
static void waltgov_track_cycles(struct waltgov_policy *wg_policy,
				 unsigned int prev_freq,
				 u64 upto)
{
	u64 delta_ns, cycles;
	u64 next_ws = wg_policy->last_ws + sched_ravg_window;

	upto = min(upto, next_ws);
	/* Track cycles in current window */
	delta_ns = upto - wg_policy->last_cyc_update_time;
	delta_ns *= prev_freq;
	do_div(delta_ns, (NSEC_PER_SEC / KHZ));
	cycles = delta_ns;
	wg_policy->curr_cycles += cycles;
	wg_policy->last_cyc_update_time = upto;
}
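
/*
 * Convert the cycles accumulated in the window that just closed into an
 * average frequency and cache it as a capacity value (avg_cap), which is
 * later compared against hispeed_load. A window boundary moving backwards
 * indicates a WALT accounting problem and is reported loudly.
 */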
static void waltgov_calc_avg_cap(struct waltgov_policy *wg_policy, u64 curr_ws,
				 unsigned int prev_freq)
{
	u64 last_ws = wg_policy->last_ws;
	unsigned int avg_freq;
	int cpu;

	if (curr_ws < last_ws) {
		printk_deferred("============ WALT CPUFREQ DUMP START ==============\n");
		for_each_online_cpu(cpu) {
			struct waltgov_cpu *wg_cpu = &per_cpu(waltgov_cpu, cpu);
			struct waltgov_policy *wg_policy_internal = wg_cpu->wg_policy;

			printk_deferred("cpu=%d walt_load->ws=%llu and policy->last_ws=%llu\n",
					wg_cpu->cpu, wg_cpu->walt_load.ws,
					wg_policy_internal->last_ws);
		}
		printk_deferred("============ WALT CPUFREQ DUMP END ==============\n");
		WALT_BUG(WALT_BUG_WALT, NULL,
			 "policy->related_cpus=0x%x curr_ws=%llu < last_ws=%llu",
			 cpumask_bits(wg_policy->policy->related_cpus)[0], curr_ws,
			 last_ws);
	}

	if (curr_ws <= last_ws)
		return;

	/* If we skipped some windows */
	if (curr_ws > (last_ws + sched_ravg_window)) {
		avg_freq = prev_freq;
		/* Reset tracking history */
		wg_policy->last_cyc_update_time = curr_ws;
	} else {
		waltgov_track_cycles(wg_policy, prev_freq, curr_ws);
		avg_freq = wg_policy->curr_cycles;
		avg_freq /= sched_ravg_window / (NSEC_PER_SEC / KHZ);
	}
	wg_policy->avg_cap = freq_to_util(wg_policy, avg_freq);
	wg_policy->curr_cycles = 0;
	wg_policy->last_ws = curr_ws;
}

static void waltgov_fast_switch(struct waltgov_policy *wg_policy, u64 time,
				unsigned int next_freq)
{
	struct cpufreq_policy *policy = wg_policy->policy;

	waltgov_track_cycles(wg_policy, wg_policy->policy->cur, time);
	cpufreq_driver_fast_switch(policy, next_freq);
}

static void waltgov_deferred_update(struct waltgov_policy *wg_policy, u64 time,
				    unsigned int next_freq)
{
	walt_irq_work_queue(&wg_policy->irq_work);
}

#if IS_ENABLED(CONFIG_SCHED_POWER_OPTIMIZE)
#define TARGET_LOAD_PL 90
#endif
#define TARGET_LOAD 80
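
/*
 * Map a utilization value to a raw frequency request. The default mapping
 * applies 25% headroom (freq ~= 1.25 * fmax * util / cap); once util
 * crosses target_load_thresh and RT pressure is low (below a quarter of
 * capacity), the headroom is derived from target_load_shift instead, with
 * the default mapping of the threshold itself acting as a floor.
 */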
static inline unsigned long walt_map_util_freq(unsigned long util,
					       struct waltgov_policy *wg_policy,
					       unsigned long cap, int cpu)
{
	unsigned long fmax = wg_policy->policy->cpuinfo.max_freq;
	unsigned int shift = wg_policy->tunables->target_load_shift;

	if (util >= wg_policy->tunables->target_load_thresh &&
	    cpu_util_rt(cpu_rq(cpu)) < (cap >> 2))
		return max(
			(fmax + (fmax >> shift)) * util,
			(fmax + (fmax >> 2)) * wg_policy->tunables->target_load_thresh
			) / cap;

	return (fmax + (fmax >> 2)) * util / cap;
}

static inline unsigned int get_adaptive_low_freq(struct waltgov_policy *wg_policy)
{
	return max(wg_policy->tunables->adaptive_low_freq,
		   wg_policy->tunables->adaptive_low_freq_kernel);
}

static inline unsigned int get_adaptive_high_freq(struct waltgov_policy *wg_policy)
{
	return max(wg_policy->tunables->adaptive_high_freq,
		   wg_policy->tunables->adaptive_high_freq_kernel);
}
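
/*
 * Compute the target frequency for the policy: map util to a raw
 * frequency, then apply the adaptive low/high clamps, the optional
 * up-delay cap and the various fmax_cap limits, recording the dominant
 * reason on the driving CPU. Returns 0 when no switch is needed
 * (unchanged, rate limited, or already held at minimum while thermally
 * isolated).
 */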
static unsigned int get_next_freq(struct waltgov_policy *wg_policy,
				  unsigned long util, unsigned long max,
				  struct waltgov_cpu *wg_cpu, u64 time)
{
	struct cpufreq_policy *policy = wg_policy->policy;
	unsigned int freq, raw_freq, final_freq;
	struct waltgov_cpu *wg_driv_cpu = &per_cpu(waltgov_cpu, wg_policy->driving_cpu);
	struct walt_sched_cluster *cluster;
	bool skip = false;
	bool thermal_isolated_now = cpus_halted_by_client(
			wg_policy->policy->related_cpus, PAUSE_THERMAL);

	if (thermal_isolated_now) {
		if (!wg_policy->thermal_isolated) {
			/* Entering thermal isolation */
			wg_policy->thermal_isolated = true;
			wg_policy->policy->cached_resolved_idx = 0;
			final_freq = wg_policy->policy->freq_table[0].frequency;
			__waltgov_update_next_freq(wg_policy, time, final_freq, final_freq);
		} else {
			final_freq = 0; /* no need to change freq, i.e. continue with min freq */
		}
		raw_freq = final_freq;
		freq = raw_freq;
		goto out;
	} else {
		if (wg_policy->thermal_isolated) {
			/* Exiting thermal isolation */
			wg_policy->thermal_isolated = false;
			wg_policy->need_freq_update = true;
		}
	}

	raw_freq = walt_map_util_freq(util, wg_policy, max, wg_driv_cpu->cpu);
	freq = raw_freq;

	cluster = cpu_cluster(policy->cpu);
	if (cpumask_intersects(&cluster->cpus, cpu_partial_halt_mask) &&
	    is_state1())
		skip = true;

	/* ss power: add kernel condition */
	if ((wg_policy->tunables->adaptive_high_freq ||
	     wg_policy->tunables->adaptive_high_freq_kernel) &&
	    !skip) {
		if (raw_freq < get_adaptive_low_freq(wg_policy)) {
			freq = get_adaptive_low_freq(wg_policy);
			wg_driv_cpu->reasons = CPUFREQ_REASON_ADAPTIVE_LOW;
		} else if (raw_freq <= get_adaptive_high_freq(wg_policy)) {
			freq = get_adaptive_high_freq(wg_policy);
			wg_driv_cpu->reasons = CPUFREQ_REASON_ADAPTIVE_HIGH;
		}
	}

#if IS_ENABLED(CONFIG_SCHED_POWER_OPTIMIZE)
	if (wg_policy->force_up_delay)
		freq = min(wg_policy->tunables->up_delay_freq, freq);
#endif

	if (freq > fmax_cap[SMART_FMAX_CAP][cluster->id]) {
		freq = fmax_cap[SMART_FMAX_CAP][cluster->id];
		wg_driv_cpu->reasons |= CPUFREQ_REASON_SMART_FMAX_CAP;
	}

	if (freq > fmax_cap[HIGH_PERF_CAP][cluster->id]) {
		freq = fmax_cap[HIGH_PERF_CAP][cluster->id];
		wg_driv_cpu->reasons |= CPUFREQ_REASON_HIGH_PERF_CAP;
	}

	if (freq > fmax_cap[PARTIAL_HALT_CAP][cluster->id]) {
		freq = fmax_cap[PARTIAL_HALT_CAP][cluster->id];
		wg_driv_cpu->reasons |= CPUFREQ_REASON_PARTIAL_HALT_CAP;
	}

	if (freq > fmax_cap[FREQ_REL_CAP][cluster->id]) {
		freq = fmax_cap[FREQ_REL_CAP][cluster->id];
		wg_driv_cpu->reasons |= CPUFREQ_REASON_FREQ_REL_CAP;
	}

	if (wg_policy->cached_raw_freq && freq == wg_policy->cached_raw_freq &&
	    !wg_policy->need_freq_update) {
		final_freq = 0;
		goto out;
	}

	wg_policy->need_freq_update = false;

	final_freq = cpufreq_driver_resolve_freq(policy, freq);
	if (!waltgov_update_next_freq(wg_policy, time, final_freq, freq)) {
		final_freq = 0;
		goto out;
	}

out:
	trace_waltgov_next_freq(policy->cpu, util, max, raw_freq, freq,
				policy->min, policy->max,
				wg_policy->cached_raw_freq, wg_policy->need_freq_update,
				wg_policy->thermal_isolated,
				wg_driv_cpu->cpu, wg_driv_cpu->reasons);

	return final_freq;
}

static unsigned long waltgov_get_util(struct waltgov_cpu *wg_cpu)
{
	struct rq *rq = cpu_rq(wg_cpu->cpu);
	unsigned long max = arch_scale_cpu_capacity(wg_cpu->cpu);
	unsigned long util;

	wg_cpu->max = max;
	wg_cpu->reasons = 0;
	util = cpu_util_freq_walt(wg_cpu->cpu, &wg_cpu->walt_load, &wg_cpu->reasons);
	return uclamp_rq_util_with(rq, util, NULL);
}

#define NL_RATIO 75
#define DEFAULT_HISPEED_LOAD 90
#define DEFAULT_SILVER_RTG_BOOST_FREQ 1000000
#define DEFAULT_GOLD_RTG_BOOST_FREQ 768000
#define DEFAULT_PRIME_RTG_BOOST_FREQ 0
#define DEFAULT_TARGET_LOAD_THRESH 1024
#define DEFAULT_TARGET_LOAD_SHIFT 4

static inline void max_and_reason(unsigned long *cur_util, unsigned long boost_util,
				  struct waltgov_cpu *wg_cpu, unsigned int reason)
{
	if (boost_util && boost_util >= *cur_util) {
		*cur_util = boost_util;
		wg_cpu->reasons = reason;
		wg_cpu->wg_policy->driving_cpu = wg_cpu->cpu;
	}
}
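
/*
 * Raise the aggregated util for WALT-specific conditions: RTG boost,
 * hispeed load, new-load dominance (NL_RATIO) and predicted load (PL),
 * each tagged with a reason for tracing. Early detection only contributes
 * a reason flag here.
 */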
static void waltgov_walt_adjust(struct waltgov_cpu *wg_cpu, unsigned long cpu_util,
				unsigned long nl, unsigned long *util,
				unsigned long *max)
{
	struct waltgov_policy *wg_policy = wg_cpu->wg_policy;
	bool is_migration = wg_cpu->flags & WALT_CPUFREQ_IC_MIGRATION;
	bool is_rtg_boost = wg_cpu->walt_load.rtgb_active;
	bool is_hiload;
	bool employ_ed_boost = wg_cpu->walt_load.ed_active && sysctl_ed_boost_pct;
	unsigned long pl = wg_cpu->walt_load.pl;

	if (is_rtg_boost && (!cpumask_test_cpu(wg_cpu->cpu, cpu_partial_halt_mask) ||
			     !is_state1()))
		max_and_reason(util, wg_policy->rtg_boost_util, wg_cpu, CPUFREQ_REASON_RTG_BOOST);

	is_hiload = (cpu_util >= mult_frac(wg_policy->avg_cap,
					   wg_policy->tunables->hispeed_load,
					   100));

	if (cpumask_test_cpu(wg_cpu->cpu, cpu_partial_halt_mask) &&
	    is_state1())
		is_hiload = false;

	if (is_hiload && !is_migration)
		max_and_reason(util, wg_policy->hispeed_util, wg_cpu, CPUFREQ_REASON_HISPEED);

	if (is_hiload && nl >= mult_frac(cpu_util, NL_RATIO, 100))
		max_and_reason(util, *max, wg_cpu, CPUFREQ_REASON_NWD);

	if (wg_policy->tunables->pl) {
		if (sysctl_sched_conservative_pl)
#if IS_ENABLED(CONFIG_SCHED_POWER_OPTIMIZE)
			pl = mult_frac(pl, TARGET_LOAD_PL, 100);
#else
			pl = mult_frac(pl, TARGET_LOAD, 100);
#endif
		max_and_reason(util, pl, wg_cpu, CPUFREQ_REASON_PL);
	}

	if (employ_ed_boost)
		wg_cpu->reasons |= CPUFREQ_REASON_EARLY_DET;
}

static inline unsigned long target_util(struct waltgov_policy *wg_policy,
					unsigned int freq)
{
	unsigned long util;

	util = freq_to_util(wg_policy, freq);

	if (is_min_possible_cluster_cpu(wg_policy->policy->cpu) &&
	    util >= wg_policy->tunables->target_load_thresh)
		util = mult_frac(util, 94, 100);
	else
		util = mult_frac(util, TARGET_LOAD, 100);

	return util;
}
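
/*
 * Aggregate utilization across all CPUs in the policy (optionally scaled
 * by the boost tunable), pick the CPU with the highest util/max ratio as
 * the driving CPU, fold in the WALT adjustments, and hand the result to
 * get_next_freq().
 */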
static unsigned int waltgov_next_freq_shared(struct waltgov_cpu *wg_cpu, u64 time)
{
	struct waltgov_policy *wg_policy = wg_cpu->wg_policy;
	struct cpufreq_policy *policy = wg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;
	int boost = wg_policy->tunables->boost;

	for_each_cpu(j, policy->cpus) {
		struct waltgov_cpu *j_wg_cpu = &per_cpu(waltgov_cpu, j);
		unsigned long j_util, j_max, j_nl;

		/*
		 * If the util value for all CPUs in a policy is 0, just using >
		 * will result in a max value of 1. WALT stats can later update
		 * the aggregated util value, causing get_next_freq() to compute
		 * freq = max_freq * 1.25 * (util / max) for nonzero util,
		 * leading to spurious jumps to fmax.
		 */
		j_util = j_wg_cpu->util;
		j_nl = j_wg_cpu->walt_load.nl;
		j_max = j_wg_cpu->max;
		if (boost) {
			j_util = mult_frac(j_util, boost + 100, 100);
			j_nl = mult_frac(j_nl, boost + 100, 100);
		}

		if (j_util * max >= j_max * util) {
			util = j_util;
			max = j_max;
			wg_policy->driving_cpu = j;
		}

		waltgov_walt_adjust(j_wg_cpu, j_util, j_nl, &util, &max);
	}

	return get_next_freq(wg_policy, util, max, wg_cpu, time);
}
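
/*
 * Scheduler-side callback: refresh this CPU's util, recompute the
 * per-window average capacity, and kick a frequency update via fast
 * switch or the deferred kthread path, unless rate limits or the
 * WALT_CPUFREQ_CONTINUE flag say otherwise.
 */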
static void waltgov_update_freq(struct waltgov_callback *cb, u64 time,
				unsigned int flags)
{
	struct waltgov_cpu *wg_cpu = container_of(cb, struct waltgov_cpu, cb);
	struct waltgov_policy *wg_policy = wg_cpu->wg_policy;
	unsigned long hs_util, rtg_boost_util;
	unsigned int next_f;

	if (!wg_policy->tunables->pl && flags & WALT_CPUFREQ_PL)
		return;

	wg_cpu->util = waltgov_get_util(wg_cpu);
	wg_cpu->flags = flags;
	raw_spin_lock(&wg_policy->update_lock);

	if (wg_policy->max != wg_cpu->max) {
		wg_policy->max = wg_cpu->max;
		hs_util = target_util(wg_policy,
				      wg_policy->tunables->hispeed_freq);
		wg_policy->hispeed_util = hs_util;

		rtg_boost_util = target_util(wg_policy,
					     wg_policy->tunables->rtg_boost_freq);
		wg_policy->rtg_boost_util = rtg_boost_util;
	}

	waltgov_calc_avg_cap(wg_policy, wg_cpu->walt_load.ws,
			     wg_policy->policy->cur);

	trace_waltgov_util_update(wg_cpu->cpu, wg_cpu->util, wg_policy->avg_cap,
				  wg_cpu->max, wg_cpu->walt_load.nl,
				  wg_cpu->walt_load.pl,
				  wg_cpu->walt_load.rtgb_active, flags);

	if (waltgov_should_update_freq(wg_policy, time) &&
	    !(flags & WALT_CPUFREQ_CONTINUE)) {
		next_f = waltgov_next_freq_shared(wg_cpu, time);
		if (!next_f)
			goto out;

		if (wg_policy->policy->fast_switch_enabled)
			waltgov_fast_switch(wg_policy, time, next_f);
		else
			waltgov_deferred_update(wg_policy, time, next_f);
	}

out:
	raw_spin_unlock(&wg_policy->update_lock);
}

static void waltgov_work(struct kthread_work *work)
{
	struct waltgov_policy *wg_policy = container_of(work, struct waltgov_policy, work);
	unsigned int freq;
	unsigned long flags;

	raw_spin_lock_irqsave(&wg_policy->update_lock, flags);
	freq = wg_policy->next_freq;
	waltgov_track_cycles(wg_policy, wg_policy->policy->cur,
			     walt_sched_clock());
	raw_spin_unlock_irqrestore(&wg_policy->update_lock, flags);

	mutex_lock(&wg_policy->work_lock);
	__cpufreq_driver_target(wg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&wg_policy->work_lock);
}

static void waltgov_irq_work(struct irq_work *irq_work)
{
	struct waltgov_policy *wg_policy;

	wg_policy = container_of(irq_work, struct waltgov_policy, irq_work);

	kthread_queue_work(&wg_policy->worker, &wg_policy->work);
}

/************************** sysfs interface ************************/

static inline struct waltgov_tunables *to_waltgov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct waltgov_tunables, attr_set);
}

static DEFINE_MUTEX(min_rate_lock);

static void update_min_rate_limit_ns(struct waltgov_policy *wg_policy)
{
	mutex_lock(&min_rate_lock);
	wg_policy->min_rate_limit_ns = min(wg_policy->up_rate_delay_ns,
					   wg_policy->down_rate_delay_ns);
	mutex_unlock(&min_rate_lock);
}

static ssize_t up_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->up_rate_limit_us);
}

static ssize_t down_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->down_rate_limit_us);
}

static ssize_t up_rate_limit_us_store(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);
	struct waltgov_policy *wg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->up_rate_limit_us = rate_limit_us;

	list_for_each_entry(wg_policy, &attr_set->policy_list, tunables_hook) {
		wg_policy->up_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
		update_min_rate_limit_ns(wg_policy);
	}

	return count;
}

static ssize_t down_rate_limit_us_store(struct gov_attr_set *attr_set,
					const char *buf, size_t count)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);
	struct waltgov_policy *wg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->down_rate_limit_us = rate_limit_us;

	list_for_each_entry(wg_policy, &attr_set->policy_list, tunables_hook) {
		wg_policy->down_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
		update_min_rate_limit_ns(wg_policy);
	}

	return count;
}

static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us);
static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us);

static ssize_t hispeed_load_show(struct gov_attr_set *attr_set, char *buf)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->hispeed_load);
}

static ssize_t hispeed_load_store(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);

	if (kstrtouint(buf, 10, &tunables->hispeed_load))
		return -EINVAL;

	tunables->hispeed_load = min(100U, tunables->hispeed_load);

	return count;
}

static ssize_t hispeed_freq_show(struct gov_attr_set *attr_set, char *buf)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->hispeed_freq);
}

static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);
	unsigned int val;
	struct waltgov_policy *wg_policy;
	unsigned long hs_util;
	unsigned long flags;

	if (kstrtouint(buf, 10, &val))
		return -EINVAL;

	tunables->hispeed_freq = val;

	list_for_each_entry(wg_policy, &attr_set->policy_list, tunables_hook) {
		raw_spin_lock_irqsave(&wg_policy->update_lock, flags);
		hs_util = target_util(wg_policy,
				      wg_policy->tunables->hispeed_freq);
		wg_policy->hispeed_util = hs_util;
		raw_spin_unlock_irqrestore(&wg_policy->update_lock, flags);
	}

	return count;
}

static ssize_t rtg_boost_freq_show(struct gov_attr_set *attr_set, char *buf)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->rtg_boost_freq);
}

static ssize_t rtg_boost_freq_store(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);
	unsigned int val;
	struct waltgov_policy *wg_policy;
	unsigned long rtg_boost_util;
	unsigned long flags;

	if (kstrtouint(buf, 10, &val))
		return -EINVAL;

	tunables->rtg_boost_freq = val;

	list_for_each_entry(wg_policy, &attr_set->policy_list, tunables_hook) {
		raw_spin_lock_irqsave(&wg_policy->update_lock, flags);
		rtg_boost_util = target_util(wg_policy,
					     wg_policy->tunables->rtg_boost_freq);
		wg_policy->rtg_boost_util = rtg_boost_util;
		raw_spin_unlock_irqrestore(&wg_policy->update_lock, flags);
	}

	return count;
}

static ssize_t pl_show(struct gov_attr_set *attr_set, char *buf)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->pl);
}

static ssize_t pl_store(struct gov_attr_set *attr_set, const char *buf,
			size_t count)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);

	if (kstrtobool(buf, &tunables->pl))
		return -EINVAL;

	return count;
}

static ssize_t boost_show(struct gov_attr_set *attr_set, char *buf)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%d\n", tunables->boost);
}

static ssize_t boost_store(struct gov_attr_set *attr_set, const char *buf,
			   size_t count)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);
	struct waltgov_policy *wg_policy;
	int val;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;

	if (val < -100 || val > 1000)
		return -EINVAL;

	tunables->boost = val;

	list_for_each_entry(wg_policy, &attr_set->policy_list, tunables_hook) {
		struct rq *rq = cpu_rq(wg_policy->policy->cpu);
		unsigned long flags;

		raw_spin_lock_irqsave(&rq->__lock, flags);
		waltgov_run_callback(rq, WALT_CPUFREQ_BOOST_UPDATE);
		raw_spin_unlock_irqrestore(&rq->__lock, flags);
	}

	return count;
}

#if IS_ENABLED(CONFIG_SCHED_POWER_OPTIMIZE)
static ssize_t up_delay_freq_show(struct gov_attr_set *attr_set, char *buf)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->up_delay_freq);
}

static ssize_t up_delay_freq_store(struct gov_attr_set *attr_set,
				   const char *buf, size_t count)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);
	unsigned int val;
	struct waltgov_policy *wg_policy;
	unsigned long flags;

	if (kstrtouint(buf, 10, &val))
		return -EINVAL;

	if (val < 0)
		val = 0;

	list_for_each_entry(wg_policy, &attr_set->policy_list, tunables_hook) {
		raw_spin_lock_irqsave(&wg_policy->update_lock, flags);
		if (val > wg_policy->policy->cpuinfo.max_freq)
			val = wg_policy->policy->cpuinfo.max_freq;
		raw_spin_unlock_irqrestore(&wg_policy->update_lock, flags);
	}

	tunables->up_delay_freq = val;

	return count;
}
#endif

/**
 * cpufreq_walt_set_adaptive_freq() - set the waltgov adaptive freq for cpu
 * @cpu: the cpu for which the values should be set
 * @adaptive_low_freq: low freq
 * @adaptive_high_freq: high freq
 *
 * Configure the adaptive_low/high_freq for the cpu specified. This will impact all
 * cpus governed by the policy (e.g. all cpus in a cluster). The actual value used
 * for adaptive frequencies will be governed by the user space setting for the
 * policy, and this value.
 *
 * Return: 0 if successful, error otherwise
 */
int cpufreq_walt_set_adaptive_freq(unsigned int cpu, unsigned int adaptive_low_freq,
				   unsigned int adaptive_high_freq)
{
	struct waltgov_cpu *wg_cpu = &per_cpu(waltgov_cpu, cpu);
	struct waltgov_policy *wg_policy;
	struct cpufreq_policy *policy;

	if (unlikely(walt_disabled))
		return -EAGAIN;

	if (!cpu_possible(cpu))
		return -EFAULT;

	/* ss power: check NULL condition */
	wg_policy = wg_cpu->wg_policy;
	if (!wg_policy) {
		pr_err("%s: wg_policy NULL !!\n", __func__);
		return -EFAULT;
	}

	policy = wg_policy->policy;
	if (!policy) {
		pr_err("%s: policy NULL !!\n", __func__);
		return -EFAULT;
	}

	if (policy->cpuinfo.min_freq <= adaptive_low_freq &&
	    policy->cpuinfo.max_freq >= adaptive_high_freq) {
		wg_policy->tunables->adaptive_low_freq_kernel = adaptive_low_freq;
		wg_policy->tunables->adaptive_high_freq_kernel = adaptive_high_freq;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(cpufreq_walt_set_adaptive_freq);
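
/*
 * Illustrative use only (not taken from this file): a client module could
 * pin the adaptive range for CPU0's policy and later undo it. The
 * frequencies below are made-up values in kHz; real callers must pass
 * frequencies within the policy's cpuinfo limits.
 *
 *	if (!cpufreq_walt_set_adaptive_freq(0, 300000, 1000000))
 *		pr_info("adaptive range applied\n");
 *	...
 *	cpufreq_walt_reset_adaptive_freq(0);
 */
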
/**
 * cpufreq_walt_get_adaptive_freq() - get the waltgov adaptive freq for cpu
 * @cpu: the cpu for which the values should be returned
 * @adaptive_low_freq: pointer to write the current kernel adaptive_low_freq value
 * @adaptive_high_freq: pointer to write the current kernel adaptive_high_freq value
 *
 * Get the currently active adaptive_low/high_freq for the cpu specified.
 *
 * Return: 0 if successful, error otherwise
 */
int cpufreq_walt_get_adaptive_freq(unsigned int cpu, unsigned int *adaptive_low_freq,
				   unsigned int *adaptive_high_freq)
{
	struct waltgov_cpu *wg_cpu = &per_cpu(waltgov_cpu, cpu);
	struct waltgov_policy *wg_policy;

	if (unlikely(walt_disabled))
		return -EAGAIN;

	if (!cpu_possible(cpu))
		return -EFAULT;

	/* ss power: check NULL condition */
	wg_policy = wg_cpu->wg_policy;
	if (!wg_policy) {
		pr_err("%s: wg_policy NULL !!\n", __func__);
		return -EFAULT;
	}

	if (adaptive_low_freq && adaptive_high_freq) {
		*adaptive_low_freq = get_adaptive_low_freq(wg_policy);
		*adaptive_high_freq = get_adaptive_high_freq(wg_policy);
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(cpufreq_walt_get_adaptive_freq);

/**
 * cpufreq_walt_reset_adaptive_freq() - reset the waltgov adaptive freq for cpu
 * @cpu: the cpu for which the values should be set
 *
 * Reset the kernel adaptive_low/high_freq to zero.
 *
 * Return: 0 if successful, error otherwise
 */
int cpufreq_walt_reset_adaptive_freq(unsigned int cpu)
{
	struct waltgov_cpu *wg_cpu = &per_cpu(waltgov_cpu, cpu);
	struct waltgov_policy *wg_policy;

	if (unlikely(walt_disabled))
		return -EAGAIN;

	if (!cpu_possible(cpu))
		return -EFAULT;

	/* ss power: check NULL condition */
	wg_policy = wg_cpu->wg_policy;
	if (!wg_policy) {
		pr_err("%s: wg_policy NULL !!\n", __func__);
		return -EFAULT;
	}

	wg_policy->tunables->adaptive_low_freq_kernel = 0;
	wg_policy->tunables->adaptive_high_freq_kernel = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_walt_reset_adaptive_freq);
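
/*
 * Boilerplate sysfs helpers: show_attr()/store_attr() generate the
 * accessors for simple unsigned int tunables, and WALTGOV_ATTR_RW()
 * emits the matching governor_attr definition.
 */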
#define WALTGOV_ATTR_RW(_name)						\
static struct governor_attr _name =					\
__ATTR(_name, 0644, show_##_name, store_##_name)			\

#define show_attr(name)							\
static ssize_t show_##name(struct gov_attr_set *attr_set, char *buf)	\
{									\
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set); \
	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->name);	\
}									\

#define store_attr(name)						\
static ssize_t store_##name(struct gov_attr_set *attr_set,		\
			    const char *buf, size_t count)		\
{									\
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set); \
									\
	if (kstrtouint(buf, 10, &tunables->name))			\
		return -EINVAL;						\
									\
	return count;							\
}									\
show_attr(adaptive_low_freq);
store_attr(adaptive_low_freq);
show_attr(adaptive_high_freq);
store_attr(adaptive_high_freq);
show_attr(target_load_thresh);
show_attr(target_load_shift);
store_attr(target_load_shift);

static ssize_t store_target_load_thresh(struct gov_attr_set *attr_set,
					const char *buf, size_t count)
{
	struct waltgov_tunables *tunables = to_waltgov_tunables(attr_set);
	struct waltgov_policy *wg_policy;

	if (kstrtouint(buf, 10, &tunables->target_load_thresh))
		return -EINVAL;

	list_for_each_entry(wg_policy, &attr_set->policy_list, tunables_hook) {
		unsigned long flags;

		raw_spin_lock_irqsave(&wg_policy->update_lock, flags);
		wg_policy->hispeed_util = target_util(wg_policy,
						      wg_policy->tunables->hispeed_freq);
		wg_policy->rtg_boost_util = target_util(wg_policy,
							wg_policy->tunables->rtg_boost_freq);
		raw_spin_unlock_irqrestore(&wg_policy->update_lock, flags);
	}

	return count;
}

static struct governor_attr hispeed_load = __ATTR_RW(hispeed_load);
static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
static struct governor_attr rtg_boost_freq = __ATTR_RW(rtg_boost_freq);
static struct governor_attr pl = __ATTR_RW(pl);
static struct governor_attr boost = __ATTR_RW(boost);
#if IS_ENABLED(CONFIG_SCHED_POWER_OPTIMIZE)
static struct governor_attr up_delay_freq = __ATTR_RW(up_delay_freq);
#endif
WALTGOV_ATTR_RW(adaptive_low_freq);
WALTGOV_ATTR_RW(adaptive_high_freq);
WALTGOV_ATTR_RW(target_load_thresh);
WALTGOV_ATTR_RW(target_load_shift);
/* ss power: add kernel freq node */
show_attr(adaptive_low_freq_kernel);
store_attr(adaptive_low_freq_kernel);
show_attr(adaptive_high_freq_kernel);
store_attr(adaptive_high_freq_kernel);
WALTGOV_ATTR_RW(adaptive_low_freq_kernel);
WALTGOV_ATTR_RW(adaptive_high_freq_kernel);

static struct attribute *waltgov_attrs[] = {
	&up_rate_limit_us.attr,
	&down_rate_limit_us.attr,
	&hispeed_load.attr,
	&hispeed_freq.attr,
	&rtg_boost_freq.attr,
	&pl.attr,
	&boost.attr,
#if IS_ENABLED(CONFIG_SCHED_POWER_OPTIMIZE)
	&up_delay_freq.attr,
#endif
	&adaptive_low_freq.attr,
	&adaptive_high_freq.attr,
	&target_load_thresh.attr,
	&target_load_shift.attr,
	&adaptive_low_freq_kernel.attr,
	&adaptive_high_freq_kernel.attr,
	NULL
};
ATTRIBUTE_GROUPS(waltgov);

static struct kobj_type waltgov_tunables_ktype = {
	.default_groups	= waltgov_groups,
	.sysfs_ops	= &governor_sysfs_ops,
};

/********************** cpufreq governor interface *********************/

static struct cpufreq_governor walt_gov;

static struct waltgov_policy *waltgov_policy_alloc(struct cpufreq_policy *policy)
{
	struct waltgov_policy *wg_policy;

	wg_policy = kzalloc(sizeof(*wg_policy), GFP_KERNEL);
	if (!wg_policy)
		return NULL;

	wg_policy->policy = policy;
	raw_spin_lock_init(&wg_policy->update_lock);
	return wg_policy;
}

static void waltgov_policy_free(struct waltgov_policy *wg_policy)
{
	kfree(wg_policy);
}

static int waltgov_kthread_create(struct waltgov_policy *wg_policy)
{
	struct task_struct *thread;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
	struct cpufreq_policy *policy = wg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&wg_policy->work, waltgov_work);
	kthread_init_worker(&wg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &wg_policy->worker,
				"waltgov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create waltgov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
		return ret;
	}

	wg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&wg_policy->irq_work, waltgov_irq_work);
	mutex_init(&wg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}

static void waltgov_kthread_stop(struct waltgov_policy *wg_policy)
{
	/* kthread only required for slow path */
	if (wg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&wg_policy->worker);
	kthread_stop(wg_policy->thread);
	mutex_destroy(&wg_policy->work_lock);
}

static void waltgov_tunables_save(struct cpufreq_policy *policy,
				  struct waltgov_tunables *tunables)
{
	int cpu;
	struct waltgov_tunables *cached = per_cpu(cached_tunables, policy->cpu);

	if (!cached) {
		cached = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!cached)
			return;

		for_each_cpu(cpu, policy->related_cpus)
			per_cpu(cached_tunables, cpu) = cached;
	}

	cached->pl = tunables->pl;
	cached->hispeed_load = tunables->hispeed_load;
	cached->rtg_boost_freq = tunables->rtg_boost_freq;
	cached->hispeed_freq = tunables->hispeed_freq;
	cached->up_rate_limit_us = tunables->up_rate_limit_us;
	cached->down_rate_limit_us = tunables->down_rate_limit_us;
	cached->boost = tunables->boost;
	cached->adaptive_low_freq = tunables->adaptive_low_freq;
	cached->adaptive_high_freq = tunables->adaptive_high_freq;
	cached->adaptive_low_freq_kernel = tunables->adaptive_low_freq_kernel;
	cached->adaptive_high_freq_kernel = tunables->adaptive_high_freq_kernel;
	cached->target_load_thresh = tunables->target_load_thresh;
	cached->target_load_shift = tunables->target_load_shift;
#if IS_ENABLED(CONFIG_SCHED_POWER_OPTIMIZE)
	cached->up_delay_freq = tunables->up_delay_freq;
#endif
}

static void waltgov_tunables_restore(struct cpufreq_policy *policy)
{
	struct waltgov_policy *wg_policy = policy->governor_data;
	struct waltgov_tunables *tunables = wg_policy->tunables;
	struct waltgov_tunables *cached = per_cpu(cached_tunables, policy->cpu);

	if (!cached)
		return;

	tunables->pl = cached->pl;
	tunables->hispeed_load = cached->hispeed_load;
	tunables->rtg_boost_freq = cached->rtg_boost_freq;
	tunables->hispeed_freq = cached->hispeed_freq;
	tunables->up_rate_limit_us = cached->up_rate_limit_us;
	tunables->down_rate_limit_us = cached->down_rate_limit_us;
	tunables->boost = cached->boost;
	tunables->adaptive_low_freq = cached->adaptive_low_freq;
	tunables->adaptive_high_freq = cached->adaptive_high_freq;
	tunables->adaptive_low_freq_kernel = cached->adaptive_low_freq_kernel;
	tunables->adaptive_high_freq_kernel = cached->adaptive_high_freq_kernel;
	tunables->target_load_thresh = cached->target_load_thresh;
	tunables->target_load_shift = cached->target_load_shift;
#if IS_ENABLED(CONFIG_SCHED_POWER_OPTIMIZE)
	tunables->up_delay_freq = cached->up_delay_freq;
#endif
}

bool waltgov_disabled = true;
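
/*
 * Governor init: enable fast switching where possible, allocate the
 * per-policy data and slow-path kthread, seed the tunables with defaults
 * (restoring any values cached from a previous governor exit), and
 * register the sysfs attribute group.
 */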
static int waltgov_init(struct cpufreq_policy *policy)
{
	struct waltgov_policy *wg_policy;
	struct waltgov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	BUG_ON(policy->fast_switch_possible && !policy->fast_switch_enabled);

	wg_policy = waltgov_policy_alloc(policy);
	if (!wg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = waltgov_kthread_create(wg_policy);
	if (ret)
		goto free_wg_policy;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	gov_attr_set_init(&tunables->attr_set, &wg_policy->tunables_hook);
	tunables->hispeed_load = DEFAULT_HISPEED_LOAD;
	tunables->target_load_thresh = DEFAULT_TARGET_LOAD_THRESH;
	tunables->target_load_shift = DEFAULT_TARGET_LOAD_SHIFT;

	if (is_min_possible_cluster_cpu(policy->cpu))
		tunables->rtg_boost_freq = DEFAULT_SILVER_RTG_BOOST_FREQ;
	else if (is_max_possible_cluster_cpu(policy->cpu))
		tunables->rtg_boost_freq = DEFAULT_PRIME_RTG_BOOST_FREQ;
	else
		tunables->rtg_boost_freq = DEFAULT_GOLD_RTG_BOOST_FREQ;

#if IS_ENABLED(CONFIG_SCHED_POWER_OPTIMIZE)
	tunables->up_delay_freq = policy->cpuinfo.max_freq;
#endif

	policy->governor_data = wg_policy;
	wg_policy->tunables = tunables;
	waltgov_tunables_restore(policy);

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &waltgov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   walt_gov.name);
	if (ret)
		goto fail;

	return 0;

fail:
	kobject_put(&tunables->attr_set.kobj);
	policy->governor_data = NULL;
	kfree(tunables);

stop_kthread:
	waltgov_kthread_stop(wg_policy);

free_wg_policy:
	waltgov_policy_free(wg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void waltgov_exit(struct cpufreq_policy *policy)
{
	struct waltgov_policy *wg_policy = policy->governor_data;
	struct waltgov_tunables *tunables = wg_policy->tunables;
	unsigned int count;

	count = gov_attr_set_put(&tunables->attr_set, &wg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count) {
		waltgov_tunables_save(policy, tunables);
		kfree(tunables);
	}

	waltgov_kthread_stop(wg_policy);
	waltgov_policy_free(wg_policy);
	cpufreq_disable_fast_switch(policy);
}

static int waltgov_start(struct cpufreq_policy *policy)
{
	struct waltgov_policy *wg_policy = policy->governor_data;
	unsigned int cpu;

	wg_policy->up_rate_delay_ns =
		wg_policy->tunables->up_rate_limit_us * NSEC_PER_USEC;
	wg_policy->down_rate_delay_ns =
		wg_policy->tunables->down_rate_limit_us * NSEC_PER_USEC;
	update_min_rate_limit_ns(wg_policy);
	wg_policy->last_freq_update_time = 0;
	wg_policy->next_freq = 0;
	wg_policy->limits_changed = false;
	wg_policy->need_freq_update = false;
	wg_policy->cached_raw_freq = 0;
#if IS_ENABLED(CONFIG_SCHED_POWER_OPTIMIZE)
	wg_policy->force_up_delay = true;
#endif

	for_each_cpu(cpu, policy->cpus) {
		struct waltgov_cpu *wg_cpu = &per_cpu(waltgov_cpu, cpu);

		memset(wg_cpu, 0, sizeof(*wg_cpu));
		wg_cpu->cpu = cpu;
		wg_cpu->wg_policy = wg_policy;
	}

	for_each_cpu(cpu, policy->cpus) {
		struct waltgov_cpu *wg_cpu = &per_cpu(waltgov_cpu, cpu);

		waltgov_add_callback(cpu, &wg_cpu->cb, waltgov_update_freq);
	}

	waltgov_disabled = false;

	return 0;
}

static void waltgov_stop(struct cpufreq_policy *policy)
{
	struct waltgov_policy *wg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		waltgov_remove_callback(cpu);

	synchronize_rcu();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&wg_policy->irq_work);
		kthread_cancel_work_sync(&wg_policy->work);
	}

	waltgov_disabled = true;
}
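
/*
 * ->limits() callback: when the policy min/max change, re-apply them
 * immediately (slow path via cpufreq_policy_apply_limits(), fast path by
 * resolving and switching directly), then flag limits_changed so the next
 * scheduler callback re-evaluates the frequency.
 */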
static void waltgov_limits(struct cpufreq_policy *policy)
{
	struct waltgov_policy *wg_policy = policy->governor_data;
	unsigned long flags, now;
	unsigned int freq, final_freq;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&wg_policy->work_lock);
		raw_spin_lock_irqsave(&wg_policy->update_lock, flags);
		waltgov_track_cycles(wg_policy, wg_policy->policy->cur,
				     walt_sched_clock());
		raw_spin_unlock_irqrestore(&wg_policy->update_lock, flags);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&wg_policy->work_lock);
	} else {
		raw_spin_lock_irqsave(&wg_policy->update_lock, flags);
		if (!wg_policy->thermal_isolated) {
			freq = policy->cur;
			now = walt_sched_clock();
			/*
			 * cpufreq_driver_resolve_freq() has a clamp, so we do not need
			 * to do any sort of additional validation here.
			 */
			final_freq = cpufreq_driver_resolve_freq(policy, freq);
			if (wg_policy->next_freq != final_freq) {
				__waltgov_update_next_freq(wg_policy, now, final_freq, final_freq);
				waltgov_fast_switch(wg_policy, now, final_freq);
			}
		}
		raw_spin_unlock_irqrestore(&wg_policy->update_lock, flags);
	}

	wg_policy->limits_changed = true;
}

static struct cpufreq_governor walt_gov = {
	.name		= "walt",
	.init		= waltgov_init,
	.exit		= waltgov_exit,
	.start		= waltgov_start,
	.stop		= waltgov_stop,
	.limits		= waltgov_limits,
	.owner		= THIS_MODULE,
};

int waltgov_register(void)
{
	return cpufreq_register_governor(&walt_gov);
}