// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <trace/hooks/sched.h>

#include "walt.h"
#include "trace.h"

static int neg_four = -4;
static int four = 4;
static int two_hundred_fifty_five = 255;
static unsigned int ns_per_sec = NSEC_PER_SEC;
static unsigned int one_hundred_thousand = 100000;
static unsigned int two_hundred_million = 200000000;
static int __maybe_unused two = 2;
static int one_hundred = 100;
static int one_thousand = 1000;
static int one_thousand_twenty_four = 1024;
static int two_thousand = 2000;
static int walt_max_cpus = WALT_NR_CPUS;

/*
 * CFS task prio range is [100 ... 139]
 * 120 is the default prio.
 * RTG boost range is [100 ... 119] because giving
 * boost for [120 ... 139] does not make sense.
 * 99 means disabled and it is the default value.
 */
static unsigned int min_cfs_boost_prio = 99;
static unsigned int max_cfs_boost_prio = 119;
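/*
 * Illustrative reading of the range above (semantics inferred, not taken
 * from this file): a write of 110 to walt_rtg_cfs_boost_prio would boost
 * RTG CFS tasks to prio 110 (numerically lower means higher priority), so
 * only values in [100 ... 119] can improve on the default prio of 120,
 * and 99 keeps the boost disabled.
 */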

unsigned int sysctl_sched_capacity_margin_up_pct[MAX_MARGIN_LEVELS];
unsigned int sysctl_sched_capacity_margin_dn_pct[MAX_MARGIN_LEVELS];
unsigned int sysctl_sched_busy_hyst_enable_cpus;
unsigned int sysctl_sched_busy_hyst;
unsigned int sysctl_sched_coloc_busy_hyst_enable_cpus;
unsigned int sysctl_sched_coloc_busy_hyst_cpu[WALT_NR_CPUS];
unsigned int sysctl_sched_coloc_busy_hyst_max_ms;
unsigned int sysctl_sched_coloc_busy_hyst_cpu_busy_pct[WALT_NR_CPUS];
unsigned int sysctl_sched_util_busy_hyst_enable_cpus;
unsigned int sysctl_sched_util_busy_hyst_cpu[WALT_NR_CPUS];
unsigned int sysctl_sched_util_busy_hyst_cpu_util[WALT_NR_CPUS];
unsigned int sysctl_sched_boost;
unsigned int sysctl_sched_wake_up_idle[2];
unsigned int sysctl_input_boost_ms;
unsigned int sysctl_input_boost_freq[8];
unsigned int sysctl_sched_boost_on_input;
unsigned int sysctl_sched_early_up[MAX_MARGIN_LEVELS];
unsigned int sysctl_sched_early_down[MAX_MARGIN_LEVELS];

/* sysctl nodes accessed by other files */
unsigned int __read_mostly sysctl_sched_coloc_downmigrate_ns;
unsigned int __read_mostly sysctl_sched_group_downmigrate_pct;
unsigned int __read_mostly sysctl_sched_group_upmigrate_pct;
unsigned int __read_mostly sysctl_sched_window_stats_policy;
unsigned int sysctl_sched_ravg_window_nr_ticks;
unsigned int sysctl_sched_ravg_window_nr_ticks_user;
unsigned int sysctl_sched_walt_rotate_big_tasks;
unsigned int sysctl_sched_task_unfilter_period;
unsigned int sysctl_walt_low_latency_task_threshold; /* disabled by default */
unsigned int sysctl_sched_conservative_pl;
unsigned int sysctl_sched_min_task_util_for_boost = 51;
unsigned int sysctl_sched_min_task_util_for_uclamp = 51;
unsigned int sysctl_sched_min_task_util_for_colocation = 35;
unsigned int sysctl_sched_many_wakeup_threshold = WALT_MANY_WAKEUP_DEFAULT;
const int sched_user_hint_max = 1000;
unsigned int sysctl_walt_rtg_cfs_boost_prio = 99; /* disabled by default */
unsigned int sysctl_sched_sync_hint_enable = 1;
unsigned int sysctl_panic_on_walt_bug;
unsigned int sysctl_sched_suppress_region2;
unsigned int sysctl_sched_skip_sp_newly_idle_lb = 1;
unsigned int sysctl_sched_hyst_min_coloc_ns = 80000000;
unsigned int sysctl_sched_asymcap_boost;
unsigned int sysctl_sched_long_running_rt_task_ms;
unsigned int sysctl_sched_idle_enough = SCHED_IDLE_ENOUGH_DEFAULT;
unsigned int sysctl_sched_cluster_util_thres_pct = SCHED_CLUSTER_UTIL_THRES_PCT_DEFAULT;
unsigned int sysctl_sched_idle_enough_clust[MAX_CLUSTERS];
unsigned int sysctl_sched_cluster_util_thres_pct_clust[MAX_CLUSTERS];
unsigned int sysctl_ed_boost_pct;
unsigned int sysctl_em_inflate_pct = 100;
unsigned int sysctl_em_inflate_thres = 1024;
unsigned int sysctl_sched_heavy_nr;
unsigned int sysctl_max_freq_partial_halt = FREQ_QOS_MAX_DEFAULT_VALUE;
unsigned int sysctl_fmax_cap[MAX_CLUSTERS];
unsigned int sysctl_sched_sbt_pause_cpus;
unsigned int sysctl_sched_sbt_enable = 1;
unsigned int sysctl_sched_sbt_delay_windows;
unsigned int high_perf_cluster_freq_cap[MAX_CLUSTERS];
unsigned int sysctl_sched_pipeline_cpus;
unsigned int fmax_cap[MAX_FREQ_CAP][MAX_CLUSTERS];
unsigned int sysctl_sched_pipeline_skip_prime;
unsigned int sysctl_sched_fmax_uncap_thresh_ms;
unsigned int sysctl_sched_fmax_uncap_thresh_util;
bool sbt_ongoing;

/* Entries for 4 clusters and 10 tuples (3 items in each tuple) */
unsigned int sysctl_cluster_arr[4][MAX_FREQ_RELATIONS * TUPLE_SIZE] = {
	[0] = {0, 0, 0},
	[1] = {0, 0, 0},
	[2] = {0, 0, 0},
	[3] = {0, 0, 0},
};

struct freq_relation_map relation_data[MAX_CLUSTERS][MAX_FREQ_RELATIONS];

/* range is [1 .. INT_MAX] */
static int sysctl_task_read_pid = 1;

static int sched_freq_map_handler(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos)
{
	int i, idx = 0, ret = -EPERM;
	unsigned int *data = (unsigned int *)table->data;
	static DEFINE_MUTEX(ignore_cluster_mutex);
	static int configured[MAX_CLUSTERS] = {0};
	int index;
	unsigned int val[MAX_FREQ_RELATIONS * TUPLE_SIZE];
	unsigned int src_cluster_fmax;
	unsigned int cluster_freq[MAX_CLUSTERS] = {0};
	struct ctl_table tmp = {
		.data = &val,
		.maxlen = sizeof(unsigned int) * MAX_FREQ_RELATIONS * TUPLE_SIZE,
		.mode = table->mode,
	};

	if (num_sched_clusters <= 1)
		return ret;

	index = (data == sysctl_cluster_arr[0]) ? 0 : (data == sysctl_cluster_arr[1]) ?
		1 : (data == sysctl_cluster_arr[2]) ? 2 : 3;

	/* we are not allowing prime to have any relations for now */
	if (index >= num_sched_clusters - 1)
		return ret;

	mutex_lock(&ignore_cluster_mutex);

	if (!write) {
		ret = proc_dointvec(table, write, buffer, lenp, ppos);
		goto unlock;
	}

	/* updating is allowed only once */
	if (configured[index])
		goto unlock;

	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
	if (ret)
		goto unlock;

	src_cluster_fmax = sched_cluster[index]->max_possible_freq;
	configured[index] = 1;

	/*
	 * tuple format:
	 * <a b c>:
	 * a : source cluster frequency
	 * b : first cpu of target cluster
	 * c : target cluster frequency
	 */
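	/*
	 * Hypothetical example (frequencies invented for illustration):
	 * writing "1000000 4 1200000 1500000 4 1700000" to
	 * sched_cluster0_freq_map installs two tuples: at 1.0 GHz on
	 * cluster 0, the cluster containing CPU4 is related to 1.2 GHz,
	 * and at 1.5 GHz it is related to 1.7 GHz. Per the checks below,
	 * source frequencies must be non-decreasing and target frequencies
	 * strictly increasing across tuples.
	 */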
	for (i = 0; i < MAX_FREQ_RELATIONS; i++) {
		int tgt_cluster_id;

		idx = i * 3;
		if ((val[idx + 0] == 0) || (val[idx + 1] >= cpumask_weight(cpu_possible_mask)) ||
		    (val[idx + 2] == 0))
			break;

		tgt_cluster_id = cpu_cluster(val[idx + 1])->id;
		/* target cpu cannot be of the same or a lower cluster */
		if (tgt_cluster_id <= index)
			break;

		/* source frequency must stay the same or increase */
		if (cluster_freq[index] > val[idx + 0])
			break;
		cluster_freq[index] = val[idx + 0];

		if (cluster_freq[tgt_cluster_id] >= val[idx + 2])
			break;
		cluster_freq[tgt_cluster_id] = val[idx + 2];

		relation_data[index][i].src_freq = data[idx + 0] = val[idx + 0];
		relation_data[index][i].target_cluster_cpu = data[idx + 1] = val[idx + 1];
		relation_data[index][i].tgt_freq = data[idx + 2] = val[idx + 2];
	}

	for (; i < MAX_FREQ_RELATIONS; i++) {
		idx = i * 3;
		relation_data[index][i].src_freq = data[idx + 0] = FREQ_QOS_MAX_DEFAULT_VALUE;
		relation_data[index][i].target_cluster_cpu = data[idx + 1] = -1;
		relation_data[index][i].tgt_freq = data[idx + 2] = FREQ_QOS_MAX_DEFAULT_VALUE;
	}

	update_freq_relation(sched_cluster[index]);

unlock:
	mutex_unlock(&ignore_cluster_mutex);
	return ret;
}

static int walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
					      void __user *buffer, size_t *lenp,
					      loff_t *ppos)
{
	int ret;
	static DEFINE_MUTEX(mutex);
	struct rq *rq = cpu_rq(cpumask_first(cpu_possible_mask));
	unsigned long flags;

	if (unlikely(num_sched_clusters <= 0))
		return -EPERM;

	mutex_lock(&mutex);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write) {
		mutex_unlock(&mutex);
		return ret;
	}

	/*
	 * The load scale factor update happens with all
	 * rqs locked, so acquiring one CPU's rq lock and
	 * updating the thresholds is sufficient for
	 * an atomic update.
	 */
	raw_spin_lock_irqsave(&rq->__lock, flags);
	walt_update_group_thresholds();
	raw_spin_unlock_irqrestore(&rq->__lock, flags);

	mutex_unlock(&mutex);
	return ret;
}

static int walt_proc_user_hint_handler(struct ctl_table *table,
				       int write, void __user *buffer, size_t *lenp,
				       loff_t *ppos)
{
	int ret;
	unsigned int old_value;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);

	old_value = sysctl_sched_user_hint;
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write || (old_value == sysctl_sched_user_hint))
		goto unlock;

	sched_user_hint_reset_time = jiffies + HZ;
	walt_irq_work_queue(&walt_migration_irq_work);

unlock:
	mutex_unlock(&mutex);
	return ret;
}

DECLARE_BITMAP(sysctl_bitmap, WALT_NR_CPUS);

static int walt_proc_sbt_pause_handler(struct ctl_table *table,
				       int write, void __user *buffer, size_t *lenp,
				       loff_t *ppos)
{
	int ret = 0;
	unsigned int old_value;
	unsigned long bitmask;
	const unsigned long *bitmaskp = &bitmask;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);

	old_value = sysctl_sched_sbt_pause_cpus;
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write || (old_value == sysctl_sched_sbt_pause_cpus))
		goto unlock;

	bitmask = (unsigned long)sysctl_sched_sbt_pause_cpus;
	bitmap_copy(sysctl_bitmap, bitmaskp, WALT_NR_CPUS);
	if (!sbt_ongoing)
		cpumask_copy(&cpus_for_sbt_pause, to_cpumask(sysctl_bitmap));
	else
		pr_warn("sbt core control is on-going, ignore change\n");

unlock:
	mutex_unlock(&mutex);
	return ret;
}
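
/*
 * Illustrative example for the handler above (values hypothetical): the
 * sysctl takes a CPU bitmask, so writing 3 (0b0011) to
 * sched_sbt_pause_cpus selects CPU0 and CPU1 for cpus_for_sbt_pause.
 * While sbt core control is ongoing the new mask is dropped with a
 * warning, although the sysctl value itself still changes.
 */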

/*
 * pipeline cpus are non-prime cpus chosen to handle pipeline tasks, e.g. golds.
 * Note that:
 * - this can be updated only if sysctl_sched_heavy_nr == 0 && pipeline_nr == 0
 * - CPU7 is not allowed to be set in sched_pipeline_cpus
 */
static int walt_proc_pipeline_cpus_handler(struct ctl_table *table,
					   int write, void __user *buffer, size_t *lenp,
					   loff_t *ppos)
{
	int ret = 0;
	unsigned int old_value;
	unsigned long bitmask;
	const unsigned long *bitmaskp = &bitmask;
	static DEFINE_MUTEX(mutex);
	int avoid_cpu = cpumask_last(&sched_cluster[num_sched_clusters - 1]->cpus);

	/* do not allow changes once a pipeline is set up */
	if (write && (sysctl_sched_heavy_nr || pipeline_nr))
		return -EPERM;

	mutex_lock(&mutex);

	old_value = sysctl_sched_pipeline_cpus;
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write || (old_value == sysctl_sched_pipeline_cpus))
		goto unlock;

	bitmask = (unsigned long)sysctl_sched_pipeline_cpus;
	bitmap_copy(sysctl_bitmap, bitmaskp, WALT_NR_CPUS);
	cpumask_copy(&cpus_for_pipeline, to_cpumask(sysctl_bitmap));

	/* do not allow avoid_cpu to be present in the sysctl nor the mask */
	cpumask_clear_cpu(avoid_cpu, &cpus_for_pipeline);
	sysctl_sched_pipeline_cpus &= ~(1 << avoid_cpu);

unlock:
	mutex_unlock(&mutex);
	return ret;
}
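
/*
 * Illustrative example for the handler above (CPU numbering hypothetical):
 * on an eight-CPU system whose prime cluster holds CPU7, writing 0x78
 * (CPUs 3-6) populates cpus_for_pipeline with those CPUs. A write that
 * includes bit 7 has that bit cleared from both the cpumask and the sysctl
 * value, because avoid_cpu is the last CPU of the last cluster.
 */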

/* pipeline cpus are non-prime cpus chosen to handle pipeline tasks, e.g. golds */
static int walt_proc_heavy_nr_handler(struct ctl_table *table,
				      int write, void __user *buffer, size_t *lenp,
				      loff_t *ppos)
{
	int ret = 0;
	static DEFINE_MUTEX(mutex);

	if (write && !sysctl_sched_pipeline_cpus)
		return -EPERM;

	mutex_lock(&mutex);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	mutex_unlock(&mutex);

	return ret;
}

static int sched_ravg_window_handler(struct ctl_table *table,
				     int write, void __user *buffer, size_t *lenp,
				     loff_t *ppos)
{
	int ret = -EPERM;
	static DEFINE_MUTEX(mutex);
	int val;
	struct ctl_table tmp = {
		.data = &val,
		.maxlen = sizeof(val),
		.mode = table->mode,
	};

	mutex_lock(&mutex);

	if (write && HZ != 250)
		goto unlock;

	val = sysctl_sched_ravg_window_nr_ticks;
	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
	if (ret || !write || (val == sysctl_sched_ravg_window_nr_ticks))
		goto unlock;

	if (val != 2 && val != 3 && val != 4 && val != 5 && val != 8) {
		ret = -EINVAL;
		goto unlock;
	}

	sysctl_sched_ravg_window_nr_ticks = val;
	sched_window_nr_ticks_change();

unlock:
	mutex_unlock(&mutex);
	return ret;
}
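
/*
 * Worked example: the handler above only honours writes when HZ == 250,
 * i.e. a 4 ms scheduler tick, so the accepted tick counts of 2, 3, 4, 5
 * and 8 correspond to WALT window lengths of 8, 12, 16, 20 and 32 ms.
 */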

static int sched_ravg_window_handler_user(struct ctl_table *table,
					  int write, void __user *buffer, size_t *lenp,
					  loff_t *ppos)
{
	int ret = -EPERM;
	static DEFINE_MUTEX(mutex);
	int val;
	struct ctl_table tmp = {
		.data = &val,
		.maxlen = sizeof(val),
		.mode = table->mode,
	};

	mutex_lock(&mutex);

	if (write && HZ != 250)
		goto unlock;

	val = sysctl_sched_ravg_window_nr_ticks_user;
	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
	if (ret || !write || (val == sysctl_sched_ravg_window_nr_ticks_user))
		goto unlock;

	if (val != 0 && val != 2 && val != 3 && val != 4 && val != 5 && val != 8) {
		ret = -EINVAL;
		goto unlock;
	}

	sysctl_sched_ravg_window_nr_ticks_user = val;
	sched_window_nr_ticks_change();

unlock:
	mutex_unlock(&mutex);
	return ret;
}

static DEFINE_MUTEX(sysctl_pid_mutex);

static int sched_task_read_pid_handler(struct ctl_table *table, int write,
				       void __user *buffer, size_t *lenp,
				       loff_t *ppos)
{
	int ret;

	mutex_lock(&sysctl_pid_mutex);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	mutex_unlock(&sysctl_pid_mutex);

	return ret;
}

enum {
	TASK_BEGIN = 0,
	WAKE_UP_IDLE,
	INIT_TASK_LOAD,
	GROUP_ID,
	PER_TASK_BOOST,
	PER_TASK_BOOST_PERIOD_MS,
	LOW_LATENCY,
	PIPELINE,
	LOAD_BOOST,
	REDUCE_AFFINITY,
};

static int sched_task_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret, param;
	struct task_struct *task;
	int pid_and_val[2] = {-1, -1};
	int val;
	struct walt_task_struct *wts;
	struct rq *rq;
	struct rq_flags rf;
	unsigned long bitmask;
	const unsigned long *bitmaskp = &bitmask;
	struct ctl_table tmp = {
		.data = &pid_and_val,
		.maxlen = sizeof(pid_and_val),
		.mode = table->mode,
	};

	mutex_lock(&sysctl_pid_mutex);

	if (!write) {
		task = get_pid_task(find_vpid(sysctl_task_read_pid),
				    PIDTYPE_PID);
		if (!task) {
			ret = -ENOENT;
			goto unlock_mutex;
		}
		wts = (struct walt_task_struct *) task->android_vendor_data1;
		pid_and_val[0] = sysctl_task_read_pid;
		param = (unsigned long)table->data;
		switch (param) {
		case WAKE_UP_IDLE:
			pid_and_val[1] = wts->wake_up_idle;
			break;
		case INIT_TASK_LOAD:
			pid_and_val[1] = wts->init_load_pct;
			break;
		case GROUP_ID:
			pid_and_val[1] = sched_get_group_id(task);
			break;
		case PER_TASK_BOOST:
			pid_and_val[1] = wts->boost;
			break;
		case PER_TASK_BOOST_PERIOD_MS:
			pid_and_val[1] =
				div64_ul(wts->boost_period,
					 1000000UL);
			break;
		case LOW_LATENCY:
			pid_and_val[1] = wts->low_latency &
					 WALT_LOW_LATENCY_PROCFS;
			break;
		case PIPELINE:
			pid_and_val[1] = wts->low_latency &
					 WALT_LOW_LATENCY_PIPELINE;
			break;
		case LOAD_BOOST:
			pid_and_val[1] = wts->load_boost;
			break;
		case REDUCE_AFFINITY:
			pid_and_val[1] = cpumask_bits(&wts->reduce_mask)[0];
			break;
		default:
			ret = -EINVAL;
			goto put_task;
		}
		ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
		goto put_task;
	}

	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
	if (ret)
		goto unlock_mutex;

	if (pid_and_val[0] <= 0) {
		ret = -ENOENT;
		goto unlock_mutex;
	}

	/* parsed the values successfully into the pid_and_val[] array */
	task = get_pid_task(find_vpid(pid_and_val[0]), PIDTYPE_PID);
	if (!task) {
		ret = -ENOENT;
		goto unlock_mutex;
	}
	wts = (struct walt_task_struct *) task->android_vendor_data1;
	param = (unsigned long)table->data;
	val = pid_and_val[1];
	if (param != LOAD_BOOST && val < 0) {
		ret = -EINVAL;
		goto put_task;
	}
	switch (param) {
	case WAKE_UP_IDLE:
		wts->wake_up_idle = val;
		break;
	case INIT_TASK_LOAD:
		if (pid_and_val[1] < 0 || pid_and_val[1] > 100) {
			ret = -EINVAL;
			goto put_task;
		}
		wts->init_load_pct = val;
		break;
	case GROUP_ID:
		ret = sched_set_group_id(task, val);
		break;
	case PER_TASK_BOOST:
		if (val < TASK_BOOST_NONE || val >= TASK_BOOST_END) {
			ret = -EINVAL;
			goto put_task;
		}
		wts->boost = val;
		if (val == 0)
			wts->boost_period = 0;
		break;
	case PER_TASK_BOOST_PERIOD_MS:
		if (wts->boost == 0 && val) {
			/* setting a boost period without a boost is invalid */
			ret = -EINVAL;
			goto put_task;
		}
		wts->boost_period = (u64)val * 1000 * 1000;
		wts->boost_expires = sched_clock() + wts->boost_period;
		break;
	case LOW_LATENCY:
		if (val)
			wts->low_latency |= WALT_LOW_LATENCY_PROCFS;
		else
			wts->low_latency &= ~WALT_LOW_LATENCY_PROCFS;
		break;
	case PIPELINE:
		/* deny the write while the pipeline cpus mask is empty */
		if (!sysctl_sched_pipeline_cpus) {
			ret = -EPERM;
			goto put_task;
		}
		rq = task_rq_lock(task, &rf);
		if (READ_ONCE(task->__state) == TASK_DEAD) {
			ret = -EINVAL;
			task_rq_unlock(rq, task, &rf);
			goto put_task;
		}
		if (val) {
			ret = add_pipeline(wts);
			if (ret < 0) {
				task_rq_unlock(rq, task, &rf);
				goto put_task;
			}
			wts->low_latency |= WALT_LOW_LATENCY_PIPELINE;
		} else {
			wts->low_latency &= ~WALT_LOW_LATENCY_PIPELINE;
			remove_pipeline(wts);
		}
		task_rq_unlock(rq, task, &rf);
		break;
	case LOAD_BOOST:
		if (pid_and_val[1] < -90 || pid_and_val[1] > 90) {
			ret = -EINVAL;
			goto put_task;
		}
		wts->load_boost = val;
		if (val)
			wts->boosted_task_load = mult_frac((int64_t)1024, (int64_t)val, 100);
		else
			wts->boosted_task_load = 0;
		break;
	case REDUCE_AFFINITY:
		bitmask = (unsigned long) val;
		bitmap_copy(sysctl_bitmap, bitmaskp, WALT_NR_CPUS);
		cpumask_copy(&wts->reduce_mask, to_cpumask(sysctl_bitmap));
		break;
	default:
		ret = -EINVAL;
	}

	trace_sched_task_handler(task, param, val, CALLER_ADDR0, CALLER_ADDR1,
				 CALLER_ADDR2, CALLER_ADDR3, CALLER_ADDR4, CALLER_ADDR5);
put_task:
	put_task_struct(task);
unlock_mutex:
	mutex_unlock(&sysctl_pid_mutex);

	return ret;
}
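
/*
 * Usage sketch for the per-task handler above (PID and values are
 * hypothetical): each per-task node takes a "<pid> <value>" pair on write,
 * e.g. writing "1234 25" to sched_init_task_load sets task 1234's initial
 * load to 25%. Reads are a two-step protocol: first write the PID of
 * interest to sched_task_read_pid, then read the per-task node to get the
 * "<pid> <value>" pair back.
 */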

#ifdef CONFIG_PROC_SYSCTL
static void sched_update_updown_migrate_values(bool up)
{
	int i = 0, cpu;
	struct walt_sched_cluster *cluster;

	for_each_sched_cluster(cluster) {
		/*
		 * No need to worry about CPUs in the last cluster
		 * if there are more than 2 clusters in the system
		 */
		for_each_cpu(cpu, &cluster->cpus) {
			if (up)
				sched_capacity_margin_up[cpu] =
					SCHED_FIXEDPOINT_SCALE * 100 /
					sysctl_sched_capacity_margin_up_pct[i];
			else
				sched_capacity_margin_down[cpu] =
					SCHED_FIXEDPOINT_SCALE * 100 /
					sysctl_sched_capacity_margin_dn_pct[i];
		}

		trace_sched_update_updown_migrate_values(up, i);
		if (++i >= num_sched_clusters - 1)
			break;
	}
}
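
/*
 * Worked example for the conversion above: with SCHED_FIXEDPOINT_SCALE of
 * 1024 and the default sched_upmigrate of 95%, the per-CPU margin becomes
 * 1024 * 100 / 95 ≈ 1077, matching the "~5% margin" noted for the defaults
 * in walt_tunables() below.
 */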

int sched_updown_migrate_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos)
{
	int ret, i;
	unsigned int *data = (unsigned int *)table->data;
	static DEFINE_MUTEX(mutex);
	int cap_margin_levels = num_sched_clusters ? num_sched_clusters - 1 : 0;
	int val[MAX_MARGIN_LEVELS];
	struct ctl_table tmp = {
		.data = &val,
		.maxlen = sizeof(int) * cap_margin_levels,
		.mode = table->mode,
	};

	if (cap_margin_levels <= 0)
		return -EINVAL;

	mutex_lock(&mutex);

	if (!write) {
		ret = proc_dointvec(table, write, buffer, lenp, ppos);
		goto unlock_mutex;
	}

	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
	if (ret)
		goto unlock_mutex;

	/* check if valid pct values are passed in */
	for (i = 0; i < cap_margin_levels; i++) {
		if (val[i] <= 0 || val[i] > 100) {
			ret = -EINVAL;
			goto unlock_mutex;
		}
	}

	/* check that the up pct is not less than the dn pct */
	if (data == &sysctl_sched_capacity_margin_up_pct[0]) {
		for (i = 0; i < cap_margin_levels; i++) {
			if (val[i] < sysctl_sched_capacity_margin_dn_pct[i]) {
				ret = -EINVAL;
				goto unlock_mutex;
			}
		}
	} else {
		for (i = 0; i < cap_margin_levels; i++) {
			if (sysctl_sched_capacity_margin_up_pct[i] < val[i]) {
				ret = -EINVAL;
				goto unlock_mutex;
			}
		}
	}

	/* all checks passed; update the values */
	for (i = 0; i < cap_margin_levels; i++)
		data[i] = val[i];

	/* update individual cpu thresholds */
	sched_update_updown_migrate_values(data == &sysctl_sched_capacity_margin_up_pct[0]);

unlock_mutex:
	mutex_unlock(&mutex);

	return ret;
}

static void sched_update_updown_early_migrate_values(bool up)
{
	int i = 0, cpu;
	struct walt_sched_cluster *cluster;

	for_each_sched_cluster(cluster) {
		/*
		 * No need to worry about CPUs in the last cluster
		 * if there are more than 2 clusters in the system
		 */
		for_each_cpu(cpu, &cluster->cpus) {
			if (up)
				sched_capacity_margin_early_up[cpu] = sysctl_sched_early_up[i];
			else
				sched_capacity_margin_early_down[cpu] = sysctl_sched_early_down[i];
		}

		trace_sched_update_updown_early_migrate_values(up, i);
		if (++i >= num_sched_clusters - 1)
			break;
	}
}

int sched_updown_early_migrate_handler(struct ctl_table *table, int write,
				       void __user *buffer, size_t *lenp,
				       loff_t *ppos)
{
	int ret, i;
	unsigned int *data = (unsigned int *)table->data;
	static DEFINE_MUTEX(mutex);
	int cap_margin_levels = num_sched_clusters ? num_sched_clusters - 1 : 0;
	int val[MAX_MARGIN_LEVELS];
	struct ctl_table tmp = {
		.data = &val,
		.maxlen = sizeof(int) * cap_margin_levels,
		.mode = table->mode,
	};

	if (cap_margin_levels <= 0)
		return -EINVAL;

	mutex_lock(&mutex);

	if (!write) {
		ret = proc_dointvec(table, write, buffer, lenp, ppos);
		goto unlock_mutex;
	}

	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
	if (ret)
		goto unlock_mutex;

	for (i = 0; i < cap_margin_levels; i++) {
		if (val[i] < 1024) {
			ret = -EINVAL;
			goto unlock_mutex;
		}
	}

	/* check that the up threshold is less than the dn threshold */
	if (data == &sysctl_sched_early_up[0]) {
		for (i = 0; i < cap_margin_levels; i++) {
			if (val[i] >= sysctl_sched_early_down[i]) {
				ret = -EINVAL;
				goto unlock_mutex;
			}
		}
	} else {
		for (i = 0; i < cap_margin_levels; i++) {
			if (sysctl_sched_early_up[i] >= val[i]) {
				ret = -EINVAL;
				goto unlock_mutex;
			}
		}
	}

	/* all checks passed; update the values */
	for (i = 0; i < cap_margin_levels; i++)
		data[i] = val[i];

	/* update individual cpu thresholds */
	sched_update_updown_early_migrate_values(data == &sysctl_sched_early_up[0]);

unlock_mutex:
	mutex_unlock(&mutex);

	return ret;
}

int sched_fmax_cap_handler(struct ctl_table *table, int write,
			   void __user *buffer, size_t *lenp,
			   loff_t *ppos)
{
	int ret, i;
	unsigned int *data = (unsigned int *)table->data;
	static DEFINE_MUTEX(mutex);
	int cap_margin_levels = num_sched_clusters;
	int val[MAX_CLUSTERS];
	struct ctl_table tmp = {
		.data = &val,
		.maxlen = sizeof(int) * cap_margin_levels,
		.mode = table->mode,
	};

	if (cap_margin_levels <= 0)
		return -EINVAL;

	mutex_lock(&mutex);

	if (!write) {
		ret = proc_dointvec(table, write, buffer, lenp, ppos);
		goto unlock_mutex;
	}

	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
	if (ret)
		goto unlock_mutex;

	for (i = 0; i < cap_margin_levels; i++) {
		if (val[i] < 0) {
			ret = -EINVAL;
			goto unlock_mutex;
		}
		data[i] = val[i];
	}

unlock_mutex:
	mutex_unlock(&mutex);

	return ret;
}

static DEFINE_MUTEX(idle_enough_mutex);

int sched_idle_enough_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret, i;

	mutex_lock(&idle_enough_mutex);

	ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		goto unlock_mutex;

	/* update all per-cluster entries to match what was written */
	for (i = 0; i < MAX_CLUSTERS; i++)
		sysctl_sched_idle_enough_clust[i] = sysctl_sched_idle_enough;

unlock_mutex:
	mutex_unlock(&idle_enough_mutex);

	return ret;
}

int sched_idle_enough_clust_handler(struct ctl_table *table, int write,
				    void __user *buffer, size_t *lenp,
				    loff_t *ppos)
{
	int ret;

	mutex_lock(&idle_enough_mutex);

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		goto unlock_mutex;

	/* update the single entry to match the first cluster updated here */
	sysctl_sched_idle_enough = sysctl_sched_idle_enough_clust[0];

unlock_mutex:
	mutex_unlock(&idle_enough_mutex);

	return ret;
}

static DEFINE_MUTEX(util_thres_mutex);

int sched_cluster_util_thres_pct_handler(struct ctl_table *table, int write,
					 void __user *buffer, size_t *lenp,
					 loff_t *ppos)
{
	int ret, i;

	mutex_lock(&util_thres_mutex);

	ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		goto unlock_mutex;

	/* update all per-cluster entries to match what was written */
	for (i = 0; i < MAX_CLUSTERS; i++)
		sysctl_sched_cluster_util_thres_pct_clust[i] = sysctl_sched_cluster_util_thres_pct;

unlock_mutex:
	mutex_unlock(&util_thres_mutex);

	return ret;
}

int sched_cluster_util_thres_pct_clust_handler(struct ctl_table *table, int write,
					       void __user *buffer, size_t *lenp,
					       loff_t *ppos)
{
	int ret;

	mutex_lock(&util_thres_mutex);

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		goto unlock_mutex;

	/* update the single entry to match the first cluster updated here */
	sysctl_sched_cluster_util_thres_pct = sysctl_sched_cluster_util_thres_pct_clust[0];

unlock_mutex:
	mutex_unlock(&util_thres_mutex);

	return ret;
}
#endif /* CONFIG_PROC_SYSCTL */

struct ctl_table input_boost_sysctls[] = {
	{
		.procname = "input_boost_ms",
		.data = &sysctl_input_boost_ms,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &one_hundred_thousand,
	},
	{
		.procname = "input_boost_freq",
		.data = &sysctl_input_boost_freq,
		.maxlen = sizeof(unsigned int) * 8,
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_INT_MAX,
	},
	{
		.procname = "sched_boost_on_input",
		.data = &sysctl_sched_boost_on_input,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_INT_MAX,
	},
	{ }
};

struct ctl_table walt_table[] = {
	{
		.procname = "sched_user_hint",
		.data = &sysctl_sched_user_hint,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = walt_proc_user_hint_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = (void *)&sched_user_hint_max,
	},
	{
		.procname = "sched_window_stats_policy",
		.data = &sysctl_sched_window_stats_policy,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &four,
	},
	{
		.procname = "sched_group_upmigrate",
		.data = &sysctl_sched_group_upmigrate_pct,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = walt_proc_group_thresholds_handler,
		.extra1 = &sysctl_sched_group_downmigrate_pct,
	},
	{
		.procname = "sched_group_downmigrate",
		.data = &sysctl_sched_group_downmigrate_pct,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = walt_proc_group_thresholds_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &sysctl_sched_group_upmigrate_pct,
	},
	{
		.procname = "sched_boost",
		.data = &sysctl_sched_boost,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_boost_handler,
		.extra1 = &neg_four,
		.extra2 = &four,
	},
	{
		.procname = "sched_conservative_pl",
		.data = &sysctl_sched_conservative_pl,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "sched_many_wakeup_threshold",
		.data = &sysctl_sched_many_wakeup_threshold,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &two,
		.extra2 = &one_thousand,
	},
	{
		.procname = "sched_walt_rotate_big_tasks",
		.data = &sysctl_sched_walt_rotate_big_tasks,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "sched_min_task_util_for_boost",
		.data = &sysctl_sched_min_task_util_for_boost,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &one_thousand,
	},
	{
		.procname = "sched_min_task_util_for_uclamp",
		.data = &sysctl_sched_min_task_util_for_uclamp,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &one_thousand,
	},
	{
		.procname = "sched_min_task_util_for_colocation",
		.data = &sysctl_sched_min_task_util_for_colocation,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &one_thousand,
	},
	{
		.procname = "sched_coloc_downmigrate_ns",
		.data = &sysctl_sched_coloc_downmigrate_ns,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_douintvec_minmax,
	},
	{
		.procname = "sched_task_unfilter_period",
		.data = &sysctl_sched_task_unfilter_period,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ONE,
		.extra2 = &two_hundred_million,
	},
	{
		.procname = "sched_busy_hysteresis_enable_cpus",
		.data = &sysctl_sched_busy_hyst_enable_cpus,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_busy_hyst_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &two_hundred_fifty_five,
	},
	{
		.procname = "sched_busy_hyst_ns",
		.data = &sysctl_sched_busy_hyst,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_busy_hyst_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &ns_per_sec,
	},
	{
		.procname = "sched_coloc_busy_hysteresis_enable_cpus",
		.data = &sysctl_sched_coloc_busy_hyst_enable_cpus,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_busy_hyst_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &two_hundred_fifty_five,
	},
	{
		.procname = "sched_coloc_busy_hyst_cpu_ns",
		.data = &sysctl_sched_coloc_busy_hyst_cpu,
		.maxlen = sizeof(unsigned int) * WALT_NR_CPUS,
		.mode = 0644,
		.proc_handler = sched_busy_hyst_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &ns_per_sec,
	},
	{
		.procname = "sched_coloc_busy_hyst_max_ms",
		.data = &sysctl_sched_coloc_busy_hyst_max_ms,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_busy_hyst_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &one_hundred_thousand,
	},
	{
		.procname = "sched_coloc_busy_hyst_cpu_busy_pct",
		.data = &sysctl_sched_coloc_busy_hyst_cpu_busy_pct,
		.maxlen = sizeof(unsigned int) * WALT_NR_CPUS,
		.mode = 0644,
		.proc_handler = sched_busy_hyst_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &one_hundred,
	},
	{
		.procname = "sched_util_busy_hysteresis_enable_cpus",
		.data = &sysctl_sched_util_busy_hyst_enable_cpus,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_busy_hyst_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &two_hundred_fifty_five,
	},
	{
		.procname = "sched_util_busy_hyst_cpu_ns",
		.data = &sysctl_sched_util_busy_hyst_cpu,
		.maxlen = sizeof(unsigned int) * WALT_NR_CPUS,
		.mode = 0644,
		.proc_handler = sched_busy_hyst_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &ns_per_sec,
	},
	{
		.procname = "sched_util_busy_hyst_cpu_util",
		.data = &sysctl_sched_util_busy_hyst_cpu_util,
		.maxlen = sizeof(unsigned int) * WALT_NR_CPUS,
		.mode = 0644,
		.proc_handler = sched_busy_hyst_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &one_thousand,
	},
	{
		.procname = "sched_ravg_window_nr_ticks",
		.data = &sysctl_sched_ravg_window_nr_ticks,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_ravg_window_handler,
	},
	{
		.procname = "sched_ravg_window_nr_ticks_user",
		.data = &sysctl_sched_ravg_window_nr_ticks_user,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_ravg_window_handler_user,
	},
	{
		.procname = "sched_upmigrate",
		.data = &sysctl_sched_capacity_margin_up_pct,
		.maxlen = sizeof(unsigned int) * MAX_MARGIN_LEVELS,
		.mode = 0644,
		.proc_handler = sched_updown_migrate_handler,
	},
	{
		.procname = "sched_downmigrate",
		.data = &sysctl_sched_capacity_margin_dn_pct,
		.maxlen = sizeof(unsigned int) * MAX_MARGIN_LEVELS,
		.mode = 0644,
		.proc_handler = sched_updown_migrate_handler,
	},
	{
		.procname = "sched_early_upmigrate",
		.data = &sysctl_sched_early_up,
		.maxlen = sizeof(unsigned int) * MAX_MARGIN_LEVELS,
		.mode = 0644,
		.proc_handler = sched_updown_early_migrate_handler,
	},
	{
		.procname = "sched_early_downmigrate",
		.data = &sysctl_sched_early_down,
		.maxlen = sizeof(unsigned int) * MAX_MARGIN_LEVELS,
		.mode = 0644,
		.proc_handler = sched_updown_early_migrate_handler,
	},
	{
		.procname = "walt_rtg_cfs_boost_prio",
		.data = &sysctl_walt_rtg_cfs_boost_prio,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &min_cfs_boost_prio,
		.extra2 = &max_cfs_boost_prio,
	},
	{
		.procname = "walt_low_latency_task_threshold",
		.data = &sysctl_walt_low_latency_task_threshold,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &one_thousand,
	},
	{
		.procname = "sched_force_lb_enable",
		.data = &sysctl_sched_force_lb_enable,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "sched_sync_hint_enable",
		.data = &sysctl_sched_sync_hint_enable,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "sched_suppress_region2",
		.data = &sysctl_sched_suppress_region2,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "sched_skip_sp_newly_idle_lb",
		.data = &sysctl_sched_skip_sp_newly_idle_lb,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "sched_hyst_min_coloc_ns",
		.data = &sysctl_sched_hyst_min_coloc_ns,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
	},
	{
		.procname = "panic_on_walt_bug",
		.data = &sysctl_panic_on_walt_bug,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_INT_MAX,
	},
	{
		.procname = "sched_lib_name",
		.data = sched_lib_name,
		.maxlen = LIB_PATH_LENGTH,
		.mode = 0644,
		.proc_handler = proc_dostring,
	},
	{
		.procname = "sched_lib_mask_force",
		.data = &sched_lib_mask_force,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_douintvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &two_hundred_fifty_five,
	},
	{
		.procname = "input_boost",
		.mode = 0555,
		.child = input_boost_sysctls,
	},
	{
		.procname = "sched_wake_up_idle",
		.data = (int *) WAKE_UP_IDLE,
		.maxlen = sizeof(unsigned int) * 2,
		.mode = 0644,
		.proc_handler = sched_task_handler,
	},
	{
		.procname = "sched_init_task_load",
		.data = (int *) INIT_TASK_LOAD,
		.maxlen = sizeof(unsigned int) * 2,
		.mode = 0644,
		.proc_handler = sched_task_handler,
	},
	{
		.procname = "sched_group_id",
		.data = (int *) GROUP_ID,
		.maxlen = sizeof(unsigned int) * 2,
		.mode = 0644,
		.proc_handler = sched_task_handler,
	},
	{
		.procname = "sched_per_task_boost",
		.data = (int *) PER_TASK_BOOST,
		.maxlen = sizeof(unsigned int) * 2,
		.mode = 0644,
		.proc_handler = sched_task_handler,
	},
	{
		.procname = "sched_per_task_boost_period_ms",
		.data = (int *) PER_TASK_BOOST_PERIOD_MS,
		.maxlen = sizeof(unsigned int) * 2,
		.mode = 0644,
		.proc_handler = sched_task_handler,
	},
	{
		.procname = "sched_low_latency",
		.data = (int *) LOW_LATENCY,
		.maxlen = sizeof(unsigned int) * 2,
		.mode = 0644,
		.proc_handler = sched_task_handler,
	},
	{
		.procname = "sched_pipeline",
		.data = (int *) PIPELINE,
		.maxlen = sizeof(unsigned int) * 2,
		.mode = 0644,
		.proc_handler = sched_task_handler,
	},
	{
		.procname = "task_load_boost",
		.data = (int *) LOAD_BOOST,
		.maxlen = sizeof(unsigned int) * 2,
		.mode = 0644,
		.proc_handler = sched_task_handler,
	},
	{
		.procname = "task_reduce_affinity",
		.data = (int *) REDUCE_AFFINITY,
		.maxlen = sizeof(unsigned int) * 2,
		.mode = 0644,
		.proc_handler = sched_task_handler,
	},
	{
		.procname = "sched_task_read_pid",
		.data = &sysctl_task_read_pid,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = sched_task_read_pid_handler,
		.extra1 = SYSCTL_ONE,
		.extra2 = SYSCTL_INT_MAX,
	},
	{
		.procname = "sched_enable_tp",
		.data = &sysctl_sched_dynamic_tp_enable,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_dynamic_tp_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "sched_asymcap_boost",
		.data = &sysctl_sched_asymcap_boost,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_douintvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "sched_cluster_util_thres_pct",
		.data = &sysctl_sched_cluster_util_thres_pct,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_cluster_util_thres_pct_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_INT_MAX,
	},
	{
		.procname = "sched_cluster_util_thres_pct_clust",
		.data = &sysctl_sched_cluster_util_thres_pct_clust,
		.maxlen = sizeof(unsigned int) * MAX_CLUSTERS,
		.mode = 0644,
		.proc_handler = sched_cluster_util_thres_pct_clust_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_INT_MAX,
	},
	{
		.procname = "sched_idle_enough",
		.data = &sysctl_sched_idle_enough,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_idle_enough_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_INT_MAX,
	},
	{
		.procname = "sched_idle_enough_clust",
		.data = &sysctl_sched_idle_enough_clust,
		.maxlen = sizeof(unsigned int) * MAX_CLUSTERS,
		.mode = 0644,
		.proc_handler = sched_idle_enough_clust_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_INT_MAX,
	},
	{
		.procname = "sched_long_running_rt_task_ms",
		.data = &sysctl_sched_long_running_rt_task_ms,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = sched_long_running_rt_task_ms_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &two_thousand,
	},
	{
		.procname = "sched_ed_boost",
		.data = &sysctl_ed_boost_pct,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_douintvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &one_hundred,
	},
	{
		.procname = "sched_em_inflate_pct",
		.data = &sysctl_em_inflate_pct,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_douintvec_minmax,
		.extra1 = &one_hundred,
		.extra2 = &one_thousand,
	},
	{
		.procname = "sched_em_inflate_thres",
		.data = &sysctl_em_inflate_thres,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_douintvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &one_thousand_twenty_four,
	},
	{
		.procname = "sched_heavy_nr",
		.data = &sysctl_sched_heavy_nr,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = walt_proc_heavy_nr_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = &walt_max_cpus,
	},
	{
		.procname = "sched_sbt_enable",
		.data = &sysctl_sched_sbt_enable,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_douintvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "sched_sbt_pause_cpus",
		.data = &sysctl_sched_sbt_pause_cpus,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = walt_proc_sbt_pause_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_INT_MAX,
	},
	{
		.procname = "sched_sbt_delay_windows",
		.data = &sysctl_sched_sbt_delay_windows,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_douintvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_INT_MAX,
	},
	{
		.procname = "sched_pipeline_cpus",
		.data = &sysctl_sched_pipeline_cpus,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = walt_proc_pipeline_cpus_handler,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_INT_MAX,
	},
	{
		.procname = "sched_max_freq_partial_halt",
		.data = &sysctl_max_freq_partial_halt,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_douintvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_INT_MAX,
	},
	{
		.procname = "sched_fmax_cap",
		.data = &sysctl_fmax_cap,
		.maxlen = sizeof(unsigned int) * MAX_CLUSTERS,
		.mode = 0644,
		.proc_handler = sched_fmax_cap_handler,
	},
	{
		.procname = "sched_high_perf_cluster_freq_cap",
		.data = &high_perf_cluster_freq_cap,
		.maxlen = sizeof(unsigned int) * MAX_CLUSTERS,
		.mode = 0644,
		.proc_handler = sched_fmax_cap_handler,
	},
	{
		.procname = "sched_cluster0_freq_map",
		.data = sysctl_cluster_arr[0],
		.maxlen = sizeof(int) * MAX_FREQ_RELATIONS * TUPLE_SIZE,
		.mode = 0644,
		.proc_handler = sched_freq_map_handler,
	},
	{
		.procname = "sched_cluster1_freq_map",
		.data = sysctl_cluster_arr[1],
		.maxlen = sizeof(int) * MAX_FREQ_RELATIONS * TUPLE_SIZE,
		.mode = 0644,
		.proc_handler = sched_freq_map_handler,
	},
	{
		.procname = "sched_cluster2_freq_map",
		.data = sysctl_cluster_arr[2],
		.maxlen = sizeof(int) * MAX_FREQ_RELATIONS * TUPLE_SIZE,
		.mode = 0644,
		.proc_handler = sched_freq_map_handler,
	},
	{
		.procname = "sched_cluster3_freq_map",
		.data = sysctl_cluster_arr[3],
		.maxlen = sizeof(int) * MAX_FREQ_RELATIONS * TUPLE_SIZE,
		.mode = 0644,
		.proc_handler = sched_freq_map_handler,
	},
	{
		.procname = "sched_pipeline_skip_prime",
		.data = &sysctl_sched_pipeline_skip_prime,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_douintvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_INT_MAX,
	},
	{
		.procname = "sched_fmax_uncap_thresh_ms",
		.data = &sysctl_sched_fmax_uncap_thresh_ms,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_douintvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_INT_MAX,
	},
	{
		.procname = "sched_fmax_uncap_thresh_util",
		.data = &sysctl_sched_fmax_uncap_thresh_util,
		.maxlen = sizeof(unsigned int),
		.mode = 0644,
		.proc_handler = proc_douintvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_INT_MAX,
	},
	{ }
};

struct ctl_table walt_base_table[] = {
	{
		.procname = "walt",
		.mode = 0555,
		.child = walt_table,
	},
	{ },
};

void walt_tunables(void)
{
	int i, j;

	for (i = 0; i < MAX_MARGIN_LEVELS; i++) {
		sysctl_sched_capacity_margin_up_pct[i] = 95; /* ~5% margin */
		sysctl_sched_capacity_margin_dn_pct[i] = 85; /* ~15% margin */
		sysctl_sched_early_up[i] = 1077;
		sysctl_sched_early_down[i] = 1204;
	}

	sysctl_sched_group_upmigrate_pct = 100;
	sysctl_sched_group_downmigrate_pct = 95;
	sysctl_sched_task_unfilter_period = 100000000;
	sysctl_sched_window_stats_policy = WINDOW_STATS_MAX_RECENT_AVG;
	sysctl_sched_ravg_window_nr_ticks = (HZ / NR_WINDOWS_PER_SEC);
	sysctl_sched_ravg_window_nr_ticks_user = 0;
	sched_load_granule = DEFAULT_SCHED_RAVG_WINDOW / NUM_LOAD_INDICES;

	for (i = 0; i < WALT_NR_CPUS; i++) {
		sysctl_sched_coloc_busy_hyst_cpu[i] = 39000000;
		sysctl_sched_coloc_busy_hyst_cpu_busy_pct[i] = 10;
		sysctl_sched_util_busy_hyst_cpu[i] = 5000000;
		sysctl_sched_util_busy_hyst_cpu_util[i] = 15;
	}

	sysctl_sched_coloc_busy_hyst_enable_cpus = 112;
	sysctl_sched_util_busy_hyst_enable_cpus = 255;
	sysctl_sched_coloc_busy_hyst_max_ms = 5000;
	sched_ravg_window = DEFAULT_SCHED_RAVG_WINDOW;
	sysctl_input_boost_ms = 40;
	sysctl_sched_fmax_uncap_thresh_ms = 300;
	sysctl_sched_fmax_uncap_thresh_util = 90;

	for (i = 0; i < 8; i++)
		sysctl_input_boost_freq[i] = 0;

	for (i = 0; i < MAX_CLUSTERS; i++) {
		sysctl_fmax_cap[i] = FREQ_QOS_MAX_DEFAULT_VALUE;
		high_perf_cluster_freq_cap[i] = FREQ_QOS_MAX_DEFAULT_VALUE;
		sysctl_sched_idle_enough_clust[i] = SCHED_IDLE_ENOUGH_DEFAULT;
		sysctl_sched_cluster_util_thres_pct_clust[i] = SCHED_CLUSTER_UTIL_THRES_PCT_DEFAULT;
	}

	for (i = 0; i < MAX_FREQ_CAP; i++) {
		for (j = 0; j < MAX_CLUSTERS; j++)
			fmax_cap[i][j] = FREQ_QOS_MAX_DEFAULT_VALUE;
	}

	for (i = 0; i < MAX_CLUSTERS; i++) {
		for (j = 0; j < MAX_FREQ_RELATIONS; j++) {
			relation_data[i][j].src_freq = relation_data[i][j].tgt_freq =
				FREQ_QOS_MAX_DEFAULT_VALUE;
			relation_data[i][j].target_cluster_cpu = -1;
		}
	}
}
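
/*
 * Worked example for the window default above (assuming
 * NR_WINDOWS_PER_SEC == 50 and DEFAULT_SCHED_RAVG_WINDOW == 20 ms; neither
 * macro is defined in this file): at HZ == 250, HZ / NR_WINDOWS_PER_SEC
 * yields 5 ticks of 4 ms each, i.e. a 20 ms window, consistent with
 * sched_ravg_window being set to DEFAULT_SCHED_RAVG_WINDOW.
 */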