- // SPDX-License-Identifier: GPL-2.0-only
- /*
- * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
- */
- #include <trace/hooks/sched.h>
- #include "walt.h"
- #include "trace.h"
- static int neg_four = -4;
- static int four = 4;
- static int two_hundred_fifty_five = 255;
- static unsigned int ns_per_sec = NSEC_PER_SEC;
- static unsigned int one_hundred_thousand = 100000;
- static unsigned int two_hundred_million = 200000000;
- static int __maybe_unused two = 2;
- static int one_hundred = 100;
- static int one_thousand = 1000;
- static int one_thousand_twenty_four = 1024;
- static int two_thousand = 2000;
- static int walt_max_cpus = WALT_NR_CPUS;
- /*
- * CFS task prio range is [100 .. 139].
- * 120 is the default prio.
- * RTG boost range is [100 .. 119] because giving
- * boost for [120 .. 139] does not make sense.
- * 99 means disabled and it is the default value.
- */
- static unsigned int min_cfs_boost_prio = 99;
- static unsigned int max_cfs_boost_prio = 119;
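- /*
- * Illustrative example: writing 110 to the walt_rtg_cfs_boost_prio node
- * below sets the RTG CFS boost prio to 110 (presumably enabling the boost
- * at that prio), while the default of 99 leaves it disabled; the min/max
- * handler rejects anything outside [99 .. 119].
- */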
- unsigned int sysctl_sched_capacity_margin_up_pct[MAX_MARGIN_LEVELS];
- unsigned int sysctl_sched_capacity_margin_dn_pct[MAX_MARGIN_LEVELS];
- unsigned int sysctl_sched_busy_hyst_enable_cpus;
- unsigned int sysctl_sched_busy_hyst;
- unsigned int sysctl_sched_coloc_busy_hyst_enable_cpus;
- unsigned int sysctl_sched_coloc_busy_hyst_cpu[WALT_NR_CPUS];
- unsigned int sysctl_sched_coloc_busy_hyst_max_ms;
- unsigned int sysctl_sched_coloc_busy_hyst_cpu_busy_pct[WALT_NR_CPUS];
- unsigned int sysctl_sched_util_busy_hyst_enable_cpus;
- unsigned int sysctl_sched_util_busy_hyst_cpu[WALT_NR_CPUS];
- unsigned int sysctl_sched_util_busy_hyst_cpu_util[WALT_NR_CPUS];
- unsigned int sysctl_sched_boost;
- unsigned int sysctl_sched_wake_up_idle[2];
- unsigned int sysctl_input_boost_ms;
- unsigned int sysctl_input_boost_freq[8];
- unsigned int sysctl_sched_boost_on_input;
- unsigned int sysctl_sched_early_up[MAX_MARGIN_LEVELS];
- unsigned int sysctl_sched_early_down[MAX_MARGIN_LEVELS];
- /* sysctl nodes accessed by other files */
- unsigned int __read_mostly sysctl_sched_coloc_downmigrate_ns;
- unsigned int __read_mostly sysctl_sched_group_downmigrate_pct;
- unsigned int __read_mostly sysctl_sched_group_upmigrate_pct;
- unsigned int __read_mostly sysctl_sched_window_stats_policy;
- unsigned int sysctl_sched_ravg_window_nr_ticks;
- unsigned int sysctl_sched_ravg_window_nr_ticks_user;
- unsigned int sysctl_sched_walt_rotate_big_tasks;
- unsigned int sysctl_sched_task_unfilter_period;
- unsigned int sysctl_walt_low_latency_task_threshold; /* disabled by default */
- unsigned int sysctl_sched_conservative_pl;
- unsigned int sysctl_sched_min_task_util_for_boost = 51;
- unsigned int sysctl_sched_min_task_util_for_uclamp = 51;
- unsigned int sysctl_sched_min_task_util_for_colocation = 35;
- unsigned int sysctl_sched_many_wakeup_threshold = WALT_MANY_WAKEUP_DEFAULT;
- const int sched_user_hint_max = 1000;
- unsigned int sysctl_walt_rtg_cfs_boost_prio = 99; /* disabled by default */
- unsigned int sysctl_sched_sync_hint_enable = 1;
- unsigned int sysctl_panic_on_walt_bug;
- unsigned int sysctl_sched_suppress_region2;
- unsigned int sysctl_sched_skip_sp_newly_idle_lb = 1;
- unsigned int sysctl_sched_hyst_min_coloc_ns = 80000000;
- unsigned int sysctl_sched_asymcap_boost;
- unsigned int sysctl_sched_long_running_rt_task_ms;
- unsigned int sysctl_sched_idle_enough = SCHED_IDLE_ENOUGH_DEFAULT;
- unsigned int sysctl_sched_cluster_util_thres_pct = SCHED_CLUSTER_UTIL_THRES_PCT_DEFAULT;
- unsigned int sysctl_sched_idle_enough_clust[MAX_CLUSTERS];
- unsigned int sysctl_sched_cluster_util_thres_pct_clust[MAX_CLUSTERS];
- unsigned int sysctl_ed_boost_pct;
- unsigned int sysctl_em_inflate_pct = 100;
- unsigned int sysctl_em_inflate_thres = 1024;
- unsigned int sysctl_sched_heavy_nr;
- unsigned int sysctl_max_freq_partial_halt = FREQ_QOS_MAX_DEFAULT_VALUE;
- unsigned int sysctl_fmax_cap[MAX_CLUSTERS];
- unsigned int sysctl_sched_sbt_pause_cpus;
- unsigned int sysctl_sched_sbt_enable = 1;
- unsigned int sysctl_sched_sbt_delay_windows;
- unsigned int high_perf_cluster_freq_cap[MAX_CLUSTERS];
- unsigned int sysctl_sched_pipeline_cpus;
- unsigned int fmax_cap[MAX_FREQ_CAP][MAX_CLUSTERS];
- unsigned int sysctl_sched_pipeline_skip_prime;
- unsigned int sysctl_sched_fmax_uncap_thresh_ms;
- unsigned int sysctl_sched_fmax_uncap_thresh_util;
- bool sbt_ongoing;
- /* Entries for 4 clusters and 10 tuples (3 items in each tuple) */
- unsigned int sysctl_cluster_arr[4][MAX_FREQ_RELATIONS * TUPLE_SIZE] = {
- [0] = {0, 0, 0},
- [1] = {0, 0, 0},
- [2] = {0, 0, 0},
- [3] = {0, 0, 0},
- };
- struct freq_relation_map relation_data[MAX_CLUSTERS][MAX_FREQ_RELATIONS];
- /* range is [1 .. INT_MAX] */
- static int sysctl_task_read_pid = 1;
- static int sched_freq_map_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int i, idx = 0, ret = -EPERM;
- unsigned int *data = (unsigned int *)table->data;
- static DEFINE_MUTEX(ignore_cluster_mutex);
- static int configured[MAX_CLUSTERS] = {0};
- int index;
- unsigned int val[MAX_FREQ_RELATIONS * TUPLE_SIZE];
- unsigned int src_cluster_fmax;
- unsigned int cluster_freq[MAX_CLUSTERS] = {0};
- struct ctl_table tmp = {
- .data = &val,
- .maxlen = sizeof(unsigned int) * MAX_FREQ_RELATIONS * TUPLE_SIZE,
- .mode = table->mode,
- };
- if (num_sched_clusters <= 1)
- return ret;
- index = (data == sysctl_cluster_arr[0]) ? 0 : (data == sysctl_cluster_arr[1]) ?
- 1 : (data == sysctl_cluster_arr[2]) ? 2 : 3;
- /* prime is not allowed to have any relations for now */
- if (index >= num_sched_clusters - 1)
- return ret;
- mutex_lock(&ignore_cluster_mutex);
- if (!write) {
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
- goto unlock;
- }
- /* each cluster may be configured only once */
- if (configured[index])
- goto unlock;
- ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
- if (ret)
- goto unlock;
- src_cluster_fmax = sched_cluster[index]->max_possible_freq;
- configured[index] = 1;
- /*
- * tuple format:
- * <a b c>:
- * a : source cluster frequency
- * b : first cpu of target cluster
- * c : target cluster frequency
- */
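- /*
- * Illustrative example (frequencies are made up, in kHz as is conventional
- * for cpufreq): writing the tuples "1500000 4 960000 1800000 4 1200000"
- * relates this cluster's 1.5 GHz and 1.8 GHz points to the cluster
- * containing CPU4 at 960 MHz and 1.2 GHz respectively. Parsing stops at
- * the first tuple with a zero frequency or an out-of-range cpu; the target
- * must be a higher cluster, source frequencies must not decrease and
- * target frequencies must strictly increase from one tuple to the next.
- */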
- for (i = 0; i < MAX_FREQ_RELATIONS; i++) {
- int tgt_cluster_id;
- idx = i * 3;
- if ((val[idx + 0] == 0) || (val[idx + 1] >= cpumask_weight(cpu_possible_mask)) ||
- (val[idx + 2] == 0))
- break;
- tgt_cluster_id = cpu_cluster(val[idx + 1])->id;
- /* the target cpu cannot be in the same or a lower cluster */
- if (tgt_cluster_id <= index)
- break;
- /* the source frequency must not decrease across tuples */
- if (cluster_freq[index] > val[idx + 0])
- break;
- cluster_freq[index] = val[idx + 0];
- if (cluster_freq[tgt_cluster_id] >= val[idx + 2])
- break;
- cluster_freq[tgt_cluster_id] = val[idx + 2];
- relation_data[index][i].src_freq = data[idx + 0] = val[idx + 0];
- relation_data[index][i].target_cluster_cpu = data[idx + 1] = val[idx + 1];
- relation_data[index][i].tgt_freq = data[idx + 2] = val[idx + 2];
- }
- for (; i < MAX_FREQ_RELATIONS; i++) {
- idx = i * 3;
- relation_data[index][i].src_freq = data[idx + 0] = FREQ_QOS_MAX_DEFAULT_VALUE;
- relation_data[index][i].target_cluster_cpu = data[idx + 1] = -1;
- relation_data[index][i].tgt_freq = data[idx + 2] = FREQ_QOS_MAX_DEFAULT_VALUE;
- }
- update_freq_relation(sched_cluster[index]);
- unlock:
- mutex_unlock(&ignore_cluster_mutex);
- return ret;
- }
- static int walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret;
- static DEFINE_MUTEX(mutex);
- struct rq *rq = cpu_rq(cpumask_first(cpu_possible_mask));
- unsigned long flags;
- if (unlikely(num_sched_clusters <= 0))
- return -EPERM;
- mutex_lock(&mutex);
- ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (ret || !write) {
- mutex_unlock(&mutex);
- return ret;
- }
- /*
- * The load scale factor update happens with all
- * rqs locked, so acquiring a single CPU's rq lock
- * and updating the thresholds is sufficient for
- * an atomic update.
- */
- raw_spin_lock_irqsave(&rq->__lock, flags);
- walt_update_group_thresholds();
- raw_spin_unlock_irqrestore(&rq->__lock, flags);
- mutex_unlock(&mutex);
- return ret;
- }
- static int walt_proc_user_hint_handler(struct ctl_table *table,
- int write, void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret;
- unsigned int old_value;
- static DEFINE_MUTEX(mutex);
- mutex_lock(&mutex);
- old_value = sysctl_sched_user_hint;
- ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (ret || !write || (old_value == sysctl_sched_user_hint))
- goto unlock;
- sched_user_hint_reset_time = jiffies + HZ;
- walt_irq_work_queue(&walt_migration_irq_work);
- unlock:
- mutex_unlock(&mutex);
- return ret;
- }
- DECLARE_BITMAP(sysctl_bitmap, WALT_NR_CPUS);
- static int walt_proc_sbt_pause_handler(struct ctl_table *table,
- int write, void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret = 0;
- unsigned int old_value;
- unsigned long bitmask;
- const unsigned long *bitmaskp = &bitmask;
- static DEFINE_MUTEX(mutex);
- mutex_lock(&mutex);
- old_value = sysctl_sched_sbt_pause_cpus;
- ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (ret || !write || (old_value == sysctl_sched_sbt_pause_cpus))
- goto unlock;
- bitmask = (unsigned long)sysctl_sched_sbt_pause_cpus;
- bitmap_copy(sysctl_bitmap, bitmaskp, WALT_NR_CPUS);
- if (!sbt_ongoing)
- cpumask_copy(&cpus_for_sbt_pause, to_cpumask(sysctl_bitmap));
- else
- pr_warn("sbt core control is on-going, ignore change\n");
- unlock:
- mutex_unlock(&mutex);
- return ret;
- }
- /*
- * Pipeline cpus are non-prime cpus chosen to handle pipeline tasks, e.g. golds.
- * Note that:
- * - this can be updated only if sysctl_sched_heavy_nr == 0 && pipeline_nr == 0
- * - CPU7 (the last cpu of the last cluster) is never allowed in sched_pipeline_cpus
- */
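- /*
- * The value is interpreted as a cpu bitmask, e.g. on a typical 8-cpu part
- * writing 112 (0x70) selects CPUs 4-6; if the prime cpu's bit is set, the
- * handler below clears it again from both the sysctl value and the mask.
- */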
- static int walt_proc_pipeline_cpus_handler(struct ctl_table *table,
- int write, void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret = 0;
- unsigned int old_value;
- unsigned long bitmask;
- const unsigned long *bitmaskp = &bitmask;
- static DEFINE_MUTEX(mutex);
- int avoid_cpu = cpumask_last(&sched_cluster[num_sched_clusters - 1]->cpus);
- /* do not allow changes while a pipeline is already set up */
- if (write && (sysctl_sched_heavy_nr || pipeline_nr))
- return -EPERM;
- mutex_lock(&mutex);
- old_value = sysctl_sched_pipeline_cpus;
- ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (ret || !write || (old_value == sysctl_sched_pipeline_cpus))
- goto unlock;
- bitmask = (unsigned long)sysctl_sched_pipeline_cpus;
- bitmap_copy(sysctl_bitmap, bitmaskp, WALT_NR_CPUS);
- cpumask_copy(&cpus_for_pipeline, to_cpumask(sysctl_bitmap));
- /* do not allow avoid_cpu to be present in either the sysctl value or the mask */
- cpumask_clear_cpu(avoid_cpu, &cpus_for_pipeline);
- sysctl_sched_pipeline_cpus &= ~(1 << avoid_cpu);
- unlock:
- mutex_unlock(&mutex);
- return ret;
- }
- /* pipeline cpus are non-prime cpus chosen to handle pipeline tasks, e.g. golds */
- static int walt_proc_heavy_nr_handler(struct ctl_table *table,
- int write, void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret = 0;
- static DEFINE_MUTEX(mutex);
- if (write && !sysctl_sched_pipeline_cpus)
- return -EPERM;
- mutex_lock(&mutex);
- ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- mutex_unlock(&mutex);
- return ret;
- }
- static int sched_ravg_window_handler(struct ctl_table *table,
- int write, void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret = -EPERM;
- static DEFINE_MUTEX(mutex);
- int val;
- struct ctl_table tmp = {
- .data = &val,
- .maxlen = sizeof(val),
- .mode = table->mode,
- };
- mutex_lock(&mutex);
- if (write && HZ != 250)
- goto unlock;
- val = sysctl_sched_ravg_window_nr_ticks;
- ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
- if (ret || !write || (val == sysctl_sched_ravg_window_nr_ticks))
- goto unlock;
- if (val != 2 && val != 3 && val != 4 && val != 5 && val != 8) {
- ret = -EINVAL;
- goto unlock;
- }
- sysctl_sched_ravg_window_nr_ticks = val;
- sched_window_nr_ticks_change();
- unlock:
- mutex_unlock(&mutex);
- return ret;
- }
- static int sched_ravg_window_handler_user(struct ctl_table *table,
- int write, void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret = -EPERM;
- static DEFINE_MUTEX(mutex);
- int val;
- struct ctl_table tmp = {
- .data = &val,
- .maxlen = sizeof(val),
- .mode = table->mode,
- };
- mutex_lock(&mutex);
- if (write && HZ != 250)
- goto unlock;
- val = sysctl_sched_ravg_window_nr_ticks_user;
- ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
- if (ret || !write || (val == sysctl_sched_ravg_window_nr_ticks_user))
- goto unlock;
- if (val != 0 && val != 2 && val != 3 && val != 4 && val != 5 && val != 8) {
- ret = -EINVAL;
- goto unlock;
- }
- sysctl_sched_ravg_window_nr_ticks_user = val;
- sched_window_nr_ticks_change();
- unlock:
- mutex_unlock(&mutex);
- return ret;
- }
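- /*
- * With the HZ == 250 requirement enforced above, one tick is 4 ms, so the
- * accepted values 2, 3, 4, 5 and 8 correspond to window lengths of 8, 12,
- * 16, 20 and 32 ms; 0 in the _user node is also accepted and presumably
- * means "no user override".
- */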
- static DEFINE_MUTEX(sysctl_pid_mutex);
- static int sched_task_read_pid_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret;
- mutex_lock(&sysctl_pid_mutex);
- ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- mutex_unlock(&sysctl_pid_mutex);
- return ret;
- }
- enum {
- TASK_BEGIN = 0,
- WAKE_UP_IDLE,
- INIT_TASK_LOAD,
- GROUP_ID,
- PER_TASK_BOOST,
- PER_TASK_BOOST_PERIOD_MS,
- LOW_LATENCY,
- PIPELINE,
- LOAD_BOOST,
- REDUCE_AFFINITY,
- };
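- /*
- * Usage sketch for the per-task nodes handled below (the exact /proc/sys
- * path depends on where walt_base_table is registered): a write supplies a
- * "<pid> <value>" pair, e.g. "1234 1" to sched_low_latency marks task 1234
- * as low latency; to read, first write the pid of interest to
- * sched_task_read_pid, then read the node to get "<pid> <value>" back.
- */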
- static int sched_task_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret, param;
- struct task_struct *task;
- int pid_and_val[2] = {-1, -1};
- int val;
- struct walt_task_struct *wts;
- struct rq *rq;
- struct rq_flags rf;
- unsigned long bitmask;
- const unsigned long *bitmaskp = &bitmask;
- struct ctl_table tmp = {
- .data = &pid_and_val,
- .maxlen = sizeof(pid_and_val),
- .mode = table->mode,
- };
- mutex_lock(&sysctl_pid_mutex);
- if (!write) {
- task = get_pid_task(find_vpid(sysctl_task_read_pid),
- PIDTYPE_PID);
- if (!task) {
- ret = -ENOENT;
- goto unlock_mutex;
- }
- wts = (struct walt_task_struct *) task->android_vendor_data1;
- pid_and_val[0] = sysctl_task_read_pid;
- param = (unsigned long)table->data;
- switch (param) {
- case WAKE_UP_IDLE:
- pid_and_val[1] = wts->wake_up_idle;
- break;
- case INIT_TASK_LOAD:
- pid_and_val[1] = wts->init_load_pct;
- break;
- case GROUP_ID:
- pid_and_val[1] = sched_get_group_id(task);
- break;
- case PER_TASK_BOOST:
- pid_and_val[1] = wts->boost;
- break;
- case PER_TASK_BOOST_PERIOD_MS:
- pid_and_val[1] =
- div64_ul(wts->boost_period,
- 1000000UL);
- break;
- case LOW_LATENCY:
- pid_and_val[1] = wts->low_latency &
- WALT_LOW_LATENCY_PROCFS;
- break;
- case PIPELINE:
- pid_and_val[1] = wts->low_latency &
- WALT_LOW_LATENCY_PIPELINE;
- break;
- case LOAD_BOOST:
- pid_and_val[1] = wts->load_boost;
- break;
- case REDUCE_AFFINITY:
- pid_and_val[1] = cpumask_bits(&wts->reduce_mask)[0];
- break;
- default:
- ret = -EINVAL;
- goto put_task;
- }
- ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
- goto put_task;
- }
- ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
- if (ret)
- goto unlock_mutex;
- if (pid_and_val[0] <= 0) {
- ret = -ENOENT;
- goto unlock_mutex;
- }
- /* the values were parsed successfully into the pid_and_val[] array */
- task = get_pid_task(find_vpid(pid_and_val[0]), PIDTYPE_PID);
- if (!task) {
- ret = -ENOENT;
- goto unlock_mutex;
- }
- wts = (struct walt_task_struct *) task->android_vendor_data1;
- param = (unsigned long)table->data;
- val = pid_and_val[1];
- if (param != LOAD_BOOST && val < 0) {
- ret = -EINVAL;
- goto put_task;
- }
- switch (param) {
- case WAKE_UP_IDLE:
- wts->wake_up_idle = val;
- break;
- case INIT_TASK_LOAD:
- if (pid_and_val[1] < 0 || pid_and_val[1] > 100) {
- ret = -EINVAL;
- goto put_task;
- }
- wts->init_load_pct = val;
- break;
- case GROUP_ID:
- ret = sched_set_group_id(task, val);
- break;
- case PER_TASK_BOOST:
- if (val < TASK_BOOST_NONE || val >= TASK_BOOST_END) {
- ret = -EINVAL;
- goto put_task;
- }
- wts->boost = val;
- if (val == 0)
- wts->boost_period = 0;
- break;
- case PER_TASK_BOOST_PERIOD_MS:
- if (wts->boost == 0 && val) {
- /* setting boost period w/o boost is invalid */
- ret = -EINVAL;
- goto put_task;
- }
- wts->boost_period = (u64)val * 1000 * 1000;
- wts->boost_expires = sched_clock() + wts->boost_period;
- break;
- case LOW_LATENCY:
- if (val)
- wts->low_latency |= WALT_LOW_LATENCY_PROCFS;
- else
- wts->low_latency &= ~WALT_LOW_LATENCY_PROCFS;
- break;
- case PIPELINE:
- /* deny the write when the pipeline cpus mask is empty */
- if (!sysctl_sched_pipeline_cpus) {
- ret = -EPERM;
- goto put_task;
- }
- rq = task_rq_lock(task, &rf);
- if (READ_ONCE(task->__state) == TASK_DEAD) {
- ret = -EINVAL;
- task_rq_unlock(rq, task, &rf);
- goto put_task;
- }
- if (val) {
- ret = add_pipeline(wts);
- if (ret < 0) {
- task_rq_unlock(rq, task, &rf);
- goto put_task;
- }
- wts->low_latency |= WALT_LOW_LATENCY_PIPELINE;
- } else {
- wts->low_latency &= ~WALT_LOW_LATENCY_PIPELINE;
- remove_pipeline(wts);
- }
- task_rq_unlock(rq, task, &rf);
- break;
- case LOAD_BOOST:
- if (pid_and_val[1] < -90 || pid_and_val[1] > 90) {
- ret = -EINVAL;
- goto put_task;
- }
- wts->load_boost = val;
- if (val)
- wts->boosted_task_load = mult_frac((int64_t)1024, (int64_t)val, 100);
- else
- wts->boosted_task_load = 0;
- break;
- case REDUCE_AFFINITY:
- bitmask = (unsigned long) val;
- bitmap_copy(sysctl_bitmap, bitmaskp, WALT_NR_CPUS);
- cpumask_copy(&wts->reduce_mask, to_cpumask(sysctl_bitmap));
- break;
- default:
- ret = -EINVAL;
- }
- trace_sched_task_handler(task, param, val, CALLER_ADDR0, CALLER_ADDR1,
- CALLER_ADDR2, CALLER_ADDR3, CALLER_ADDR4, CALLER_ADDR5);
- put_task:
- put_task_struct(task);
- unlock_mutex:
- mutex_unlock(&sysctl_pid_mutex);
- return ret;
- }
- #ifdef CONFIG_PROC_SYSCTL
- static void sched_update_updown_migrate_values(bool up)
- {
- int i = 0, cpu;
- struct walt_sched_cluster *cluster;
- for_each_sched_cluster(cluster) {
- /*
- * No need to update CPUs in the last cluster:
- * there are only num_sched_clusters - 1 margin levels.
- */
- for_each_cpu(cpu, &cluster->cpus) {
- if (up)
- sched_capacity_margin_up[cpu] =
- SCHED_FIXEDPOINT_SCALE * 100 /
- sysctl_sched_capacity_margin_up_pct[i];
- else
- sched_capacity_margin_down[cpu] =
- SCHED_FIXEDPOINT_SCALE * 100 /
- sysctl_sched_capacity_margin_dn_pct[i];
- }
- trace_sched_update_updown_migrate_values(up, i);
- if (++i >= num_sched_clusters - 1)
- break;
- }
- }
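- /*
- * Worked example (SCHED_FIXEDPOINT_SCALE is 1024 on mainline kernels):
- * the default sched_upmigrate value of 95 gives a per-cpu margin of
- * 1024 * 100 / 95 = 1077, while writing 80 would give 1280; the default
- * sched_downmigrate value of 85 likewise gives 1204.
- */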
- int sched_updown_migrate_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret, i;
- unsigned int *data = (unsigned int *)table->data;
- static DEFINE_MUTEX(mutex);
- int cap_margin_levels = num_sched_clusters ? num_sched_clusters - 1 : 0;
- int val[MAX_MARGIN_LEVELS];
- struct ctl_table tmp = {
- .data = &val,
- .maxlen = sizeof(int) * cap_margin_levels,
- .mode = table->mode,
- };
- if (cap_margin_levels <= 0)
- return -EINVAL;
- mutex_lock(&mutex);
- if (!write) {
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
- goto unlock_mutex;
- }
- ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
- if (ret)
- goto unlock_mutex;
- /* check if valid pct values are passed in */
- for (i = 0; i < cap_margin_levels; i++) {
- if (val[i] <= 0 || val[i] > 100) {
- ret = -EINVAL;
- goto unlock_mutex;
- }
- }
- /* check that up pct is not less than dn pct */
- if (data == &sysctl_sched_capacity_margin_up_pct[0]) {
- for (i = 0; i < cap_margin_levels; i++) {
- if (val[i] < sysctl_sched_capacity_margin_dn_pct[i]) {
- ret = -EINVAL;
- goto unlock_mutex;
- }
- }
- } else {
- for (i = 0; i < cap_margin_levels; i++) {
- if (sysctl_sched_capacity_margin_up_pct[i] < val[i]) {
- ret = -EINVAL;
- goto unlock_mutex;
- }
- }
- }
- /* all values check out; update them */
- for (i = 0; i < cap_margin_levels; i++)
- data[i] = val[i];
- /* update individual cpu thresholds */
- sched_update_updown_migrate_values(data == &sysctl_sched_capacity_margin_up_pct[0]);
- unlock_mutex:
- mutex_unlock(&mutex);
- return ret;
- }
- static void sched_update_updown_early_migrate_values(bool up)
- {
- int i = 0, cpu;
- struct walt_sched_cluster *cluster;
- for_each_sched_cluster(cluster) {
- /*
- * No need to update CPUs in the last cluster:
- * there are only num_sched_clusters - 1 margin levels.
- */
- for_each_cpu(cpu, &cluster->cpus) {
- if (up)
- sched_capacity_margin_early_up[cpu] = sysctl_sched_early_up[i];
- else
- sched_capacity_margin_early_down[cpu] = sysctl_sched_early_down[i];
- }
- trace_sched_update_updown_early_migrate_values(up, i);
- if (++i >= num_sched_clusters - 1)
- break;
- }
- }
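- /*
- * Unlike sched_upmigrate/sched_downmigrate, these values are used as-is
- * (no percentage conversion): the handler below only accepts values >= 1024
- * with the up threshold strictly below the down threshold, e.g. the
- * defaults of 1077 and 1204 set in walt_tunables().
- */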
- int sched_updown_early_migrate_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret, i;
- unsigned int *data = (unsigned int *)table->data;
- static DEFINE_MUTEX(mutex);
- int cap_margin_levels = num_sched_clusters ? num_sched_clusters - 1 : 0;
- int val[MAX_MARGIN_LEVELS];
- struct ctl_table tmp = {
- .data = &val,
- .maxlen = sizeof(int) * cap_margin_levels,
- .mode = table->mode,
- };
- if (cap_margin_levels <= 0)
- return -EINVAL;
- mutex_lock(&mutex);
- if (!write) {
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
- goto unlock_mutex;
- }
- ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
- if (ret)
- goto unlock_mutex;
- for (i = 0; i < cap_margin_levels; i++) {
- if (val[i] < 1024) {
- ret = -EINVAL;
- goto unlock_mutex;
- }
- }
- /* check that the up thresh is strictly less than the dn thresh */
- if (data == &sysctl_sched_early_up[0]) {
- for (i = 0; i < cap_margin_levels; i++) {
- if (val[i] >= sysctl_sched_early_down[i]) {
- ret = -EINVAL;
- goto unlock_mutex;
- }
- }
- } else {
- for (i = 0; i < cap_margin_levels; i++) {
- if (sysctl_sched_early_up[i] >= val[i]) {
- ret = -EINVAL;
- goto unlock_mutex;
- }
- }
- }
- /* all values check out; update them */
- for (i = 0; i < cap_margin_levels; i++)
- data[i] = val[i];
- /* update individual cpu thresholds */
- sched_update_updown_early_migrate_values(data == &sysctl_sched_early_up[0]);
- unlock_mutex:
- mutex_unlock(&mutex);
- return ret;
- }
- int sched_fmax_cap_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret, i;
- unsigned int *data = (unsigned int *)table->data;
- static DEFINE_MUTEX(mutex);
- int cap_margin_levels = num_sched_clusters;
- int val[MAX_CLUSTERS];
- struct ctl_table tmp = {
- .data = &val,
- .maxlen = sizeof(int) * cap_margin_levels,
- .mode = table->mode,
- };
- if (cap_margin_levels <= 0)
- return -EINVAL;
- mutex_lock(&mutex);
- if (!write) {
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
- goto unlock_mutex;
- }
- ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
- if (ret)
- goto unlock_mutex;
- for (i = 0; i < cap_margin_levels; i++) {
- if (val[i] < 0) {
- ret = -EINVAL;
- goto unlock_mutex;
- }
- data[i] = val[i];
- }
- unlock_mutex:
- mutex_unlock(&mutex);
- return ret;
- }
- static DEFINE_MUTEX(idle_enough_mutex);
- int sched_idle_enough_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret, i;
- mutex_lock(&idle_enough_mutex);
- ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
- if (ret || !write)
- goto unlock_mutex;
- /* update all per-cluster entries to match what was written */
- for (i = 0; i < MAX_CLUSTERS; i++)
- sysctl_sched_idle_enough_clust[i] = sysctl_sched_idle_enough;
- unlock_mutex:
- mutex_unlock(&idle_enough_mutex);
- return ret;
- }
- int sched_idle_enough_clust_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret;
- mutex_lock(&idle_enough_mutex);
- ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (ret || !write)
- goto unlock_mutex;
- /* update the single entry to match the first cluster's value */
- sysctl_sched_idle_enough = sysctl_sched_idle_enough_clust[0];
- unlock_mutex:
- mutex_unlock(&idle_enough_mutex);
- return ret;
- }
- static DEFINE_MUTEX(util_thres_mutex);
- int sched_cluster_util_thres_pct_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret, i;
- mutex_lock(&util_thres_mutex);
- ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
- if (ret || !write)
- goto unlock_mutex;
- /* update all per-cluster entries to match what was written */
- for (i = 0; i < MAX_CLUSTERS; i++)
- sysctl_sched_cluster_util_thres_pct_clust[i] = sysctl_sched_cluster_util_thres_pct;
- unlock_mutex:
- mutex_unlock(&util_thres_mutex);
- return ret;
- }
- int sched_cluster_util_thres_pct_clust_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
- {
- int ret;
- mutex_lock(&util_thres_mutex);
- ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (ret || !write)
- goto unlock_mutex;
- /* update the single entry to match the first cluster's value */
- sysctl_sched_cluster_util_thres_pct = sysctl_sched_cluster_util_thres_pct_clust[0];
- unlock_mutex:
- mutex_unlock(&util_thres_mutex);
- return ret;
- }
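- /*
- * A note on the two pairs of handlers above: writing the global node
- * (e.g. sched_idle_enough) copies the value into every per-cluster entry,
- * while writing the per-cluster node copies its first entry back into the
- * global node, so the two views stay roughly in sync.
- */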
- #endif /* CONFIG_PROC_SYSCTL */
- struct ctl_table input_boost_sysctls[] = {
- {
- .procname = "input_boost_ms",
- .data = &sysctl_input_boost_ms,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &one_hundred_thousand,
- },
- {
- .procname = "input_boost_freq",
- .data = &sysctl_input_boost_freq,
- .maxlen = sizeof(unsigned int) * 8,
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_INT_MAX,
- },
- {
- .procname = "sched_boost_on_input",
- .data = &sysctl_sched_boost_on_input,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_INT_MAX,
- },
- { }
- };
- struct ctl_table walt_table[] = {
- {
- .procname = "sched_user_hint",
- .data = &sysctl_sched_user_hint,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = walt_proc_user_hint_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = (void *)&sched_user_hint_max,
- },
- {
- .procname = "sched_window_stats_policy",
- .data = &sysctl_sched_window_stats_policy,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &four,
- },
- {
- .procname = "sched_group_upmigrate",
- .data = &sysctl_sched_group_upmigrate_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = walt_proc_group_thresholds_handler,
- .extra1 = &sysctl_sched_group_downmigrate_pct,
- },
- {
- .procname = "sched_group_downmigrate",
- .data = &sysctl_sched_group_downmigrate_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = walt_proc_group_thresholds_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &sysctl_sched_group_upmigrate_pct,
- },
- {
- .procname = "sched_boost",
- .data = &sysctl_sched_boost,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_boost_handler,
- .extra1 = &neg_four,
- .extra2 = &four,
- },
- {
- .procname = "sched_conservative_pl",
- .data = &sysctl_sched_conservative_pl,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
- {
- .procname = "sched_many_wakeup_threshold",
- .data = &sysctl_sched_many_wakeup_threshold,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &two,
- .extra2 = &one_thousand,
- },
- {
- .procname = "sched_walt_rotate_big_tasks",
- .data = &sysctl_sched_walt_rotate_big_tasks,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
- {
- .procname = "sched_min_task_util_for_boost",
- .data = &sysctl_sched_min_task_util_for_boost,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &one_thousand,
- },
- {
- .procname = "sched_min_task_util_for_uclamp",
- .data = &sysctl_sched_min_task_util_for_uclamp,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &one_thousand,
- },
- {
- .procname = "sched_min_task_util_for_colocation",
- .data = &sysctl_sched_min_task_util_for_colocation,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &one_thousand,
- },
- {
- .procname = "sched_coloc_downmigrate_ns",
- .data = &sysctl_sched_coloc_downmigrate_ns,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_douintvec_minmax,
- },
- {
- .procname = "sched_task_unfilter_period",
- .data = &sysctl_sched_task_unfilter_period,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ONE,
- .extra2 = &two_hundred_million,
- },
- {
- .procname = "sched_busy_hysteresis_enable_cpus",
- .data = &sysctl_sched_busy_hyst_enable_cpus,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_busy_hyst_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &two_hundred_fifty_five,
- },
- {
- .procname = "sched_busy_hyst_ns",
- .data = &sysctl_sched_busy_hyst,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_busy_hyst_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &ns_per_sec,
- },
- {
- .procname = "sched_coloc_busy_hysteresis_enable_cpus",
- .data = &sysctl_sched_coloc_busy_hyst_enable_cpus,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_busy_hyst_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &two_hundred_fifty_five,
- },
- {
- .procname = "sched_coloc_busy_hyst_cpu_ns",
- .data = &sysctl_sched_coloc_busy_hyst_cpu,
- .maxlen = sizeof(unsigned int) * WALT_NR_CPUS,
- .mode = 0644,
- .proc_handler = sched_busy_hyst_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &ns_per_sec,
- },
- {
- .procname = "sched_coloc_busy_hyst_max_ms",
- .data = &sysctl_sched_coloc_busy_hyst_max_ms,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_busy_hyst_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &one_hundred_thousand,
- },
- {
- .procname = "sched_coloc_busy_hyst_cpu_busy_pct",
- .data = &sysctl_sched_coloc_busy_hyst_cpu_busy_pct,
- .maxlen = sizeof(unsigned int) * WALT_NR_CPUS,
- .mode = 0644,
- .proc_handler = sched_busy_hyst_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &one_hundred,
- },
- {
- .procname = "sched_util_busy_hysteresis_enable_cpus",
- .data = &sysctl_sched_util_busy_hyst_enable_cpus,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_busy_hyst_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &two_hundred_fifty_five,
- },
- {
- .procname = "sched_util_busy_hyst_cpu_ns",
- .data = &sysctl_sched_util_busy_hyst_cpu,
- .maxlen = sizeof(unsigned int) * WALT_NR_CPUS,
- .mode = 0644,
- .proc_handler = sched_busy_hyst_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &ns_per_sec,
- },
- {
- .procname = "sched_util_busy_hyst_cpu_util",
- .data = &sysctl_sched_util_busy_hyst_cpu_util,
- .maxlen = sizeof(unsigned int) * WALT_NR_CPUS,
- .mode = 0644,
- .proc_handler = sched_busy_hyst_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &one_thousand,
- },
- {
- .procname = "sched_ravg_window_nr_ticks",
- .data = &sysctl_sched_ravg_window_nr_ticks,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_ravg_window_handler,
- },
- {
- .procname = "sched_ravg_window_nr_ticks_user",
- .data = &sysctl_sched_ravg_window_nr_ticks_user,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_ravg_window_handler_user,
- },
- {
- .procname = "sched_upmigrate",
- .data = &sysctl_sched_capacity_margin_up_pct,
- .maxlen = sizeof(unsigned int) * MAX_MARGIN_LEVELS,
- .mode = 0644,
- .proc_handler = sched_updown_migrate_handler,
- },
- {
- .procname = "sched_downmigrate",
- .data = &sysctl_sched_capacity_margin_dn_pct,
- .maxlen = sizeof(unsigned int) * MAX_MARGIN_LEVELS,
- .mode = 0644,
- .proc_handler = sched_updown_migrate_handler,
- },
- {
- .procname = "sched_early_upmigrate",
- .data = &sysctl_sched_early_up,
- .maxlen = sizeof(unsigned int) * MAX_MARGIN_LEVELS,
- .mode = 0644,
- .proc_handler = sched_updown_early_migrate_handler,
- },
- {
- .procname = "sched_early_downmigrate",
- .data = &sysctl_sched_early_down,
- .maxlen = sizeof(unsigned int) * MAX_MARGIN_LEVELS,
- .mode = 0644,
- .proc_handler = sched_updown_early_migrate_handler,
- },
- {
- .procname = "walt_rtg_cfs_boost_prio",
- .data = &sysctl_walt_rtg_cfs_boost_prio,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_cfs_boost_prio,
- .extra2 = &max_cfs_boost_prio,
- },
- {
- .procname = "walt_low_latency_task_threshold",
- .data = &sysctl_walt_low_latency_task_threshold,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &one_thousand,
- },
- {
- .procname = "sched_force_lb_enable",
- .data = &sysctl_sched_force_lb_enable,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
- {
- .procname = "sched_sync_hint_enable",
- .data = &sysctl_sched_sync_hint_enable,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
- {
- .procname = "sched_suppress_region2",
- .data = &sysctl_sched_suppress_region2,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
- {
- .procname = "sched_skip_sp_newly_idle_lb",
- .data = &sysctl_sched_skip_sp_newly_idle_lb,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
- {
- .procname = "sched_hyst_min_coloc_ns",
- .data = &sysctl_sched_hyst_min_coloc_ns,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- },
- {
- .procname = "panic_on_walt_bug",
- .data = &sysctl_panic_on_walt_bug,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_INT_MAX,
- },
- {
- .procname = "sched_lib_name",
- .data = sched_lib_name,
- .maxlen = LIB_PATH_LENGTH,
- .mode = 0644,
- .proc_handler = proc_dostring,
- },
- {
- .procname = "sched_lib_mask_force",
- .data = &sched_lib_mask_force,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_douintvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &two_hundred_fifty_five,
- },
- {
- .procname = "input_boost",
- .mode = 0555,
- .child = input_boost_sysctls,
- },
- {
- .procname = "sched_wake_up_idle",
- .data = (int *) WAKE_UP_IDLE,
- .maxlen = sizeof(unsigned int) * 2,
- .mode = 0644,
- .proc_handler = sched_task_handler,
- },
- {
- .procname = "sched_init_task_load",
- .data = (int *) INIT_TASK_LOAD,
- .maxlen = sizeof(unsigned int) * 2,
- .mode = 0644,
- .proc_handler = sched_task_handler,
- },
- {
- .procname = "sched_group_id",
- .data = (int *) GROUP_ID,
- .maxlen = sizeof(unsigned int) * 2,
- .mode = 0644,
- .proc_handler = sched_task_handler,
- },
- {
- .procname = "sched_per_task_boost",
- .data = (int *) PER_TASK_BOOST,
- .maxlen = sizeof(unsigned int) * 2,
- .mode = 0644,
- .proc_handler = sched_task_handler,
- },
- {
- .procname = "sched_per_task_boost_period_ms",
- .data = (int *) PER_TASK_BOOST_PERIOD_MS,
- .maxlen = sizeof(unsigned int) * 2,
- .mode = 0644,
- .proc_handler = sched_task_handler,
- },
- {
- .procname = "sched_low_latency",
- .data = (int *) LOW_LATENCY,
- .maxlen = sizeof(unsigned int) * 2,
- .mode = 0644,
- .proc_handler = sched_task_handler,
- },
- {
- .procname = "sched_pipeline",
- .data = (int *) PIPELINE,
- .maxlen = sizeof(unsigned int) * 2,
- .mode = 0644,
- .proc_handler = sched_task_handler,
- },
- {
- .procname = "task_load_boost",
- .data = (int *) LOAD_BOOST,
- .maxlen = sizeof(unsigned int) * 2,
- .mode = 0644,
- .proc_handler = sched_task_handler,
- },
- {
- .procname = "task_reduce_affinity",
- .data = (int *) REDUCE_AFFINITY,
- .maxlen = sizeof(unsigned int) * 2,
- .mode = 0644,
- .proc_handler = sched_task_handler,
- },
- {
- .procname = "sched_task_read_pid",
- .data = &sysctl_task_read_pid,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = sched_task_read_pid_handler,
- .extra1 = SYSCTL_ONE,
- .extra2 = SYSCTL_INT_MAX,
- },
- {
- .procname = "sched_enable_tp",
- .data = &sysctl_sched_dynamic_tp_enable,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_dynamic_tp_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
- {
- .procname = "sched_asymcap_boost",
- .data = &sysctl_sched_asymcap_boost,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_douintvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
- {
- .procname = "sched_cluster_util_thres_pct",
- .data = &sysctl_sched_cluster_util_thres_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_cluster_util_thres_pct_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_INT_MAX,
- },
- {
- .procname = "sched_cluster_util_thres_pct_clust",
- .data = &sysctl_sched_cluster_util_thres_pct_clust,
- .maxlen = sizeof(unsigned int) * MAX_CLUSTERS,
- .mode = 0644,
- .proc_handler = sched_cluster_util_thres_pct_clust_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_INT_MAX,
- },
- {
- .procname = "sched_idle_enough",
- .data = &sysctl_sched_idle_enough,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_idle_enough_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_INT_MAX,
- },
- {
- .procname = "sched_idle_enough_clust",
- .data = &sysctl_sched_idle_enough_clust,
- .maxlen = sizeof(unsigned int) * MAX_CLUSTERS,
- .mode = 0644,
- .proc_handler = sched_idle_enough_clust_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_INT_MAX,
- },
- {
- .procname = "sched_long_running_rt_task_ms",
- .data = &sysctl_sched_long_running_rt_task_ms,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = sched_long_running_rt_task_ms_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &two_thousand,
- },
- {
- .procname = "sched_ed_boost",
- .data = &sysctl_ed_boost_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_douintvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &one_hundred,
- },
- {
- .procname = "sched_em_inflate_pct",
- .data = &sysctl_em_inflate_pct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_douintvec_minmax,
- .extra1 = &one_hundred,
- .extra2 = &one_thousand,
- },
- {
- .procname = "sched_em_inflate_thres",
- .data = &sysctl_em_inflate_thres,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_douintvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &one_thousand_twenty_four,
- },
- {
- .procname = "sched_heavy_nr",
- .data = &sysctl_sched_heavy_nr,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = walt_proc_heavy_nr_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &walt_max_cpus,
- },
- {
- .procname = "sched_sbt_enable",
- .data = &sysctl_sched_sbt_enable,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_douintvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
- {
- .procname = "sched_sbt_pause_cpus",
- .data = &sysctl_sched_sbt_pause_cpus,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = walt_proc_sbt_pause_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_INT_MAX,
- },
- {
- .procname = "sched_sbt_delay_windows",
- .data = &sysctl_sched_sbt_delay_windows,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_douintvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_INT_MAX,
- },
- {
- .procname = "sched_pipeline_cpus",
- .data = &sysctl_sched_pipeline_cpus,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = walt_proc_pipeline_cpus_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_INT_MAX,
- },
- {
- .procname = "sched_max_freq_partial_halt",
- .data = &sysctl_max_freq_partial_halt,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_douintvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_INT_MAX,
- },
- {
- .procname = "sched_fmax_cap",
- .data = &sysctl_fmax_cap,
- .maxlen = sizeof(unsigned int) * MAX_CLUSTERS,
- .mode = 0644,
- .proc_handler = sched_fmax_cap_handler,
- },
- {
- .procname = "sched_high_perf_cluster_freq_cap",
- .data = &high_perf_cluster_freq_cap,
- .maxlen = sizeof(unsigned int) * MAX_CLUSTERS,
- .mode = 0644,
- .proc_handler = sched_fmax_cap_handler,
- },
- {
- .procname = "sched_cluster0_freq_map",
- .data = sysctl_cluster_arr[0],
- .maxlen = sizeof(int) * MAX_FREQ_RELATIONS * TUPLE_SIZE,
- .mode = 0644,
- .proc_handler = sched_freq_map_handler,
- },
- {
- .procname = "sched_cluster1_freq_map",
- .data = sysctl_cluster_arr[1],
- .maxlen = sizeof(int) * MAX_FREQ_RELATIONS * TUPLE_SIZE,
- .mode = 0644,
- .proc_handler = sched_freq_map_handler,
- },
- {
- .procname = "sched_cluster2_freq_map",
- .data = sysctl_cluster_arr[2],
- .maxlen = sizeof(int) * MAX_FREQ_RELATIONS * TUPLE_SIZE,
- .mode = 0644,
- .proc_handler = sched_freq_map_handler,
- },
- {
- .procname = "sched_cluster3_freq_map",
- .data = sysctl_cluster_arr[3],
- .maxlen = sizeof(int) * MAX_FREQ_RELATIONS * TUPLE_SIZE,
- .mode = 0644,
- .proc_handler = sched_freq_map_handler,
- },
- {
- .procname = "sched_pipeline_skip_prime",
- .data = &sysctl_sched_pipeline_skip_prime,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_douintvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_INT_MAX,
- },
- {
- .procname = "sched_fmax_uncap_thresh_ms",
- .data = &sysctl_sched_fmax_uncap_thresh_ms,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_douintvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_INT_MAX,
- },
- {
- .procname = "sched_fmax_uncap_thresh_util",
- .data = &sysctl_sched_fmax_uncap_thresh_util,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_douintvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_INT_MAX,
- },
- { }
- };
- struct ctl_table walt_base_table[] = {
- {
- .procname = "walt",
- .mode = 0555,
- .child = walt_table,
- },
- { },
- };
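- /*
- * Assuming walt_base_table is registered at the sysctl root, the nodes
- * above appear under /proc/sys/walt/, with input_boost_sysctls exposed as
- * the /proc/sys/walt/input_boost/ subdirectory.
- */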
- void walt_tunables(void)
- {
- int i, j;
- for (i = 0; i < MAX_MARGIN_LEVELS; i++) {
- sysctl_sched_capacity_margin_up_pct[i] = 95; /* ~5% margin */
- sysctl_sched_capacity_margin_dn_pct[i] = 85; /* ~15% margin */
- sysctl_sched_early_up[i] = 1077;
- sysctl_sched_early_down[i] = 1204;
- }
- sysctl_sched_group_upmigrate_pct = 100;
- sysctl_sched_group_downmigrate_pct = 95;
- sysctl_sched_task_unfilter_period = 100000000;
- sysctl_sched_window_stats_policy = WINDOW_STATS_MAX_RECENT_AVG;
- sysctl_sched_ravg_window_nr_ticks = (HZ / NR_WINDOWS_PER_SEC);
- sysctl_sched_ravg_window_nr_ticks_user = 0;
- sched_load_granule = DEFAULT_SCHED_RAVG_WINDOW / NUM_LOAD_INDICES;
- for (i = 0; i < WALT_NR_CPUS; i++) {
- sysctl_sched_coloc_busy_hyst_cpu[i] = 39000000;
- sysctl_sched_coloc_busy_hyst_cpu_busy_pct[i] = 10;
- sysctl_sched_util_busy_hyst_cpu[i] = 5000000;
- sysctl_sched_util_busy_hyst_cpu_util[i] = 15;
- }
- sysctl_sched_coloc_busy_hyst_enable_cpus = 112;
- sysctl_sched_util_busy_hyst_enable_cpus = 255;
- sysctl_sched_coloc_busy_hyst_max_ms = 5000;
- sched_ravg_window = DEFAULT_SCHED_RAVG_WINDOW;
- sysctl_input_boost_ms = 40;
- sysctl_sched_fmax_uncap_thresh_ms = 300;
- sysctl_sched_fmax_uncap_thresh_util = 90;
- for (i = 0; i < 8; i++)
- sysctl_input_boost_freq[i] = 0;
- for (i = 0; i < MAX_CLUSTERS; i++) {
- sysctl_fmax_cap[i] = FREQ_QOS_MAX_DEFAULT_VALUE;
- high_perf_cluster_freq_cap[i] = FREQ_QOS_MAX_DEFAULT_VALUE;
- sysctl_sched_idle_enough_clust[i] = SCHED_IDLE_ENOUGH_DEFAULT;
- sysctl_sched_cluster_util_thres_pct_clust[i] = SCHED_CLUSTER_UTIL_THRES_PCT_DEFAULT;
- }
- for (i = 0; i < MAX_FREQ_CAP; i++) {
- for (j = 0; j < MAX_CLUSTERS; j++)
- fmax_cap[i][j] = FREQ_QOS_MAX_DEFAULT_VALUE;
- }
- for (i = 0; i < MAX_CLUSTERS; i++) {
- for (j = 0; j < MAX_FREQ_RELATIONS; j++) {
- relation_data[i][j].src_freq = relation_data[i][j].tgt_freq =
- FREQ_QOS_MAX_DEFAULT_VALUE;
- relation_data[i][j].target_cluster_cpu = -1;
- }
- }
- }