- // SPDX-License-Identifier: GPL-2.0
- /*
- * Pressure stall information for CPU, memory and IO
- *
- * Copyright (c) 2018 Facebook, Inc.
- * Author: Johannes Weiner <[email protected]>
- *
- * Polling support by Suren Baghdasaryan <[email protected]>
- * Copyright (c) 2018 Google, Inc.
- *
- * When CPU, memory and IO are contended, tasks experience delays that
- * reduce throughput and introduce latencies into the workload. Memory
- * and IO contention, in addition, can cause a full loss of forward
- * progress in which the CPU goes idle.
- *
- * This code aggregates individual task delays into resource pressure
- * metrics that indicate problems with both workload health and
- * resource utilization.
- *
- * Model
- *
- * The time in which a task can execute on a CPU is our baseline for
- * productivity. Pressure expresses the amount of time in which this
- * potential cannot be realized due to resource contention.
- *
- * This concept of productivity has two components: the workload and
- * the CPU. To measure the impact of pressure on both, we define two
- * contention states for a resource: SOME and FULL.
- *
- * In the SOME state of a given resource, one or more tasks are
- * delayed on that resource. This affects the workload's ability to
- * perform work, but the CPU may still be executing other tasks.
- *
- * In the FULL state of a given resource, all non-idle tasks are
- * delayed on that resource such that nobody is advancing and the CPU
- * goes idle. This leaves both workload and CPU unproductive.
- *
- * SOME = nr_delayed_tasks != 0
- * FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0
- *
- * What it means for a task to be productive is defined differently
- * for each resource. For IO, productive means a running task. For
- * memory, productive means a running task that isn't a reclaimer. For
- * CPU, productive means an oncpu task.
- *
- * Naturally, the FULL state doesn't exist for the CPU resource at the
- * system level, but exists at the cgroup level. At the cgroup level,
- * FULL means all non-idle tasks in the cgroup are delayed on the CPU
- * resource which is being used by others outside of the cgroup or
- * throttled by the cgroup cpu.max configuration.
- *
- * The percentage of wallclock time spent in those compound stall
- * states gives pressure numbers between 0 and 100 for each resource,
- * where the SOME percentage indicates workload slowdowns and the FULL
- * percentage indicates reduced CPU utilization:
- *
- * %SOME = time(SOME) / period
- * %FULL = time(FULL) / period
- *
- * Multiple CPUs
- *
- * The more tasks and available CPUs there are, the more work can be
- * performed concurrently. This means that the potential that can go
- * unrealized due to resource contention *also* scales with non-idle
- * tasks and CPUs.
- *
- * Consider a scenario where 257 number crunching tasks are trying to
- * run concurrently on 256 CPUs. If we simply aggregated the task
- * states, we would have to conclude a CPU SOME pressure number of
- * 100%, since *somebody* is waiting on a runqueue at all
- * times. However, that is clearly not the amount of contention the
- * workload is experiencing: only one out of 256 possible execution
- * threads will be contended at any given time, or about 0.4%.
- *
- * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
- * given time *one* of the tasks is delayed due to a lack of memory.
- * Again, looking purely at the task state would yield a memory FULL
- * pressure number of 0%, since *somebody* is always making forward
- * progress. But again this wouldn't capture the amount of execution
- * potential lost, which is 1 out of 4 CPUs, or 25%.
- *
- * To calculate wasted potential (pressure) with multiple processors,
- * we have to base our calculation on the number of non-idle tasks in
- * conjunction with the number of available CPUs, which is the number
- * of potential execution threads. SOME becomes then the proportion of
- * delayed tasks to possible threads, and FULL is the share of possible
- * threads that are unproductive due to delays:
- *
- * threads = min(nr_nonidle_tasks, nr_cpus)
- * SOME = min(nr_delayed_tasks / threads, 1)
- * FULL = (threads - min(nr_productive_tasks, threads)) / threads
- *
- * For the 257 number crunchers on 256 CPUs, this yields:
- *
- * threads = min(257, 256)
- * SOME = min(1 / 256, 1) = 0.4%
- * FULL = (256 - min(256, 256)) / 256 = 0%
- *
- * For the 1 out of 4 memory-delayed tasks, this yields:
- *
- * threads = min(4, 4)
- * SOME = min(1 / 4, 1) = 25%
- * FULL = (4 - min(3, 4)) / 4 = 25%
- *
- * [ Substitute nr_cpus with 1, and you can see that it's a natural
- * extension of the single-CPU model. ]
- *
- * Implementation
- *
- * To assess the precise time spent in each such state, we would have
- * to freeze the system on task changes and start/stop the state
- * clocks accordingly. Obviously that doesn't scale in practice.
- *
- * Because the scheduler aims to distribute the compute load evenly
- * among the available CPUs, we can track task state locally to each
- * CPU and, at much lower frequency, extrapolate the global state for
- * the cumulative stall times and the running averages.
- *
- * For each runqueue, we track:
- *
- * tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
- * tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu])
- * tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
- *
- * and then periodically aggregate:
- *
- * tNONIDLE = sum(tNONIDLE[i])
- *
- * tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
- * tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
- *
- * %SOME = tSOME / period
- * %FULL = tFULL / period
- *
- * This gives us an approximation of pressure that is practical
- * cost-wise, yet way more sensitive and accurate than periodic
- * sampling of the aggregate task states would be.
- */
- #include <trace/hooks/psi.h>
- static int psi_bug __read_mostly;
- DEFINE_STATIC_KEY_FALSE(psi_disabled);
- DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);
- #ifdef CONFIG_PSI_DEFAULT_DISABLED
- static bool psi_enable;
- #else
- static bool psi_enable = true;
- #endif
- static int __init setup_psi(char *str)
- {
- return kstrtobool(str, &psi_enable) == 0;
- }
- __setup("psi=", setup_psi);
- /* Running averages - we need to be higher-res than loadavg */
- #define PSI_FREQ (2*HZ+1) /* 2 sec intervals */
- #define EXP_10s 1677 /* 1/exp(2s/10s) as fixed-point */
- #define EXP_60s 1981 /* 1/exp(2s/60s) */
- #define EXP_300s 2034 /* 1/exp(2s/300s) */
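- /*
- * The EXP_* constants are exp(-2s/period) in FIXED_1 (1 << 11) fixed-point,
- * e.g. exp(-2/10) * 2048 ~= 1677. calc_avgs() feeds them to calc_load() to
- * decay the 10s/60s/300s averages by one PSI_FREQ interval at a time.
- */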
- /* PSI trigger definitions */
- #define WINDOW_MAX_US 10000000 /* Max window size is 10s */
- #define UPDATES_PER_WINDOW 10 /* 10 updates per window */
- /* Sampling frequency in nanoseconds */
- static u64 psi_period __read_mostly;
- /* System-level pressure and stall tracking */
- static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
- struct psi_group psi_system = {
- .pcpu = &system_group_pcpu,
- };
- static void psi_avgs_work(struct work_struct *work);
- static void poll_timer_fn(struct timer_list *t);
- static void group_init(struct psi_group *group)
- {
- int cpu;
- group->enabled = true;
- for_each_possible_cpu(cpu)
- seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
- group->avg_last_update = sched_clock();
- group->avg_next_update = group->avg_last_update + psi_period;
- INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
- mutex_init(&group->avgs_lock);
- /* Init trigger-related members */
- atomic_set(&group->poll_scheduled, 0);
- mutex_init(&group->trigger_lock);
- INIT_LIST_HEAD(&group->triggers);
- group->poll_min_period = U32_MAX;
- group->polling_next_update = ULLONG_MAX;
- init_waitqueue_head(&group->poll_wait);
- timer_setup(&group->poll_timer, poll_timer_fn, 0);
- rcu_assign_pointer(group->poll_task, NULL);
- }
- void __init psi_init(void)
- {
- if (!psi_enable) {
- static_branch_enable(&psi_disabled);
- static_branch_disable(&psi_cgroups_enabled);
- return;
- }
- if (!cgroup_psi_enabled())
- static_branch_disable(&psi_cgroups_enabled);
- psi_period = jiffies_to_nsecs(PSI_FREQ);
- group_init(&psi_system);
- }
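- /*
- * Test whether a given PSI state currently applies on a CPU, based on
- * its per-state task counts and whether a task is oncpu.
- */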
- static bool test_state(unsigned int *tasks, enum psi_states state, bool oncpu)
- {
- switch (state) {
- case PSI_IO_SOME:
- return unlikely(tasks[NR_IOWAIT]);
- case PSI_IO_FULL:
- return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]);
- case PSI_MEM_SOME:
- return unlikely(tasks[NR_MEMSTALL]);
- case PSI_MEM_FULL:
- return unlikely(tasks[NR_MEMSTALL] &&
- tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]);
- case PSI_CPU_SOME:
- return unlikely(tasks[NR_RUNNING] > oncpu);
- case PSI_CPU_FULL:
- return unlikely(tasks[NR_RUNNING] && !oncpu);
- case PSI_NONIDLE:
- return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
- tasks[NR_RUNNING];
- default:
- return false;
- }
- }
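- /*
- * Take a coherent snapshot of a CPU's state times, including any state
- * that is still active, and return the per-state deltas accumulated
- * since this aggregator last sampled the CPU.
- */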
- static void get_recent_times(struct psi_group *group, int cpu,
- enum psi_aggregators aggregator, u32 *times,
- u32 *pchanged_states)
- {
- struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
- u64 now, state_start;
- enum psi_states s;
- unsigned int seq;
- u32 state_mask;
- *pchanged_states = 0;
- /* Snapshot a coherent view of the CPU state */
- do {
- seq = read_seqcount_begin(&groupc->seq);
- now = cpu_clock(cpu);
- memcpy(times, groupc->times, sizeof(groupc->times));
- state_mask = groupc->state_mask;
- state_start = groupc->state_start;
- } while (read_seqcount_retry(&groupc->seq, seq));
- /* Calculate state time deltas against the previous snapshot */
- for (s = 0; s < NR_PSI_STATES; s++) {
- u32 delta;
- /*
- * In addition to already concluded states, we also
- * incorporate currently active states on the CPU,
- * since states may last for many sampling periods.
- *
- * This way we keep our delta sampling buckets small
- * (u32) and our reported pressure close to what's
- * actually happening.
- */
- if (state_mask & (1 << s))
- times[s] += now - state_start;
- delta = times[s] - groupc->times_prev[aggregator][s];
- groupc->times_prev[aggregator][s] = times[s];
- times[s] = delta;
- if (delta)
- *pchanged_states |= (1 << s);
- }
- }
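- /*
- * Fold one sample period into the 10s/60s/300s running averages,
- * first decaying the averages across any periods with no activity.
- */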
- static void calc_avgs(unsigned long avg[3], int missed_periods,
- u64 time, u64 period)
- {
- unsigned long pct;
- /* Fill in zeroes for periods of no activity */
- if (missed_periods) {
- avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
- avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
- avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
- }
- /* Sample the most recent active period */
- pct = div_u64(time * 100, period);
- pct *= FIXED_1;
- avg[0] = calc_load(avg[0], EXP_10s, pct);
- avg[1] = calc_load(avg[1], EXP_60s, pct);
- avg[2] = calc_load(avg[2], EXP_300s, pct);
- }
- static void collect_percpu_times(struct psi_group *group,
- enum psi_aggregators aggregator,
- u32 *pchanged_states)
- {
- u64 deltas[NR_PSI_STATES - 1] = { 0, };
- unsigned long nonidle_total = 0;
- u32 changed_states = 0;
- int cpu;
- int s;
- /*
- * Collect the per-cpu time buckets and average them into a
- * single time sample that is normalized to wallclock time.
- *
- * For averaging, each CPU is weighted by its non-idle time in
- * the sampling period. This eliminates artifacts from uneven
- * loading, or even entirely idle CPUs.
- */
- for_each_possible_cpu(cpu) {
- u32 times[NR_PSI_STATES];
- u32 nonidle;
- u32 cpu_changed_states;
- get_recent_times(group, cpu, aggregator, times,
- &cpu_changed_states);
- changed_states |= cpu_changed_states;
- nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
- nonidle_total += nonidle;
- for (s = 0; s < PSI_NONIDLE; s++)
- deltas[s] += (u64)times[s] * nonidle;
- }
- /*
- * Integrate the sample into the running statistics that are
- * reported to userspace: the cumulative stall times and the
- * decaying averages.
- *
- * Pressure percentages are sampled at PSI_FREQ. We might be
- * called more often when the user polls more frequently than
- * that; we might be called less often when there is no task
- * activity, thus no data, and clock ticks are sporadic. The
- * below handles both.
- */
- /* total= */
- for (s = 0; s < NR_PSI_STATES - 1; s++)
- group->total[aggregator][s] +=
- div_u64(deltas[s], max(nonidle_total, 1UL));
- if (pchanged_states)
- *pchanged_states = changed_states;
- }
- static u64 update_averages(struct psi_group *group, u64 now)
- {
- unsigned long missed_periods = 0;
- u64 expires, period;
- u64 avg_next_update;
- int s;
- /* avgX= */
- expires = group->avg_next_update;
- if (now - expires >= psi_period)
- missed_periods = div_u64(now - expires, psi_period);
- /*
- * The periodic clock tick can get delayed for various
- * reasons, especially on loaded systems. To avoid clock
- * drift, we schedule the clock in fixed psi_period intervals.
- * But the deltas we sample out of the per-cpu buckets above
- * are based on the actual time elapsing between clock ticks.
- */
- avg_next_update = expires + ((1 + missed_periods) * psi_period);
- period = now - (group->avg_last_update + (missed_periods * psi_period));
- group->avg_last_update = now;
- for (s = 0; s < NR_PSI_STATES - 1; s++) {
- u32 sample;
- sample = group->total[PSI_AVGS][s] - group->avg_total[s];
- /*
- * Due to the lockless sampling of the time buckets,
- * recorded time deltas can slip into the next period,
- * which under full pressure can result in samples in
- * excess of the period length.
- *
- * We don't want to report nonsensical pressures in
- * excess of 100%, nor do we want to drop such events
- * on the floor. Instead we punt any overage into the
- * future until pressure subsides. By doing this we
- * don't underreport the occurring pressure curve, we
- * just report it delayed by one period length.
- *
- * The error isn't cumulative. As soon as another
- * delta slips from a period P to P+1, by definition
- * it frees up its time T in P.
- */
- if (sample > period)
- sample = period;
- group->avg_total[s] += sample;
- calc_avgs(group->avg[s], missed_periods, sample, period);
- }
- return avg_next_update;
- }
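- /* Periodic work that folds the per-cpu times into the running averages. */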
- static void psi_avgs_work(struct work_struct *work)
- {
- struct delayed_work *dwork;
- struct psi_group *group;
- u32 changed_states;
- bool nonidle;
- u64 now;
- dwork = to_delayed_work(work);
- group = container_of(dwork, struct psi_group, avgs_work);
- mutex_lock(&group->avgs_lock);
- now = sched_clock();
- collect_percpu_times(group, PSI_AVGS, &changed_states);
- nonidle = changed_states & (1 << PSI_NONIDLE);
- /*
- * If there is task activity, periodically fold the per-cpu
- * times and feed samples into the running averages. If things
- * are idle and there is no data to process, stop the clock.
- * Once restarted, we'll catch up the running averages in one
- * go - see calc_avgs() and missed_periods.
- */
- if (now >= group->avg_next_update)
- group->avg_next_update = update_averages(group, now);
- if (nonidle) {
- schedule_delayed_work(dwork, nsecs_to_jiffies(
- group->avg_next_update - now) + 1);
- }
- mutex_unlock(&group->avgs_lock);
- }
- /* Trigger tracking window manipulations */
- static void window_reset(struct psi_window *win, u64 now, u64 value,
- u64 prev_growth)
- {
- win->start_time = now;
- win->start_value = value;
- win->prev_growth = prev_growth;
- }
- /*
- * PSI growth tracking window update and growth calculation routine.
- *
- * This approximates a sliding tracking window by interpolating
- * partially elapsed windows using historical growth data from the
- * previous intervals. This minimizes memory requirements (by not storing
- * all the intermediate values in the previous window) and simplifies
- * the calculations. It works well because the PSI signal changes only
- * in the positive direction and, over relatively small window sizes,
- * the growth is close to linear.
- */
- static u64 window_update(struct psi_window *win, u64 now, u64 value)
- {
- u64 elapsed;
- u64 growth;
- elapsed = now - win->start_time;
- growth = value - win->start_value;
- /*
- * After each tracking window passes win->start_value and
- * win->start_time get reset and win->prev_growth stores
- * the average per-window growth of the previous window.
- * win->prev_growth is then used to interpolate additional
- * growth from the previous window assuming it was linear.
- */
- if (elapsed > win->size)
- window_reset(win, now, value, growth);
- else {
- u32 remaining;
- remaining = win->size - elapsed;
- growth += div64_u64(win->prev_growth * remaining, win->size);
- }
- return growth;
- }
- static void init_triggers(struct psi_group *group, u64 now)
- {
- struct psi_trigger *t;
- list_for_each_entry(t, &group->triggers, node)
- window_reset(&t->win, now,
- group->total[PSI_POLL][t->state], 0);
- memcpy(group->polling_total, group->total[PSI_POLL],
- sizeof(group->polling_total));
- group->polling_next_update = now + group->poll_min_period;
- }
- static u64 update_triggers(struct psi_group *group, u64 now)
- {
- struct psi_trigger *t;
- bool update_total = false;
- u64 *total = group->total[PSI_POLL];
- /*
- * On subsequent updates, calculate growth deltas and let
- * watchers know when their specified thresholds are exceeded.
- */
- list_for_each_entry(t, &group->triggers, node) {
- u64 growth;
- bool new_stall;
- new_stall = group->polling_total[t->state] != total[t->state];
- /* Check for stall activity or a previous threshold breach */
- if (!new_stall && !t->pending_event)
- continue;
- /*
- * Check for new stall activity, as well as deferred
- * events that occurred in the last window after the
- * trigger had already fired (we want to ratelimit
- * events without dropping any).
- */
- if (new_stall) {
- /*
- * Multiple triggers might be looking at the same state,
- * remember to update group->polling_total[] once we've
- * been through all of them. Also remember to extend the
- * polling time if we see new stall activity.
- */
- update_total = true;
- /* Calculate growth since last update */
- growth = window_update(&t->win, now, total[t->state]);
- if (!t->pending_event) {
- if (growth < t->threshold)
- continue;
- t->pending_event = true;
- }
- }
- /* Limit event signaling to once per window */
- if (now < t->last_event_time + t->win.size)
- continue;
- /* Generate an event */
- if (cmpxchg(&t->event, 0, 1) == 0)
- wake_up_interruptible(&t->event_wait);
- t->last_event_time = now;
- /* Reset threshold breach flag once event got generated */
- t->pending_event = false;
- }
- if (update_total)
- memcpy(group->polling_total, total,
- sizeof(group->polling_total));
- return now + group->poll_min_period;
- }
- /* Schedule polling if it's not already scheduled or forced. */
- static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay,
- bool force)
- {
- struct task_struct *task;
- /*
- * atomic_xchg should be called even when !force to provide a
- * full memory barrier (see the comment inside psi_poll_work).
- */
- if (atomic_xchg(&group->poll_scheduled, 1) && !force)
- return;
- rcu_read_lock();
- task = rcu_dereference(group->poll_task);
- /*
- * kworker might be NULL in case psi_trigger_destroy races with
- * psi_task_change (hotpath) which can't use locks
- */
- if (likely(task))
- mod_timer(&group->poll_timer, jiffies + delay);
- else
- atomic_set(&group->poll_scheduled, 0);
- rcu_read_unlock();
- }
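- /*
- * One pass of the polling machinery: fold the per-cpu times, restart the
- * trigger windows when (re)entering polling mode, fire any triggers whose
- * thresholds were crossed, and keep rescheduling while the polling window
- * stays open.
- */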
- static void psi_poll_work(struct psi_group *group)
- {
- bool force_reschedule = false;
- u32 changed_states;
- u64 now;
- mutex_lock(&group->trigger_lock);
- now = sched_clock();
- if (now > group->polling_until) {
- /*
- * We are either about to start or might stop polling if no
- * state change was recorded. Resetting poll_scheduled leaves
- * a small window for psi_group_change to sneak in and schedule
- * an immediate poll_work before we get to rescheduling. One
- * potential extra wakeup at the end of the polling window
- * should be negligible and polling_next_update still keeps
- * updates correctly on schedule.
- */
- atomic_set(&group->poll_scheduled, 0);
- /*
- * A task change can race with the poll worker that is supposed to
- * report on it. To avoid missing events, ensure ordering between
- * poll_scheduled and the task state accesses, such that if the poll
- * worker misses the state update, the task change is guaranteed to
- * reschedule the poll worker:
- *
- * poll worker:
- * atomic_set(poll_scheduled, 0)
- * smp_mb()
- * LOAD states
- *
- * task change:
- * STORE states
- * if atomic_xchg(poll_scheduled, 1) == 0:
- * schedule poll worker
- *
- * The atomic_xchg() implies a full barrier.
- */
- smp_mb();
- } else {
- /* Polling window is not over, keep rescheduling */
- force_reschedule = true;
- }
- collect_percpu_times(group, PSI_POLL, &changed_states);
- if (changed_states & group->poll_states) {
- /* Initialize trigger windows when entering polling mode */
- if (now > group->polling_until)
- init_triggers(group, now);
- /*
- * Keep the monitor active for at least the duration of the
- * minimum tracking window as long as monitor states are
- * changing.
- */
- group->polling_until = now +
- group->poll_min_period * UPDATES_PER_WINDOW;
- }
- if (now > group->polling_until) {
- group->polling_next_update = ULLONG_MAX;
- goto out;
- }
- if (now >= group->polling_next_update)
- group->polling_next_update = update_triggers(group, now);
- psi_schedule_poll_work(group,
- nsecs_to_jiffies(group->polling_next_update - now) + 1,
- force_reschedule);
- out:
- mutex_unlock(&group->trigger_lock);
- }
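- /* RT kthread that runs psi_poll_work() each time poll_timer_fn() signals a wakeup. */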
- static int psi_poll_worker(void *data)
- {
- struct psi_group *group = (struct psi_group *)data;
- sched_set_fifo_low(current);
- while (true) {
- wait_event_interruptible(group->poll_wait,
- atomic_cmpxchg(&group->poll_wakeup, 1, 0) ||
- kthread_should_stop());
- if (kthread_should_stop())
- break;
- psi_poll_work(group);
- }
- return 0;
- }
- static void poll_timer_fn(struct timer_list *t)
- {
- struct psi_group *group = from_timer(group, t, poll_timer);
- atomic_set(&group->poll_wakeup, 1);
- wake_up_interruptible(&group->poll_wait);
- }
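- /*
- * Charge the time since state_start to every state in the current
- * state mask, then restart the state clock at now.
- */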
- static void record_times(struct psi_group_cpu *groupc, u64 now)
- {
- u32 delta;
- delta = now - groupc->state_start;
- groupc->state_start = now;
- if (groupc->state_mask & (1 << PSI_IO_SOME)) {
- groupc->times[PSI_IO_SOME] += delta;
- if (groupc->state_mask & (1 << PSI_IO_FULL))
- groupc->times[PSI_IO_FULL] += delta;
- }
- if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
- groupc->times[PSI_MEM_SOME] += delta;
- if (groupc->state_mask & (1 << PSI_MEM_FULL))
- groupc->times[PSI_MEM_FULL] += delta;
- }
- if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
- groupc->times[PSI_CPU_SOME] += delta;
- if (groupc->state_mask & (1 << PSI_CPU_FULL))
- groupc->times[PSI_CPU_FULL] += delta;
- }
- if (groupc->state_mask & (1 << PSI_NONIDLE))
- groupc->times[PSI_NONIDLE] += delta;
- }
- static void psi_group_change(struct psi_group *group, int cpu,
- unsigned int clear, unsigned int set, u64 now,
- bool wake_clock)
- {
- struct psi_group_cpu *groupc;
- unsigned int t, m;
- enum psi_states s;
- u32 state_mask;
- groupc = per_cpu_ptr(group->pcpu, cpu);
- /*
- * First we update the task counts according to the state
- * change requested through the @clear and @set bits.
- *
- * Then, if cgroup PSI stats accounting is enabled, we
- * assess the aggregate resource states this CPU's tasks
- * have been in since the last change, and account any
- * SOME and FULL time these may have resulted in.
- */
- write_seqcount_begin(&groupc->seq);
- /*
- * Start with TSK_ONCPU, which doesn't have a corresponding
- * task count - it's just a boolean flag directly encoded in
- * the state mask. Clear, set, or carry the current state if
- * no changes are requested.
- */
- if (unlikely(clear & TSK_ONCPU)) {
- state_mask = 0;
- clear &= ~TSK_ONCPU;
- } else if (unlikely(set & TSK_ONCPU)) {
- state_mask = PSI_ONCPU;
- set &= ~TSK_ONCPU;
- } else {
- state_mask = groupc->state_mask & PSI_ONCPU;
- }
- /*
- * The rest of the state mask is calculated based on the task
- * counts. Update those first, then construct the mask.
- */
- for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
- if (!(m & (1 << t)))
- continue;
- if (groupc->tasks[t]) {
- groupc->tasks[t]--;
- } else if (!psi_bug) {
- printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
- cpu, t, groupc->tasks[0],
- groupc->tasks[1], groupc->tasks[2],
- groupc->tasks[3], clear, set);
- psi_bug = 1;
- }
- }
- for (t = 0; set; set &= ~(1 << t), t++)
- if (set & (1 << t))
- groupc->tasks[t]++;
- if (!group->enabled) {
- /*
- * On the first group change after disabling PSI, conclude
- * the current state and flush its time. This is unlikely
- * to matter to the user, but aggregation (get_recent_times)
- * may have already incorporated the live state into times_prev;
- * avoid a delta sample underflow when PSI is later re-enabled.
- */
- if (unlikely(groupc->state_mask & (1 << PSI_NONIDLE)))
- record_times(groupc, now);
- groupc->state_mask = state_mask;
- write_seqcount_end(&groupc->seq);
- return;
- }
- for (s = 0; s < NR_PSI_STATES; s++) {
- if (test_state(groupc->tasks, s, state_mask & PSI_ONCPU))
- state_mask |= (1 << s);
- }
- /*
- * Since we care about lost potential, a memstall is FULL
- * when there are no other working tasks, but also when
- * the CPU is actively reclaiming and nothing productive
- * could run even if it were runnable. So when the current
- * task in a cgroup is in_memstall, the corresponding groupc
- * on that cpu is in PSI_MEM_FULL state.
- */
- if (unlikely((state_mask & PSI_ONCPU) && cpu_curr(cpu)->in_memstall))
- state_mask |= (1 << PSI_MEM_FULL);
- record_times(groupc, now);
- groupc->state_mask = state_mask;
- write_seqcount_end(&groupc->seq);
- if (state_mask & group->poll_states)
- psi_schedule_poll_work(group, 1, false);
- if (wake_clock && !delayed_work_pending(&group->avgs_work))
- schedule_delayed_work(&group->avgs_work, PSI_FREQ);
- }
- static inline struct psi_group *task_psi_group(struct task_struct *task)
- {
- #ifdef CONFIG_CGROUPS
- if (static_branch_likely(&psi_cgroups_enabled))
- return cgroup_psi(task_dfl_cgroup(task));
- #endif
- return &psi_system;
- }
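- /* Sanity-check a task's PSI flag transition, then apply it to psi_flags. */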
- static void psi_flags_change(struct task_struct *task, int clear, int set)
- {
- if (((task->psi_flags & set) ||
- (task->psi_flags & clear) != clear) &&
- !psi_bug) {
- printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
- task->pid, task->comm, task_cpu(task),
- task->psi_flags, clear, set);
- psi_bug = 1;
- }
- task->psi_flags &= ~clear;
- task->psi_flags |= set;
- }
- void psi_task_change(struct task_struct *task, int clear, int set)
- {
- int cpu = task_cpu(task);
- struct psi_group *group;
- u64 now;
- if (!task->pid)
- return;
- psi_flags_change(task, clear, set);
- now = cpu_clock(cpu);
- group = task_psi_group(task);
- do {
- psi_group_change(group, cpu, clear, set, now, true);
- } while ((group = group->parent));
- }
- void psi_task_switch(struct task_struct *prev, struct task_struct *next,
- bool sleep)
- {
- struct psi_group *group, *common = NULL;
- int cpu = task_cpu(prev);
- u64 now = cpu_clock(cpu);
- if (next->pid) {
- psi_flags_change(next, 0, TSK_ONCPU);
- /*
- * Set TSK_ONCPU on @next's cgroups. If @next shares any
- * ancestors with @prev, those will already have @prev's
- * TSK_ONCPU bit set, and we can stop the iteration there.
- */
- group = task_psi_group(next);
- do {
- if (per_cpu_ptr(group->pcpu, cpu)->state_mask &
- PSI_ONCPU) {
- common = group;
- break;
- }
- psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
- } while ((group = group->parent));
- }
- if (prev->pid) {
- int clear = TSK_ONCPU, set = 0;
- bool wake_clock = true;
- /*
- * When we're going to sleep, psi_dequeue() lets us
- * handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and
- * TSK_IOWAIT here, where we can combine it with
- * TSK_ONCPU and save walking common ancestors twice.
- */
- if (sleep) {
- clear |= TSK_RUNNING;
- if (prev->in_memstall)
- clear |= TSK_MEMSTALL_RUNNING;
- if (prev->in_iowait)
- set |= TSK_IOWAIT;
- /*
- * Periodic aggregation shuts off if there is a period of no
- * task changes, so we wake it back up if necessary. However,
- * don't do this if the task change is the aggregation worker
- * itself going to sleep, or we'll ping-pong forever.
- */
- if (unlikely((prev->flags & PF_WQ_WORKER) &&
- wq_worker_last_func(prev) == psi_avgs_work))
- wake_clock = false;
- }
- psi_flags_change(prev, clear, set);
- group = task_psi_group(prev);
- do {
- if (group == common)
- break;
- psi_group_change(group, cpu, clear, set, now, wake_clock);
- } while ((group = group->parent));
- /*
- * TSK_ONCPU is handled up to the common ancestor. If there are
- * any other differences between the two tasks (e.g. prev goes
- * to sleep, or only one task is memstall), finish propagating
- * those differences all the way up to the root.
- */
- if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
- clear &= ~TSK_ONCPU;
- for (; group; group = group->parent)
- psi_group_change(group, cpu, clear, set, now, wake_clock);
- }
- }
- }
- #ifdef CONFIG_IRQ_TIME_ACCOUNTING
- void psi_account_irqtime(struct task_struct *task, u32 delta)
- {
- int cpu = task_cpu(task);
- struct psi_group *group;
- struct psi_group_cpu *groupc;
- u64 now;
- if (!task->pid)
- return;
- now = cpu_clock(cpu);
- group = task_psi_group(task);
- do {
- if (!group->enabled)
- continue;
- groupc = per_cpu_ptr(group->pcpu, cpu);
- write_seqcount_begin(&groupc->seq);
- record_times(groupc, now);
- groupc->times[PSI_IRQ_FULL] += delta;
- write_seqcount_end(&groupc->seq);
- if (group->poll_states & (1 << PSI_IRQ_FULL))
- psi_schedule_poll_work(group, 1, false);
- } while ((group = group->parent));
- }
- #endif
- /**
- * psi_memstall_enter - mark the beginning of a memory stall section
- * @flags: flags to handle nested sections
- *
- * Marks the calling task as being stalled due to a lack of memory,
- * such as waiting for a refault or performing reclaim.
- */
- void psi_memstall_enter(unsigned long *flags)
- {
- struct rq_flags rf;
- struct rq *rq;
- if (static_branch_likely(&psi_disabled))
- return;
- *flags = current->in_memstall;
- if (*flags)
- return;
- /*
- * in_memstall setting & accounting needs to be atomic wrt
- * changes to the task's scheduling state, otherwise we can
- * race with CPU migration.
- */
- rq = this_rq_lock_irq(&rf);
- current->in_memstall = 1;
- psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);
- rq_unlock_irq(rq, &rf);
- }
- EXPORT_SYMBOL_GPL(psi_memstall_enter);
- /**
- * psi_memstall_leave - mark the end of a memory stall section
- * @flags: flags to handle nested memdelay sections
- *
- * Marks the calling task as no longer stalled due to lack of memory.
- */
- void psi_memstall_leave(unsigned long *flags)
- {
- struct rq_flags rf;
- struct rq *rq;
- if (static_branch_likely(&psi_disabled))
- return;
- if (*flags)
- return;
- /*
- * in_memstall clearing & accounting needs to be atomic wrt
- * changes to the task's scheduling state, otherwise we could
- * race with CPU migration.
- */
- rq = this_rq_lock_irq(&rf);
- current->in_memstall = 0;
- psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0);
- rq_unlock_irq(rq, &rf);
- }
- EXPORT_SYMBOL_GPL(psi_memstall_leave);
- #ifdef CONFIG_CGROUPS
- int psi_cgroup_alloc(struct cgroup *cgroup)
- {
- if (!static_branch_likely(&psi_cgroups_enabled))
- return 0;
- cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
- if (!cgroup->psi)
- return -ENOMEM;
- cgroup->psi->pcpu = alloc_percpu(struct psi_group_cpu);
- if (!cgroup->psi->pcpu) {
- kfree(cgroup->psi);
- return -ENOMEM;
- }
- group_init(cgroup->psi);
- cgroup->psi->parent = cgroup_psi(cgroup_parent(cgroup));
- return 0;
- }
- void psi_cgroup_free(struct cgroup *cgroup)
- {
- if (!static_branch_likely(&psi_cgroups_enabled))
- return;
- cancel_delayed_work_sync(&cgroup->psi->avgs_work);
- free_percpu(cgroup->psi->pcpu);
- /* All triggers must be removed by now */
- WARN_ONCE(cgroup->psi->poll_states, "psi: trigger leak\n");
- kfree(cgroup->psi);
- }
- /**
- * cgroup_move_task - move task to a different cgroup
- * @task: the task
- * @to: the target css_set
- *
- * Move task to a new cgroup and safely migrate its associated stall
- * state between the different groups.
- *
- * This function acquires the task's rq lock to lock out concurrent
- * changes to the task's scheduling state and - in case the task is
- * running - concurrent changes to its stall state.
- */
- void cgroup_move_task(struct task_struct *task, struct css_set *to)
- {
- unsigned int task_flags;
- struct rq_flags rf;
- struct rq *rq;
- if (!static_branch_likely(&psi_cgroups_enabled)) {
- /*
- * Lame to do this here, but the scheduler cannot be locked
- * from the outside, so we move cgroups from inside sched/.
- */
- rcu_assign_pointer(task->cgroups, to);
- return;
- }
- rq = task_rq_lock(task, &rf);
- /*
- * We may race with schedule() dropping the rq lock between
- * deactivating prev and switching to next. Because the psi
- * updates from the deactivation are deferred to the switch
- * callback to save cgroup tree updates, the task's scheduling
- * state here is not coherent with its psi state:
- *
- *   schedule()                     cgroup_move_task()
- *     rq_lock()
- *     deactivate_task()
- *       p->on_rq = 0
- *       psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
- *     pick_next_task()
- *       rq_unlock()
- *                                    rq_lock()
- *                                    psi_task_change() // old cgroup
- *                                    task->cgroups = to
- *                                    psi_task_change() // new cgroup
- *                                    rq_unlock()
- *     rq_lock()
- *     psi_sched_switch() // does deferred updates in new cgroup
- *
- * Don't rely on the scheduling state. Use psi_flags instead.
- */
- task_flags = task->psi_flags;
- if (task_flags)
- psi_task_change(task, task_flags, 0);
- /* See comment above */
- rcu_assign_pointer(task->cgroups, to);
- if (task_flags)
- psi_task_change(task, 0, task_flags);
- task_rq_unlock(rq, task, &rf);
- }
- void psi_cgroup_restart(struct psi_group *group)
- {
- int cpu;
- /*
- * After psi_group->enabled is cleared, we don't actually stop the
- * per-cpu task accounting in each psi_group_cpu; we only stop the
- * test_state() loop, record_times() and the averaging worker - see
- * psi_group_change() for details.
- *
- * When cgroup PSI is disabled, this function has nothing to sync
- * since the cgroup pressure files are hidden and each per-cpu
- * psi_group_cpu sees !psi_group->enabled and only does task accounting.
- *
- * When cgroup PSI is re-enabled, this function uses psi_group_change()
- * to recompute the state mask from the test_state() loop over tasks[]
- * and to restart groupc->state_start from now. We pass .clear = .set = 0
- * because no task state has actually changed.
- */
- if (!group->enabled)
- return;
- for_each_possible_cpu(cpu) {
- struct rq *rq = cpu_rq(cpu);
- struct rq_flags rf;
- u64 now;
- rq_lock_irq(rq, &rf);
- now = cpu_clock(cpu);
- psi_group_change(group, cpu, 0, 0, now, true);
- rq_unlock_irq(rq, &rf);
- }
- }
- #endif /* CONFIG_CGROUPS */
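- /* Emit the some/full avg10/avg60/avg300 and cumulative total lines for one resource. */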
- int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
- {
- bool only_full = false;
- int full;
- u64 now;
- if (static_branch_likely(&psi_disabled))
- return -EOPNOTSUPP;
- /* Update averages before reporting them */
- mutex_lock(&group->avgs_lock);
- now = sched_clock();
- collect_percpu_times(group, PSI_AVGS, NULL);
- if (now >= group->avg_next_update)
- group->avg_next_update = update_averages(group, now);
- mutex_unlock(&group->avgs_lock);
- #ifdef CONFIG_IRQ_TIME_ACCOUNTING
- only_full = res == PSI_IRQ;
- #endif
- for (full = 0; full < 2 - only_full; full++) {
- unsigned long avg[3] = { 0, };
- u64 total = 0;
- int w;
- /* CPU FULL is undefined at the system level */
- if (!(group == &psi_system && res == PSI_CPU && full)) {
- for (w = 0; w < 3; w++)
- avg[w] = group->avg[res * 2 + full][w];
- total = div_u64(group->total[PSI_AVGS][res * 2 + full],
- NSEC_PER_USEC);
- }
- seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
- full || only_full ? "full" : "some",
- LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
- LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
- LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
- total);
- }
- return 0;
- }
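- /*
- * Parse a "some|full <threshold_us> <window_us>" trigger description,
- * validate it against the window limits, register the trigger, and start
- * the "psimon" polling kthread if this is the group's first trigger.
- */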
- struct psi_trigger *psi_trigger_create(struct psi_group *group,
- char *buf, enum psi_res res)
- {
- struct psi_trigger *t;
- enum psi_states state;
- u32 threshold_us;
- u32 window_us;
- if (static_branch_likely(&psi_disabled))
- return ERR_PTR(-EOPNOTSUPP);
- if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
- state = PSI_IO_SOME + res * 2;
- else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
- state = PSI_IO_FULL + res * 2;
- else
- return ERR_PTR(-EINVAL);
- #ifdef CONFIG_IRQ_TIME_ACCOUNTING
- if (res == PSI_IRQ && --state != PSI_IRQ_FULL)
- return ERR_PTR(-EINVAL);
- #endif
- if (state >= PSI_NONIDLE)
- return ERR_PTR(-EINVAL);
- if (window_us == 0 || window_us > WINDOW_MAX_US)
- return ERR_PTR(-EINVAL);
- /* Check threshold */
- if (threshold_us == 0 || threshold_us > window_us)
- return ERR_PTR(-EINVAL);
- t = kmalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
- return ERR_PTR(-ENOMEM);
- t->group = group;
- t->state = state;
- t->threshold = threshold_us * NSEC_PER_USEC;
- t->win.size = window_us * NSEC_PER_USEC;
- window_reset(&t->win, sched_clock(),
- group->total[PSI_POLL][t->state], 0);
- t->event = 0;
- t->last_event_time = 0;
- init_waitqueue_head(&t->event_wait);
- t->pending_event = false;
- mutex_lock(&group->trigger_lock);
- if (!rcu_access_pointer(group->poll_task)) {
- struct task_struct *task;
- task = kthread_create(psi_poll_worker, group, "psimon");
- if (IS_ERR(task)) {
- kfree(t);
- mutex_unlock(&group->trigger_lock);
- return ERR_CAST(task);
- }
- atomic_set(&group->poll_wakeup, 0);
- wake_up_process(task);
- rcu_assign_pointer(group->poll_task, task);
- }
- list_add(&t->node, &group->triggers);
- group->poll_min_period = min(group->poll_min_period,
- div_u64(t->win.size, UPDATES_PER_WINDOW));
- group->nr_triggers[t->state]++;
- group->poll_states |= (1 << t->state);
- mutex_unlock(&group->trigger_lock);
- return t;
- }
- void psi_trigger_destroy(struct psi_trigger *t)
- {
- struct psi_group *group;
- struct task_struct *task_to_destroy = NULL;
- /*
- * We do not check psi_disabled since it might have been disabled after
- * the trigger got created.
- */
- if (!t)
- return;
- group = t->group;
- /*
- * Wake up waiters to stop polling and clear the queue to prevent it from
- * being accessed later. Can happen if cgroup is deleted from under a
- * polling process.
- */
- wake_up_pollfree(&t->event_wait);
- mutex_lock(&group->trigger_lock);
- if (!list_empty(&t->node)) {
- struct psi_trigger *tmp;
- u64 period = ULLONG_MAX;
- list_del(&t->node);
- group->nr_triggers[t->state]--;
- if (!group->nr_triggers[t->state])
- group->poll_states &= ~(1 << t->state);
- /* reset min update period for the remaining triggers */
- list_for_each_entry(tmp, &group->triggers, node)
- period = min(period, div_u64(tmp->win.size,
- UPDATES_PER_WINDOW));
- group->poll_min_period = period;
- /* Destroy poll_task when the last trigger is destroyed */
- if (group->poll_states == 0) {
- group->polling_until = 0;
- task_to_destroy = rcu_dereference_protected(
- group->poll_task,
- lockdep_is_held(&group->trigger_lock));
- rcu_assign_pointer(group->poll_task, NULL);
- del_timer(&group->poll_timer);
- }
- }
- mutex_unlock(&group->trigger_lock);
- /*
- * Wait for psi_schedule_poll_work RCU to complete its read-side
- * critical section before destroying the trigger and optionally the
- * poll_task.
- */
- synchronize_rcu();
- /*
- * Stop kthread 'psimon' after releasing trigger_lock to prevent a
- * deadlock while waiting for psi_poll_work to acquire trigger_lock
- */
- if (task_to_destroy) {
- /*
- * After the RCU grace period has expired, the worker
- * can no longer be found through group->poll_task.
- */
- kthread_stop(task_to_destroy);
- atomic_set(&group->poll_scheduled, 0);
- }
- kfree(t);
- }
- __poll_t psi_trigger_poll(void **trigger_ptr,
- struct file *file, poll_table *wait)
- {
- __poll_t ret = DEFAULT_POLLMASK;
- struct psi_trigger *t;
- if (static_branch_likely(&psi_disabled))
- return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
- t = smp_load_acquire(trigger_ptr);
- if (!t)
- return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
- poll_wait(file, &t->event_wait, wait);
- if (cmpxchg(&t->event, 1, 0) == 1)
- ret |= EPOLLPRI;
- return ret;
- }
- #ifdef CONFIG_PROC_FS
- static int psi_io_show(struct seq_file *m, void *v)
- {
- return psi_show(m, &psi_system, PSI_IO);
- }
- static int psi_memory_show(struct seq_file *m, void *v)
- {
- return psi_show(m, &psi_system, PSI_MEM);
- }
- static int psi_cpu_show(struct seq_file *m, void *v)
- {
- return psi_show(m, &psi_system, PSI_CPU);
- }
- static int psi_io_open(struct inode *inode, struct file *file)
- {
- return single_open(file, psi_io_show, NULL);
- }
- static int psi_memory_open(struct inode *inode, struct file *file)
- {
- return single_open(file, psi_memory_show, NULL);
- }
- static int psi_cpu_open(struct inode *inode, struct file *file)
- {
- return single_open(file, psi_cpu_show, NULL);
- }
- static ssize_t psi_write(struct file *file, const char __user *user_buf,
- size_t nbytes, enum psi_res res)
- {
- char buf[32];
- size_t buf_size;
- struct seq_file *seq;
- struct psi_trigger *new;
- if (static_branch_likely(&psi_disabled))
- return -EOPNOTSUPP;
- if (!nbytes)
- return -EINVAL;
- buf_size = min(nbytes, sizeof(buf));
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- buf[buf_size - 1] = '\0';
- seq = file->private_data;
- /* Take seq->lock to protect seq->private from concurrent writes */
- mutex_lock(&seq->lock);
- /* Allow only one trigger per file descriptor */
- if (seq->private) {
- mutex_unlock(&seq->lock);
- return -EBUSY;
- }
- new = psi_trigger_create(&psi_system, buf, res);
- if (IS_ERR(new)) {
- mutex_unlock(&seq->lock);
- return PTR_ERR(new);
- }
- smp_store_release(&seq->private, new);
- mutex_unlock(&seq->lock);
- return nbytes;
- }
- static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
- size_t nbytes, loff_t *ppos)
- {
- return psi_write(file, user_buf, nbytes, PSI_IO);
- }
- static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
- size_t nbytes, loff_t *ppos)
- {
- return psi_write(file, user_buf, nbytes, PSI_MEM);
- }
- static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
- size_t nbytes, loff_t *ppos)
- {
- return psi_write(file, user_buf, nbytes, PSI_CPU);
- }
- static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
- {
- struct seq_file *seq = file->private_data;
- return psi_trigger_poll(&seq->private, file, wait);
- }
- static int psi_fop_release(struct inode *inode, struct file *file)
- {
- struct seq_file *seq = file->private_data;
- psi_trigger_destroy(seq->private);
- return single_release(inode, file);
- }
- static const struct proc_ops psi_io_proc_ops = {
- .proc_open = psi_io_open,
- .proc_read = seq_read,
- .proc_lseek = seq_lseek,
- .proc_write = psi_io_write,
- .proc_poll = psi_fop_poll,
- .proc_release = psi_fop_release,
- };
- static const struct proc_ops psi_memory_proc_ops = {
- .proc_open = psi_memory_open,
- .proc_read = seq_read,
- .proc_lseek = seq_lseek,
- .proc_write = psi_memory_write,
- .proc_poll = psi_fop_poll,
- .proc_release = psi_fop_release,
- };
- static const struct proc_ops psi_cpu_proc_ops = {
- .proc_open = psi_cpu_open,
- .proc_read = seq_read,
- .proc_lseek = seq_lseek,
- .proc_write = psi_cpu_write,
- .proc_poll = psi_fop_poll,
- .proc_release = psi_fop_release,
- };
- #ifdef CONFIG_IRQ_TIME_ACCOUNTING
- static int psi_irq_show(struct seq_file *m, void *v)
- {
- return psi_show(m, &psi_system, PSI_IRQ);
- }
- static int psi_irq_open(struct inode *inode, struct file *file)
- {
- return single_open(file, psi_irq_show, NULL);
- }
- static ssize_t psi_irq_write(struct file *file, const char __user *user_buf,
- size_t nbytes, loff_t *ppos)
- {
- return psi_write(file, user_buf, nbytes, PSI_IRQ);
- }
- static const struct proc_ops psi_irq_proc_ops = {
- .proc_open = psi_irq_open,
- .proc_read = seq_read,
- .proc_lseek = seq_lseek,
- .proc_write = psi_irq_write,
- .proc_poll = psi_fop_poll,
- .proc_release = psi_fop_release,
- };
- #endif
- static int __init psi_proc_init(void)
- {
- if (psi_enable) {
- proc_mkdir("pressure", NULL);
- proc_create("pressure/io", 0, NULL, &psi_io_proc_ops);
- proc_create("pressure/memory", 0, NULL, &psi_memory_proc_ops);
- proc_create("pressure/cpu", 0, NULL, &psi_cpu_proc_ops);
- #ifdef CONFIG_IRQ_TIME_ACCOUNTING
- proc_create("pressure/irq", 0, NULL, &psi_irq_proc_ops);
- #endif
- }
- return 0;
- }
- module_init(psi_proc_init);
- #endif /* CONFIG_PROC_FS */