- // SPDX-License-Identifier: GPL-2.0-only
- /*
- * Simple CPU accounting cgroup controller
- */
- #include <linux/cpufreq_times.h>
- #include <trace/hooks/sched.h>
- #ifdef CONFIG_IRQ_TIME_ACCOUNTING
- /*
- * There are no locks covering percpu hardirq/softirq time.
- * They are only modified in vtime_account, on the corresponding CPU
- * with interrupts disabled. So, writes are safe.
- * They are read and saved off onto struct rq in update_rq_clock().
- * This may result in another CPU reading this CPU's irq time and can
- * race with irq/vtime_account on this CPU. We would either get the old
- * or the new value, with a side effect of accounting a slice of irq time
- * to the wrong task when an irq is in progress while we read rq->clock. That is a worthy
- * compromise in place of having locks on each irq in account_system_time.
- */
- DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
- EXPORT_PER_CPU_SYMBOL_GPL(cpu_irqtime);
- static int sched_clock_irqtime;
- void enable_sched_clock_irqtime(void)
- {
- sched_clock_irqtime = 1;
- }
- void disable_sched_clock_irqtime(void)
- {
- sched_clock_irqtime = 0;
- }
- static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
- enum cpu_usage_stat idx)
- {
- u64 *cpustat = kcpustat_this_cpu->cpustat;
- u64_stats_update_begin(&irqtime->sync);
- cpustat[idx] += delta;
- irqtime->total += delta;
- irqtime->tick_delta += delta;
- u64_stats_update_end(&irqtime->sync);
- }
- /*
- * Called after incrementing preempt_count on {soft,}irq_enter
- * and before decrementing preempt_count on {soft,}irq_exit.
- */
- void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
- {
- struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
- unsigned int pc;
- s64 delta;
- int cpu;
- bool irq_start = true;
- if (!sched_clock_irqtime)
- return;
- cpu = smp_processor_id();
- delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
- irqtime->irq_start_time += delta;
- pc = irq_count() - offset;
- /*
- * We do not account for softirq time from ksoftirqd here.
- * We want to continue accounting softirq time to the ksoftirqd thread
- * in that case, so as not to confuse the scheduler with a special task
- * that does not consume any time, but still wants to run.
- */
- if (pc & HARDIRQ_MASK) {
- irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
- irq_start = false;
- } else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd()) {
- irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
- irq_start = false;
- }
- trace_android_rvh_account_irq(curr, cpu, delta, irq_start);
- }
- static u64 irqtime_tick_accounted(u64 maxtime)
- {
- struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
- u64 delta;
- delta = min(irqtime->tick_delta, maxtime);
- irqtime->tick_delta -= delta;
- return delta;
- }
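- /*
- * Illustrative only, not part of this file: a lockless reader of the
- * per-CPU irq time using the u64_stats seqcount described above; the
- * in-tree reader, irq_time_read() in kernel/sched/sched.h, has the same
- * retry-loop shape.
- */
- static inline u64 example_irqtime_read(int cpu)
- {
- struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
- unsigned int seq;
- u64 total;
- do {
- seq = u64_stats_fetch_begin(&irqtime->sync);
- total = irqtime->total;
- } while (u64_stats_fetch_retry(&irqtime->sync, seq));
- return total;
- }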
- #else /* CONFIG_IRQ_TIME_ACCOUNTING */
- #define sched_clock_irqtime (0)
- static u64 irqtime_tick_accounted(u64 dummy)
- {
- return 0;
- }
- #endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
- static inline void task_group_account_field(struct task_struct *p, int index,
- u64 tmp)
- {
- /*
- * Since all updates are sure to touch the root cgroup, we
- * get ourselves ahead and touch it first. If the root cgroup
- * is the only cgroup, then nothing else should be necessary.
- */
- __this_cpu_add(kernel_cpustat.cpustat[index], tmp);
- cgroup_account_cputime_field(p, index, tmp);
- }
- /*
- * Account user CPU time to a process.
- * @p: the process that the CPU time gets accounted to
- * @cputime: the CPU time spent in user space since the last update
- */
- void account_user_time(struct task_struct *p, u64 cputime)
- {
- int index;
- /* Add user time to process. */
- p->utime += cputime;
- account_group_user_time(p, cputime);
- index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
- /* Add user time to cpustat. */
- task_group_account_field(p, index, cputime);
- /* Account for user time used */
- acct_account_cputime(p);
- /* Account power usage for user time */
- cpufreq_acct_update_power(p, cputime);
- }
- /*
- * Account guest CPU time to a process.
- * @p: the process that the CPU time gets accounted to
- * @cputime: the CPU time spent in virtual machine since the last update
- */
- void account_guest_time(struct task_struct *p, u64 cputime)
- {
- u64 *cpustat = kcpustat_this_cpu->cpustat;
- /* Add guest time to process. */
- p->utime += cputime;
- account_group_user_time(p, cputime);
- p->gtime += cputime;
- /* Add guest time to cpustat. */
- if (task_nice(p) > 0) {
- task_group_account_field(p, CPUTIME_NICE, cputime);
- cpustat[CPUTIME_GUEST_NICE] += cputime;
- } else {
- task_group_account_field(p, CPUTIME_USER, cputime);
- cpustat[CPUTIME_GUEST] += cputime;
- }
- }
- /*
- * Account system CPU time to a process and desired cpustat field
- * @p: the process that the CPU time gets accounted to
- * @cputime: the CPU time spent in kernel space since the last update
- * @index: pointer to cpustat field that has to be updated
- */
- void account_system_index_time(struct task_struct *p,
- u64 cputime, enum cpu_usage_stat index)
- {
- /* Add system time to process. */
- p->stime += cputime;
- account_group_system_time(p, cputime);
- /* Add system time to cpustat. */
- task_group_account_field(p, index, cputime);
- /* Account for system time used */
- acct_account_cputime(p);
- /* Account power usage for system time */
- cpufreq_acct_update_power(p, cputime);
- }
- /*
- * Account system CPU time to a process.
- * @p: the process that the CPU time gets accounted to
- * @hardirq_offset: the offset to subtract from hardirq_count()
- * @cputime: the CPU time spent in kernel space since the last update
- */
- void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
- {
- int index;
- if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
- account_guest_time(p, cputime);
- return;
- }
- if (hardirq_count() - hardirq_offset)
- index = CPUTIME_IRQ;
- else if (in_serving_softirq())
- index = CPUTIME_SOFTIRQ;
- else
- index = CPUTIME_SYSTEM;
- account_system_index_time(p, cputime, index);
- }
- /*
- * Account for involuntary wait time.
- * @cputime: the CPU time spent in involuntary wait
- */
- void account_steal_time(u64 cputime)
- {
- u64 *cpustat = kcpustat_this_cpu->cpustat;
- cpustat[CPUTIME_STEAL] += cputime;
- }
- /*
- * Account for idle time.
- * @cputime: the CPU time spent in idle wait
- */
- void account_idle_time(u64 cputime)
- {
- u64 *cpustat = kcpustat_this_cpu->cpustat;
- struct rq *rq = this_rq();
- if (atomic_read(&rq->nr_iowait) > 0)
- cpustat[CPUTIME_IOWAIT] += cputime;
- else
- cpustat[CPUTIME_IDLE] += cputime;
- }
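- /*
- * Note on account_idle_time() above: the iowait vs. idle split is based
- * on this runqueue's nr_iowait counter, so an idle tick is charged to
- * CPUTIME_IOWAIT whenever at least one task that blocked on I/O is still
- * associated with this runqueue, and to CPUTIME_IDLE otherwise.
- */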
- #ifdef CONFIG_SCHED_CORE
- /*
- * Account for forceidle time due to core scheduling.
- *
- * REQUIRES: schedstat is enabled.
- */
- void __account_forceidle_time(struct task_struct *p, u64 delta)
- {
- __schedstat_add(p->stats.core_forceidle_sum, delta);
- task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
- }
- #endif
- /*
- * When a guest is interrupted for a long time, missed clock
- * ticks are not redelivered later. Due to that, this function may on
- * occasion account more time than the calling functions think has elapsed.
- */
- static __always_inline u64 steal_account_process_time(u64 maxtime)
- {
- #ifdef CONFIG_PARAVIRT
- if (static_key_false(&paravirt_steal_enabled)) {
- u64 steal;
- steal = paravirt_steal_clock(smp_processor_id());
- steal -= this_rq()->prev_steal_time;
- steal = min(steal, maxtime);
- account_steal_time(steal);
- this_rq()->prev_steal_time += steal;
- return steal;
- }
- #endif
- return 0;
- }
- /*
- * Account how much elapsed time was spent in steal, irq, or softirq time.
- */
- static inline u64 account_other_time(u64 max)
- {
- u64 accounted;
- lockdep_assert_irqs_disabled();
- accounted = steal_account_process_time(max);
- if (accounted < max)
- accounted += irqtime_tick_accounted(max - accounted);
- return accounted;
- }
- #ifdef CONFIG_64BIT
- static inline u64 read_sum_exec_runtime(struct task_struct *t)
- {
- return t->se.sum_exec_runtime;
- }
- #else
- static u64 read_sum_exec_runtime(struct task_struct *t)
- {
- u64 ns;
- struct rq_flags rf;
- struct rq *rq;
- rq = task_rq_lock(t, &rf);
- ns = t->se.sum_exec_runtime;
- task_rq_unlock(rq, t, &rf);
- return ns;
- }
- #endif
- /*
- * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
- * tasks (sum on group iteration) belonging to @tsk's group.
- */
- void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
- {
- struct signal_struct *sig = tsk->signal;
- u64 utime, stime;
- struct task_struct *t;
- unsigned int seq, nextseq;
- unsigned long flags;
- /*
- * Update current task runtime to account pending time since last
- * scheduler action or thread_group_cputime() call. This thread group
- * might have other running tasks on different CPUs, but updating
- * their runtime can affect syscall performance, so we skip accounting
- * those pending times and rely only on values updated on tick or
- * other scheduler action.
- */
- if (same_thread_group(current, tsk))
- (void) task_sched_runtime(current);
- rcu_read_lock();
- /* Attempt a lockless read on the first round. */
- nextseq = 0;
- do {
- seq = nextseq;
- flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
- times->utime = sig->utime;
- times->stime = sig->stime;
- times->sum_exec_runtime = sig->sum_sched_runtime;
- for_each_thread(tsk, t) {
- task_cputime(t, &utime, &stime);
- times->utime += utime;
- times->stime += stime;
- times->sum_exec_runtime += read_sum_exec_runtime(t);
- }
- /* If lockless access failed, take the lock. */
- nextseq = 1;
- } while (need_seqretry(&sig->stats_lock, seq));
- done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
- rcu_read_unlock();
- }
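- /*
- * Illustrative only (hypothetical caller): snapshot a thread group's
- * aggregate times; thread_group_cputime_adjusted() below takes the same
- * kind of snapshot.
- */
- static inline u64 example_group_runtime_ns(struct task_struct *tsk)
- {
- struct task_cputime times;
- thread_group_cputime(tsk, &times);
- return times.sum_exec_runtime;
- }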
- #ifdef CONFIG_IRQ_TIME_ACCOUNTING
- /*
- * Account a tick to a process and cpustat
- * @p: the process that the CPU time gets accounted to
- * @user_tick: whether the tick is a user-space tick
- * @ticks: number of ticks to account
- *
- * Tick demultiplexing follows the order
- * - pending hardirq update
- * - pending softirq update
- * - user_time
- * - idle_time
- * - system time:
- *   - check for guest_time
- *   - else account as system_time
- *
- * The check for hardirq is done for both system and user time, as there is
- * no timer going off while we are in a hardirq and hence we may never get an
- * opportunity to update it solely in system time.
- * p->stime and friends are only updated on system time and not on irq or
- * softirq, as those do not count in task exec_runtime any more.
- */
- static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
- int ticks)
- {
- u64 other, cputime = TICK_NSEC * ticks;
- /*
- * When returning from idle, many ticks can get accounted at
- * once, including some ticks of steal, irq, and softirq time.
- * Subtract those ticks from the amount of time accounted to
- * idle, or potentially user or system time. Due to rounding,
- * other time can exceed ticks occasionally.
- */
- other = account_other_time(ULONG_MAX);
- if (other >= cputime)
- return;
- cputime -= other;
- if (this_cpu_ksoftirqd() == p) {
- /*
- * ksoftirqd time does not get accounted in cpu_softirq_time.
- * So, we have to handle it separately here.
- * Also, p->stime needs to be updated for ksoftirqd.
- */
- account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
- } else if (user_tick) {
- account_user_time(p, cputime);
- } else if (p == this_rq()->idle) {
- account_idle_time(cputime);
- } else if (p->flags & PF_VCPU) { /* System time or guest time */
- account_guest_time(p, cputime);
- } else {
- account_system_index_time(p, cputime, CPUTIME_SYSTEM);
- }
- trace_android_vh_irqtime_account_process_tick(p, this_rq(), user_tick, ticks);
- }
- static void irqtime_account_idle_ticks(int ticks)
- {
- irqtime_account_process_tick(current, 0, ticks);
- }
- #else /* CONFIG_IRQ_TIME_ACCOUNTING */
- static inline void irqtime_account_idle_ticks(int ticks) { }
- static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
- int nr_ticks) { }
- #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
- /*
- * Use precise platform statistics if available:
- */
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- # ifndef __ARCH_HAS_VTIME_TASK_SWITCH
- void vtime_task_switch(struct task_struct *prev)
- {
- if (is_idle_task(prev))
- vtime_account_idle(prev);
- else
- vtime_account_kernel(prev);
- vtime_flush(prev);
- arch_vtime_task_switch(prev);
- }
- # endif
- void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
- {
- unsigned int pc = irq_count() - offset;
- if (pc & HARDIRQ_OFFSET) {
- vtime_account_hardirq(tsk);
- } else if (pc & SOFTIRQ_OFFSET) {
- vtime_account_softirq(tsk);
- } else if (!IS_ENABLED(CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE) &&
- is_idle_task(tsk)) {
- vtime_account_idle(tsk);
- } else {
- vtime_account_kernel(tsk);
- }
- }
- void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
- u64 *ut, u64 *st)
- {
- *ut = curr->utime;
- *st = curr->stime;
- }
- void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
- {
- *ut = p->utime;
- *st = p->stime;
- }
- EXPORT_SYMBOL_GPL(task_cputime_adjusted);
- void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
- {
- struct task_cputime cputime;
- thread_group_cputime(p, &cputime);
- *ut = cputime.utime;
- *st = cputime.stime;
- }
- EXPORT_SYMBOL_GPL(thread_group_cputime_adjusted);
- #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */
- /*
- * Account a single tick or a few ticks of CPU time.
- * @p: the process that the CPU time gets accounted to
- * @user_tick: indicates if the tick is a user or a system tick
- */
- void account_process_tick(struct task_struct *p, int user_tick)
- {
- u64 cputime, steal;
- int ticks = 1;
- trace_android_vh_account_process_tick_gran(user_tick, &ticks);
- if (!ticks)
- return;
- if (vtime_accounting_enabled_this_cpu())
- return;
- trace_android_vh_account_task_time(p, this_rq(), user_tick, ticks);
- if (sched_clock_irqtime) {
- irqtime_account_process_tick(p, user_tick, ticks);
- return;
- }
- cputime = TICK_NSEC * ticks;
- steal = steal_account_process_time(ULONG_MAX);
- if (steal >= cputime)
- return;
- cputime -= steal;
- if (user_tick)
- account_user_time(p, cputime);
- else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
- account_system_time(p, HARDIRQ_OFFSET, cputime);
- else
- account_idle_time(cputime);
- }
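- /*
- * Note on account_process_tick() above: this is the tick-based entry
- * point, invoked from the timer interrupt path (update_process_times())
- * once per tick, and it bails out early when generic vtime accounting
- * is active on this CPU.
- */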
- /*
- * Account multiple ticks of idle time.
- * @ticks: number of ticks elapsed while the CPU was idle
- */
- void account_idle_ticks(unsigned long ticks)
- {
- u64 cputime, steal;
- if (sched_clock_irqtime) {
- irqtime_account_idle_ticks(ticks);
- return;
- }
- cputime = ticks * TICK_NSEC;
- steal = steal_account_process_time(ULONG_MAX);
- if (steal >= cputime)
- return;
- cputime -= steal;
- account_idle_time(cputime);
- }
- /*
- * Adjust tick based cputime random precision against scheduler runtime
- * accounting.
- *
- * Tick based cputime accounting depends on whether the random scheduling
- * timeslices of a task are interrupted or not by the timer. Depending on
- * these circumstances, the number of these interrupts may be over- or
- * under-estimated, matching the real user and system cputime with a variable
- * precision.
- *
- * Fix this by scaling these tick based values against the total runtime
- * accounted by the CFS scheduler.
- *
- * This code provides the following guarantees:
- *
- * stime + utime == rtime
- * stime_i+1 >= stime_i, utime_i+1 >= utime_i
- *
- * Assuming that rtime_i+1 >= rtime_i.
- */
- void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
- u64 *ut, u64 *st)
- {
- u64 rtime, stime, utime;
- unsigned long flags;
- /* Serialize concurrent callers such that we can honour our guarantees */
- raw_spin_lock_irqsave(&prev->lock, flags);
- rtime = curr->sum_exec_runtime;
- /*
- * This is possible under two circumstances:
- * - rtime isn't monotonic after all (a bug);
- * - we got reordered by the lock.
- *
- * In both cases this acts as a filter such that the rest of the code
- * can assume it is monotonic regardless of anything else.
- */
- if (prev->stime + prev->utime >= rtime)
- goto out;
- stime = curr->stime;
- utime = curr->utime;
- /*
- * If either stime or utime are 0, assume all runtime is userspace.
- * Once a task gets some ticks, the monotonicity code at 'update:'
- * will ensure things converge to the observed ratio.
- */
- if (stime == 0) {
- utime = rtime;
- goto update;
- }
- if (utime == 0) {
- stime = rtime;
- goto update;
- }
- stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);
- update:
- /*
- * Make sure stime doesn't go backwards; this preserves monotonicity
- * for utime because rtime is monotonic.
- *
- * utime_i+1 = rtime_i+1 - stime_i
- * = rtime_i+1 - (rtime_i - utime_i)
- * = (rtime_i+1 - rtime_i) + utime_i
- * >= utime_i
- */
- if (stime < prev->stime)
- stime = prev->stime;
- utime = rtime - stime;
- /*
- * Make sure utime doesn't go backwards; this still preserves
- * monotonicity for stime, analogous argument to above.
- */
- if (utime < prev->utime) {
- utime = prev->utime;
- stime = rtime - utime;
- }
- prev->stime = stime;
- prev->utime = utime;
- out:
- *ut = prev->utime;
- *st = prev->stime;
- raw_spin_unlock_irqrestore(&prev->lock, flags);
- }
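- /*
- * Worked example for the scaling above (illustrative numbers only):
- * with tick samples stime = 2 and utime = 6 and a precise runtime
- * rtime = 100, the scaling gives
- *
- * stime = mul_u64_u64_div_u64(2, 100, 2 + 6) = 25
- * utime = rtime - stime = 75
- *
- * i.e. the observed 1:3 tick ratio is reapplied to the precise runtime
- * before the monotonicity clamps against prev->stime/prev->utime run.
- */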
- void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
- {
- struct task_cputime cputime = {
- .sum_exec_runtime = p->se.sum_exec_runtime,
- };
- if (task_cputime(p, &cputime.utime, &cputime.stime))
- cputime.sum_exec_runtime = task_sched_runtime(p);
- cputime_adjust(&cputime, &p->prev_cputime, ut, st);
- }
- EXPORT_SYMBOL_GPL(task_cputime_adjusted);
- void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
- {
- struct task_cputime cputime;
- thread_group_cputime(p, &cputime);
- cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
- }
- EXPORT_SYMBOL_GPL(thread_group_cputime_adjusted);
- #endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
- #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- static u64 vtime_delta(struct vtime *vtime)
- {
- unsigned long long clock;
- clock = sched_clock();
- if (clock < vtime->starttime)
- return 0;
- return clock - vtime->starttime;
- }
- static u64 get_vtime_delta(struct vtime *vtime)
- {
- u64 delta = vtime_delta(vtime);
- u64 other;
- /*
- * Unlike tick based timing, vtime based timing never has lost
- * ticks, so there is no need for steal time accounting to make up
- * for lost ticks. Vtime accounts a rounded version of actual
- * elapsed time. Limit account_other_time to prevent rounding
- * errors from causing elapsed vtime to go negative.
- */
- other = account_other_time(delta);
- WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
- vtime->starttime += delta;
- return delta - other;
- }
- static void vtime_account_system(struct task_struct *tsk,
- struct vtime *vtime)
- {
- vtime->stime += get_vtime_delta(vtime);
- if (vtime->stime >= TICK_NSEC) {
- account_system_time(tsk, irq_count(), vtime->stime);
- vtime->stime = 0;
- }
- }
- static void vtime_account_guest(struct task_struct *tsk,
- struct vtime *vtime)
- {
- vtime->gtime += get_vtime_delta(vtime);
- if (vtime->gtime >= TICK_NSEC) {
- account_guest_time(tsk, vtime->gtime);
- vtime->gtime = 0;
- }
- }
- static void __vtime_account_kernel(struct task_struct *tsk,
- struct vtime *vtime)
- {
- /* We might have scheduled out from guest path */
- if (vtime->state == VTIME_GUEST)
- vtime_account_guest(tsk, vtime);
- else
- vtime_account_system(tsk, vtime);
- }
- void vtime_account_kernel(struct task_struct *tsk)
- {
- struct vtime *vtime = &tsk->vtime;
- if (!vtime_delta(vtime))
- return;
- write_seqcount_begin(&vtime->seqcount);
- __vtime_account_kernel(tsk, vtime);
- write_seqcount_end(&vtime->seqcount);
- }
- void vtime_user_enter(struct task_struct *tsk)
- {
- struct vtime *vtime = &tsk->vtime;
- write_seqcount_begin(&vtime->seqcount);
- vtime_account_system(tsk, vtime);
- vtime->state = VTIME_USER;
- write_seqcount_end(&vtime->seqcount);
- }
- void vtime_user_exit(struct task_struct *tsk)
- {
- struct vtime *vtime = &tsk->vtime;
- write_seqcount_begin(&vtime->seqcount);
- vtime->utime += get_vtime_delta(vtime);
- if (vtime->utime >= TICK_NSEC) {
- account_user_time(tsk, vtime->utime);
- vtime->utime = 0;
- }
- vtime->state = VTIME_SYS;
- write_seqcount_end(&vtime->seqcount);
- }
- void vtime_guest_enter(struct task_struct *tsk)
- {
- struct vtime *vtime = &tsk->vtime;
- /*
- * The flags must be updated under the lock with
- * the vtime_starttime flush and update.
- * That enforces the correct ordering and update sequence
- * synchronization against the reader (task_gtime())
- * that can thus safely catch up with a tickless delta.
- */
- write_seqcount_begin(&vtime->seqcount);
- vtime_account_system(tsk, vtime);
- tsk->flags |= PF_VCPU;
- vtime->state = VTIME_GUEST;
- write_seqcount_end(&vtime->seqcount);
- }
- EXPORT_SYMBOL_GPL(vtime_guest_enter);
- void vtime_guest_exit(struct task_struct *tsk)
- {
- struct vtime *vtime = &tsk->vtime;
- write_seqcount_begin(&vtime->seqcount);
- vtime_account_guest(tsk, vtime);
- tsk->flags &= ~PF_VCPU;
- vtime->state = VTIME_SYS;
- write_seqcount_end(&vtime->seqcount);
- }
- EXPORT_SYMBOL_GPL(vtime_guest_exit);
- void vtime_account_idle(struct task_struct *tsk)
- {
- account_idle_time(get_vtime_delta(&tsk->vtime));
- }
- void vtime_task_switch_generic(struct task_struct *prev)
- {
- struct vtime *vtime = &prev->vtime;
- write_seqcount_begin(&vtime->seqcount);
- if (vtime->state == VTIME_IDLE)
- vtime_account_idle(prev);
- else
- __vtime_account_kernel(prev, vtime);
- vtime->state = VTIME_INACTIVE;
- vtime->cpu = -1;
- write_seqcount_end(&vtime->seqcount);
- vtime = &current->vtime;
- write_seqcount_begin(&vtime->seqcount);
- if (is_idle_task(current))
- vtime->state = VTIME_IDLE;
- else if (current->flags & PF_VCPU)
- vtime->state = VTIME_GUEST;
- else
- vtime->state = VTIME_SYS;
- vtime->starttime = sched_clock();
- vtime->cpu = smp_processor_id();
- write_seqcount_end(&vtime->seqcount);
- }
- void vtime_init_idle(struct task_struct *t, int cpu)
- {
- struct vtime *vtime = &t->vtime;
- unsigned long flags;
- local_irq_save(flags);
- write_seqcount_begin(&vtime->seqcount);
- vtime->state = VTIME_IDLE;
- vtime->starttime = sched_clock();
- vtime->cpu = cpu;
- write_seqcount_end(&vtime->seqcount);
- local_irq_restore(flags);
- }
- u64 task_gtime(struct task_struct *t)
- {
- struct vtime *vtime = &t->vtime;
- unsigned int seq;
- u64 gtime;
- if (!vtime_accounting_enabled())
- return t->gtime;
- do {
- seq = read_seqcount_begin(&vtime->seqcount);
- gtime = t->gtime;
- if (vtime->state == VTIME_GUEST)
- gtime += vtime->gtime + vtime_delta(vtime);
- } while (read_seqcount_retry(&vtime->seqcount, seq));
- return gtime;
- }
- /*
- * Fetch cputime raw values from fields of task_struct and
- * add up the pending nohz execution time since the last
- * cputime snapshot.
- */
- bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
- {
- struct vtime *vtime = &t->vtime;
- unsigned int seq;
- u64 delta;
- int ret;
- if (!vtime_accounting_enabled()) {
- *utime = t->utime;
- *stime = t->stime;
- return false;
- }
- do {
- ret = false;
- seq = read_seqcount_begin(&vtime->seqcount);
- *utime = t->utime;
- *stime = t->stime;
- /* Task is sleeping or idle, nothing to add */
- if (vtime->state < VTIME_SYS)
- continue;
- ret = true;
- delta = vtime_delta(vtime);
- /*
- * Task runs either in user (including guest) or kernel space,
- * add pending nohz time to the right place.
- */
- if (vtime->state == VTIME_SYS)
- *stime += vtime->stime + delta;
- else
- *utime += vtime->utime + delta;
- } while (read_seqcount_retry(&vtime->seqcount, seq));
- return ret;
- }
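- /*
- * Illustrative only (hypothetical helper): total user + system time of a
- * task, including any pending nohz delta added by task_cputime() above.
- */
- static inline u64 example_task_total_ns(struct task_struct *t)
- {
- u64 utime, stime;
- bool nohz_delta;
- nohz_delta = task_cputime(t, &utime, &stime);
- (void)nohz_delta; /* a real caller might also refresh sum_exec_runtime */
- return utime + stime;
- }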
- static int vtime_state_fetch(struct vtime *vtime, int cpu)
- {
- int state = READ_ONCE(vtime->state);
- /*
- * We raced against a context switch, fetch the
- * kcpustat task again.
- */
- if (vtime->cpu != cpu && vtime->cpu != -1)
- return -EAGAIN;
- /*
- * Two possible things here:
- * 1) We are seeing the scheduling out task (prev) or any past one.
- * 2) We are seeing the scheduling in task (next) but it hasn't
- * passed through vtime_task_switch() yet, so the pending
- * cputime of the prev task may not be flushed yet.
- *
- * Case 1) is ok but 2) is not. So wait for a safe VTIME state.
- */
- if (state == VTIME_INACTIVE)
- return -EAGAIN;
- return state;
- }
- static u64 kcpustat_user_vtime(struct vtime *vtime)
- {
- if (vtime->state == VTIME_USER)
- return vtime->utime + vtime_delta(vtime);
- else if (vtime->state == VTIME_GUEST)
- return vtime->gtime + vtime_delta(vtime);
- return 0;
- }
- static int kcpustat_field_vtime(u64 *cpustat,
- struct task_struct *tsk,
- enum cpu_usage_stat usage,
- int cpu, u64 *val)
- {
- struct vtime *vtime = &tsk->vtime;
- unsigned int seq;
- do {
- int state;
- seq = read_seqcount_begin(&vtime->seqcount);
- state = vtime_state_fetch(vtime, cpu);
- if (state < 0)
- return state;
- *val = cpustat[usage];
- /*
- * Nice vs. unnice cputime accounting may be inaccurate if
- * the nice value has changed since the last vtime update.
- * But a proper fix would involve interrupting the target on nice
- * updates, which is a no-go on nohz_full (although the scheduler
- * may still interrupt the target if rescheduling is needed...)
- */
- switch (usage) {
- case CPUTIME_SYSTEM:
- if (state == VTIME_SYS)
- *val += vtime->stime + vtime_delta(vtime);
- break;
- case CPUTIME_USER:
- if (task_nice(tsk) <= 0)
- *val += kcpustat_user_vtime(vtime);
- break;
- case CPUTIME_NICE:
- if (task_nice(tsk) > 0)
- *val += kcpustat_user_vtime(vtime);
- break;
- case CPUTIME_GUEST:
- if (state == VTIME_GUEST && task_nice(tsk) <= 0)
- *val += vtime->gtime + vtime_delta(vtime);
- break;
- case CPUTIME_GUEST_NICE:
- if (state == VTIME_GUEST && task_nice(tsk) > 0)
- *val += vtime->gtime + vtime_delta(vtime);
- break;
- default:
- break;
- }
- } while (read_seqcount_retry(&vtime->seqcount, seq));
- return 0;
- }
- u64 kcpustat_field(struct kernel_cpustat *kcpustat,
- enum cpu_usage_stat usage, int cpu)
- {
- u64 *cpustat = kcpustat->cpustat;
- u64 val = cpustat[usage];
- struct rq *rq;
- int err;
- if (!vtime_accounting_enabled_cpu(cpu))
- return val;
- rq = cpu_rq(cpu);
- for (;;) {
- struct task_struct *curr;
- rcu_read_lock();
- curr = rcu_dereference(rq->curr);
- if (WARN_ON_ONCE(!curr)) {
- rcu_read_unlock();
- return cpustat[usage];
- }
- err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
- rcu_read_unlock();
- if (!err)
- return val;
- cpu_relax();
- }
- }
- EXPORT_SYMBOL_GPL(kcpustat_field);
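- /*
- * Illustrative only (hypothetical caller): read one vtime-aware field for
- * a CPU, here the user time in nanoseconds; in-tree readers such as the
- * /proc uptime path use kcpustat_field() with the same call shape.
- */
- static inline u64 example_cpu_user_ns(int cpu)
- {
- return kcpustat_field(&kcpustat_cpu(cpu), CPUTIME_USER, cpu);
- }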
- static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
- const struct kernel_cpustat *src,
- struct task_struct *tsk, int cpu)
- {
- struct vtime *vtime = &tsk->vtime;
- unsigned int seq;
- do {
- u64 *cpustat;
- u64 delta;
- int state;
- seq = read_seqcount_begin(&vtime->seqcount);
- state = vtime_state_fetch(vtime, cpu);
- if (state < 0)
- return state;
- *dst = *src;
- cpustat = dst->cpustat;
- /* Task is sleeping, dead or idle, nothing to add */
- if (state < VTIME_SYS)
- continue;
- delta = vtime_delta(vtime);
- /*
- * Task runs either in user (including guest) or kernel space,
- * add pending nohz time to the right place.
- */
- if (state == VTIME_SYS) {
- cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
- } else if (state == VTIME_USER) {
- if (task_nice(tsk) > 0)
- cpustat[CPUTIME_NICE] += vtime->utime + delta;
- else
- cpustat[CPUTIME_USER] += vtime->utime + delta;
- } else {
- WARN_ON_ONCE(state != VTIME_GUEST);
- if (task_nice(tsk) > 0) {
- cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
- cpustat[CPUTIME_NICE] += vtime->gtime + delta;
- } else {
- cpustat[CPUTIME_GUEST] += vtime->gtime + delta;
- cpustat[CPUTIME_USER] += vtime->gtime + delta;
- }
- }
- } while (read_seqcount_retry(&vtime->seqcount, seq));
- return 0;
- }
- void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
- {
- const struct kernel_cpustat *src = &kcpustat_cpu(cpu);
- struct rq *rq;
- int err;
- if (!vtime_accounting_enabled_cpu(cpu)) {
- *dst = *src;
- return;
- }
- rq = cpu_rq(cpu);
- for (;;) {
- struct task_struct *curr;
- rcu_read_lock();
- curr = rcu_dereference(rq->curr);
- if (WARN_ON_ONCE(!curr)) {
- rcu_read_unlock();
- *dst = *src;
- return;
- }
- err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
- rcu_read_unlock();
- if (!err)
- return;
- cpu_relax();
- }
- }
- EXPORT_SYMBOL_GPL(kcpustat_cpu_fetch);
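- /*
- * Illustrative only (hypothetical caller): take a vtime-aware snapshot of
- * one CPU's cpustat array and sum its user and system fields; /proc/stat
- * style consumers call kcpustat_cpu_fetch() this way before reading the
- * individual fields.
- */
- static inline u64 example_cpu_busy_ns(int cpu)
- {
- struct kernel_cpustat kcs;
- kcpustat_cpu_fetch(&kcs, cpu);
- return kcs.cpustat[CPUTIME_USER] + kcs.cpustat[CPUTIME_SYSTEM];
- }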
- #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */