cputime: Use accessors to read task cputime stats
This is in preparation for the full dynticks feature. While remotely reading the cputime of a task running on a full dynticks CPU, we'll need to do some extra computation. This way we can account for the time it has spent tickless in userspace since its last cputime snapshot.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Namhyung Kim <namhyung.kim@lge.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
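For orientation before the diff: every open-coded read of tsk->utime/tsk->stime below is replaced by a call to task_cputime() (and tsk->utimescaled/tsk->stimescaled by task_cputime_scaled()). The accessor definitions themselves are not part of the hunks shown on this page; on a kernel without full dynticks accounting they can collapse to plain field reads. The following is only a sketch of that default shape, inferred from the call sites in the diff (the NULL handling matches the task_cputime(p, &utime, NULL) use in virt_ticks()); treat it as an illustration of the pattern, not the verbatim header change:

    /* Sketch only: plausible default accessors when no tickless adjustment
     * is needed. With full dynticks, these helpers become the single place
     * where the time a task spent tickless since its last snapshot can be
     * folded in before the value is handed back to the caller.
     */
    static inline void task_cputime(struct task_struct *t,
                                    cputime_t *utime, cputime_t *stime)
    {
            /* Callers may pass NULL when they only need one of the values. */
            if (utime)
                    *utime = t->utime;
            if (stime)
                    *stime = t->stime;
    }

    static inline void task_cputime_scaled(struct task_struct *t,
                                           cputime_t *utimescaled,
                                           cputime_t *stimescaled)
    {
            if (utimescaled)
                    *utimescaled = t->utimescaled;
            if (stimescaled)
                    *stimescaled = t->stimescaled;
    }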
@@ -566,6 +566,7 @@ out:
 void acct_collect(long exitcode, int group_dead)
 {
         struct pacct_struct *pacct = &current->signal->pacct;
+        cputime_t utime, stime;
         unsigned long vsize = 0;
 
         if (group_dead && current->mm) {
@@ -593,8 +594,9 @@ void acct_collect(long exitcode, int group_dead)
                 pacct->ac_flag |= ACORE;
         if (current->flags & PF_SIGNALED)
                 pacct->ac_flag |= AXSIG;
-        pacct->ac_utime += current->utime;
-        pacct->ac_stime += current->stime;
+        task_cputime(current, &utime, &stime);
+        pacct->ac_utime += utime;
+        pacct->ac_stime += stime;
         pacct->ac_minflt += current->min_flt;
         pacct->ac_majflt += current->maj_flt;
         spin_unlock_irq(&current->sighand->siglock);

@@ -224,11 +224,13 @@ void clear_tasks_mm_cpumask(int cpu)
 static inline void check_for_tasks(int cpu)
 {
         struct task_struct *p;
+        cputime_t utime, stime;
 
         write_lock_irq(&tasklist_lock);
         for_each_process(p) {
+                task_cputime(p, &utime, &stime);
                 if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
-                    (p->utime || p->stime))
+                    (utime || stime))
                         printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                 "(state = %ld, flags = %x)\n",
                                 p->comm, task_pid_nr(p), cpu,

@@ -106,6 +106,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
         unsigned long long t2, t3;
         unsigned long flags;
         struct timespec ts;
+        cputime_t utime, stime, stimescaled, utimescaled;
 
         /* Though tsk->delays accessed later, early exit avoids
          * unnecessary returning of other data
@@ -114,12 +115,14 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
                 goto done;
 
         tmp = (s64)d->cpu_run_real_total;
-        cputime_to_timespec(tsk->utime + tsk->stime, &ts);
+        task_cputime(tsk, &utime, &stime);
+        cputime_to_timespec(utime + stime, &ts);
         tmp += timespec_to_ns(&ts);
         d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
 
         tmp = (s64)d->cpu_scaled_run_real_total;
-        cputime_to_timespec(tsk->utimescaled + tsk->stimescaled, &ts);
+        task_cputime_scaled(tsk, &utimescaled, &stimescaled);
+        cputime_to_timespec(utimescaled + stimescaled, &ts);
         tmp += timespec_to_ns(&ts);
         d->cpu_scaled_run_real_total =
                 (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;

@@ -85,6 +85,7 @@ static void __exit_signal(struct task_struct *tsk)
         bool group_dead = thread_group_leader(tsk);
         struct sighand_struct *sighand;
         struct tty_struct *uninitialized_var(tty);
+        cputime_t utime, stime;
 
         sighand = rcu_dereference_check(tsk->sighand,
                                         lockdep_tasklist_lock_is_held());
@@ -123,9 +124,10 @@ static void __exit_signal(struct task_struct *tsk)
                  * We won't ever get here for the group leader, since it
                  * will have been the last reference on the signal_struct.
                  */
-                sig->utime += tsk->utime;
-                sig->stime += tsk->stime;
-                sig->gtime += tsk->gtime;
+                task_cputime(tsk, &utime, &stime);
+                sig->utime += utime;
+                sig->stime += stime;
+                sig->gtime += task_gtime(tsk);
                 sig->min_flt += tsk->min_flt;
                 sig->maj_flt += tsk->maj_flt;
                 sig->nvcsw += tsk->nvcsw;
@@ -1092,7 +1094,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
                 sig = p->signal;
                 psig->cutime += tgutime + sig->cutime;
                 psig->cstime += tgstime + sig->cstime;
-                psig->cgtime += p->gtime + sig->gtime + sig->cgtime;
+                psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
                 psig->cmin_flt +=
                         p->min_flt + sig->min_flt + sig->cmin_flt;
                 psig->cmaj_flt +=

@@ -155,11 +155,19 @@ static void bump_cpu_timer(struct k_itimer *timer,
 
 static inline cputime_t prof_ticks(struct task_struct *p)
 {
-        return p->utime + p->stime;
+        cputime_t utime, stime;
+
+        task_cputime(p, &utime, &stime);
+
+        return utime + stime;
 }
 static inline cputime_t virt_ticks(struct task_struct *p)
 {
-        return p->utime;
+        cputime_t utime;
+
+        task_cputime(p, &utime, NULL);
+
+        return utime;
 }
 
 static int
@@ -471,18 +479,23 @@ static void cleanup_timers(struct list_head *head,
  */
 void posix_cpu_timers_exit(struct task_struct *tsk)
 {
+        cputime_t utime, stime;
+
         add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
                                                 sizeof(unsigned long long));
+        task_cputime(tsk, &utime, &stime);
         cleanup_timers(tsk->cpu_timers,
-                       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
+                       utime, stime, tsk->se.sum_exec_runtime);
 
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
         struct signal_struct *const sig = tsk->signal;
+        cputime_t utime, stime;
 
+        task_cputime(tsk, &utime, &stime);
         cleanup_timers(tsk->signal->cpu_timers,
-                       tsk->utime + sig->utime, tsk->stime + sig->stime,
+                       utime + sig->utime, stime + sig->stime,
                        tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
 }
 
@@ -1226,11 +1239,14 @@ static inline int task_cputime_expired(const struct task_cputime *sample,
 static inline int fastpath_timer_check(struct task_struct *tsk)
 {
         struct signal_struct *sig;
+        cputime_t utime, stime;
 
+        task_cputime(tsk, &utime, &stime);
+
         if (!task_cputime_zero(&tsk->cputime_expires)) {
                 struct task_cputime task_sample = {
-                        .utime = tsk->utime,
-                        .stime = tsk->stime,
+                        .utime = utime,
+                        .stime = stime,
                         .sum_exec_runtime = tsk->se.sum_exec_runtime
                 };
 

@@ -164,7 +164,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
         task_group_account_field(p, index, (__force u64) cputime);
 
         /* Account for user time used */
-        acct_update_integrals(p);
+        acct_account_cputime(p);
 }
 
 /*
@@ -214,7 +214,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime,
         task_group_account_field(p, index, (__force u64) cputime);
 
         /* Account for system time used */
-        acct_update_integrals(p);
+        acct_account_cputime(p);
 }
 
 /*
@@ -296,6 +296,7 @@ static __always_inline bool steal_account_process_tick(void)
 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 {
         struct signal_struct *sig = tsk->signal;
+        cputime_t utime, stime;
         struct task_struct *t;
 
         times->utime = sig->utime;
@@ -309,8 +310,9 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 
         t = tsk;
         do {
-                times->utime += t->utime;
-                times->stime += t->stime;
+                task_cputime(tsk, &utime, &stime);
+                times->utime += utime;
+                times->stime += stime;
                 times->sum_exec_runtime += task_sched_runtime(t);
         } while_each_thread(tsk, t);
 out:
@@ -588,11 +590,10 @@ static void cputime_adjust(struct task_cputime *curr,
 void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
         struct task_cputime cputime = {
-                .utime = p->utime,
-                .stime = p->stime,
                 .sum_exec_runtime = p->se.sum_exec_runtime,
         };
 
+        task_cputime(p, &cputime.utime, &cputime.stime);
         cputime_adjust(&cputime, &p->prev_cputime, ut, st);
 }
 

@@ -1638,6 +1638,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
         unsigned long flags;
         struct sighand_struct *psig;
         bool autoreap = false;
+        cputime_t utime, stime;
 
         BUG_ON(sig == -1);
 
@@ -1675,8 +1676,9 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
                                        task_uid(tsk));
         rcu_read_unlock();
 
-        info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
-        info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime);
+        task_cputime(tsk, &utime, &stime);
+        info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
+        info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
 
         info.si_status = tsk->exit_code & 0x7f;
         if (tsk->exit_code & 0x80)
@@ -1740,6 +1742,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
         unsigned long flags;
         struct task_struct *parent;
         struct sighand_struct *sighand;
+        cputime_t utime, stime;
 
         if (for_ptracer) {
                 parent = tsk->parent;
@@ -1758,8 +1761,9 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
         info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
         rcu_read_unlock();
 
-        info.si_utime = cputime_to_clock_t(tsk->utime);
-        info.si_stime = cputime_to_clock_t(tsk->stime);
+        task_cputime(tsk, &utime, &stime);
+        info.si_utime = cputime_to_clock_t(utime);
+        info.si_stime = cputime_to_clock_t(stime);
 
         info.si_code = why;
         switch (why) {

@@ -32,6 +32,7 @@ void bacct_add_tsk(struct user_namespace *user_ns,
 {
         const struct cred *tcred;
         struct timespec uptime, ts;
+        cputime_t utime, stime, utimescaled, stimescaled;
         u64 ac_etime;
 
         BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN);
@@ -65,10 +66,15 @@ void bacct_add_tsk(struct user_namespace *user_ns,
         stats->ac_ppid = pid_alive(tsk) ?
                 task_tgid_nr_ns(rcu_dereference(tsk->real_parent), pid_ns) : 0;
         rcu_read_unlock();
-        stats->ac_utime = cputime_to_usecs(tsk->utime);
-        stats->ac_stime = cputime_to_usecs(tsk->stime);
-        stats->ac_utimescaled = cputime_to_usecs(tsk->utimescaled);
-        stats->ac_stimescaled = cputime_to_usecs(tsk->stimescaled);
+
+        task_cputime(tsk, &utime, &stime);
+        stats->ac_utime = cputime_to_usecs(utime);
+        stats->ac_stime = cputime_to_usecs(stime);
+
+        task_cputime_scaled(tsk, &utimescaled, &stimescaled);
+        stats->ac_utimescaled = cputime_to_usecs(utimescaled);
+        stats->ac_stimescaled = cputime_to_usecs(stimescaled);
+
         stats->ac_minflt = tsk->min_flt;
         stats->ac_majflt = tsk->maj_flt;
 
@@ -115,11 +121,8 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
 #undef KB
 #undef MB
 
-/**
- * acct_update_integrals - update mm integral fields in task_struct
- * @tsk: task_struct for accounting
- */
-void acct_update_integrals(struct task_struct *tsk)
+static void __acct_update_integrals(struct task_struct *tsk,
+                                    cputime_t utime, cputime_t stime)
 {
         if (likely(tsk->mm)) {
                 cputime_t time, dtime;
@@ -128,7 +131,7 @@ void acct_update_integrals(struct task_struct *tsk)
                 u64 delta;
 
                 local_irq_save(flags);
-                time = tsk->stime + tsk->utime;
+                time = stime + utime;
                 dtime = time - tsk->acct_timexpd;
                 jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
                 delta = value.tv_sec;
@@ -144,6 +147,27 @@ void acct_update_integrals(struct task_struct *tsk)
         }
 }
 
+/**
+ * acct_update_integrals - update mm integral fields in task_struct
+ * @tsk: task_struct for accounting
+ */
+void acct_update_integrals(struct task_struct *tsk)
+{
+        cputime_t utime, stime;
+
+        task_cputime(tsk, &utime, &stime);
+        __acct_update_integrals(tsk, utime, stime);
+}
+
+/**
+ * acct_account_cputime - update mm integral after cputime update
+ * @tsk: task_struct for accounting
+ */
+void acct_account_cputime(struct task_struct *tsk)
+{
+        __acct_update_integrals(tsk, tsk->utime, tsk->stime);
+}
+
 /**
  * acct_clear_integrals - clear the mm integral fields in task_struct
  * @tsk: task_struct whose accounting fields are cleared