sched/cputime: Convert task/group cputime to nsecs

Now that most cputime readers use the transition API, which returns the
task cputime in old-style cputime_t, we can safely store the cputime in
nsecs. This will eventually make cputime statistics less opaque and more
granular. Back-and-forth conversions between cputime_t and nsecs, needed
only to cope with the arbitrary granularity of cputime_t, won't be
needed anymore.
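
To make the granularity point concrete: on jiffies-based configs a
cputime_t tick is 1/HZ seconds, so every nsec -> cputime_t -> nsec
round trip rounds down to a whole tick. A minimal userspace sketch of
that loss (HZ=100 is assumed for illustration; this is a model, not
kernel code):

  #include <stdio.h>
  #include <stdint.h>

  #define HZ            100ULL
  #define NSEC_PER_SEC  1000000000ULL

  /* Round down to whole ticks, as a jiffies-based nsecs_to_cputime()
   * effectively does. */
  static uint64_t nsecs_to_ticks(uint64_t ns)
  {
      return ns / (NSEC_PER_SEC / HZ);
  }

  static uint64_t ticks_to_nsecs(uint64_t ticks)
  {
      return ticks * (NSEC_PER_SEC / HZ);
  }

  int main(void)
  {
      uint64_t ns = 123456789;  /* ~123.5 ms of CPU time */

      printf("%llu ns -> %llu ns after round trip\n",
             (unsigned long long)ns,
             (unsigned long long)ticks_to_nsecs(nsecs_to_ticks(ns)));
      /* prints: 123456789 ns -> 120000000 ns after round trip */
      return 0;
  }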

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Link: http://lkml.kernel.org/r/1485832191-26889-8-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 5613fda9a5 (parent a1cecf2ba7)
Author:    Frederic Weisbecker, 2017-01-31 04:09:23 +01:00
Committer: Ingo Molnar
12 files changed, 89 insertions(+), 69 deletions(-)

--- a/include/linux/sched.h
+++ b/include/linux/sched.h

@@ -585,8 +585,8 @@ struct cpu_itimer {
  */
 struct prev_cputime {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-        cputime_t utime;
-        cputime_t stime;
+        u64 utime;
+        u64 stime;
         raw_spinlock_t lock;
 #endif
 };
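
For context, prev_cputime caches the utime/stime pair last reported to
userspace so that adjusted cputime can be kept monotonic across reads.
A standalone sketch of that clamping idea (illustrative names; this is
not the kernel's cputime_adjust() itself):

  #include <stdio.h>
  #include <stdint.h>

  struct prev_snapshot {
      uint64_t utime;  /* nsecs last handed to userspace */
      uint64_t stime;
  };

  /* Never let a freshly computed split dip below what was already
   * reported; then record the new floor. */
  static void clamp_monotonic(struct prev_snapshot *prev,
                              uint64_t *ut, uint64_t *st)
  {
      if (*ut < prev->utime)
          *ut = prev->utime;
      if (*st < prev->stime)
          *st = prev->stime;
      prev->utime = *ut;
      prev->stime = *st;
  }

  int main(void)
  {
      struct prev_snapshot prev = { .utime = 100, .stime = 50 };
      uint64_t ut = 90, st = 60;  /* raw sample dipped below prev */

      clamp_monotonic(&prev, &ut, &st);
      printf("ut=%llu st=%llu\n", (unsigned long long)ut,
             (unsigned long long)st);  /* ut=100 st=60 */
      return 0;
  }
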
@@ -601,8 +601,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
 
 /**
  * struct task_cputime - collected CPU time counts
- * @utime:              time spent in user mode, in &cputime_t units
- * @stime:              time spent in kernel mode, in &cputime_t units
+ * @utime:              time spent in user mode, in nanoseconds
+ * @stime:              time spent in kernel mode, in nanoseconds
  * @sum_exec_runtime:   total time spent on the CPU, in nanoseconds
  *
  * This structure groups together three kinds of CPU time that are tracked for
@@ -610,8 +610,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
  * these counts together and treat all three of them in parallel.
  */
 struct task_cputime {
-        cputime_t utime;
-        cputime_t stime;
+        u64 utime;
+        u64 stime;
         unsigned long long sum_exec_runtime;
 };
 
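
With all three members now counted in nanoseconds, readers can combine
them without any unit conversion step. A minimal standalone model
(local definitions only, not kernel headers):

  #include <stdio.h>
  #include <stdint.h>

  typedef uint64_t u64;

  struct task_cputime {
      u64 utime;                             /* nsecs */
      u64 stime;                             /* nsecs */
      unsigned long long sum_exec_runtime;   /* nsecs, unchanged */
  };

  int main(void)
  {
      struct task_cputime t = {
          .utime            = 1500000000ULL,  /* 1.5 s */
          .stime            =  500000000ULL,  /* 0.5 s */
          .sum_exec_runtime = 2000000000ULL,
      };

      /* utime + stime is now directly comparable to sum_exec_runtime: */
      printf("accounted %llu ns of %llu ns total\n",
             (unsigned long long)(t.utime + t.stime),
             t.sum_exec_runtime);
      return 0;
  }
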
@@ -780,7 +780,7 @@ struct signal_struct {
          * in __exit_signal, except for the group leader.
          */
         seqlock_t stats_lock;
-        cputime_t utime, stime, cutime, cstime;
+        u64 utime, stime, cutime, cstime;
         u64 gtime;
         u64 cgtime;
         struct prev_cputime prev_cputime;
@@ -1661,9 +1661,9 @@ struct task_struct {
         int __user *set_child_tid;              /* CLONE_CHILD_SETTID */
         int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */
 
-        cputime_t utime, stime;
+        u64 utime, stime;
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
-        cputime_t utimescaled, stimescaled;
+        u64 utimescaled, stimescaled;
 #endif
         u64 gtime;
         struct prev_cputime prev_cputime;
@@ -2260,11 +2260,11 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 extern void task_cputime(struct task_struct *t,
-                         cputime_t *utime, cputime_t *stime);
+                         u64 *utime, u64 *stime);
 extern u64 task_gtime(struct task_struct *t);
 #else
 static inline void task_cputime(struct task_struct *t,
-                                cputime_t *utime, cputime_t *stime)
+                                u64 *utime, u64 *stime)
 {
         *utime = t->utime;
         *stime = t->stime;
@@ -2278,16 +2278,16 @@ static inline u64 task_gtime(struct task_struct *t)
 
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 static inline void task_cputime_scaled(struct task_struct *t,
-                                       cputime_t *utimescaled,
-                                       cputime_t *stimescaled)
+                                       u64 *utimescaled,
+                                       u64 *stimescaled)
 {
         *utimescaled = t->utimescaled;
         *stimescaled = t->stimescaled;
 }
 #else
 static inline void task_cputime_scaled(struct task_struct *t,
-                                       cputime_t *utimescaled,
-                                       cputime_t *stimescaled)
+                                       u64 *utimescaled,
+                                       u64 *stimescaled)
 {
         task_cputime(t, utimescaled, stimescaled);
 }
@@ -2296,18 +2296,26 @@ static inline void task_cputime_scaled(struct task_struct *t,
 static inline void task_cputime_t(struct task_struct *t,
                                   cputime_t *utime, cputime_t *stime)
 {
-        task_cputime(t, utime, stime);
+        u64 ut, st;
+
+        task_cputime(t, &ut, &st);
+        *utime = nsecs_to_cputime(ut);
+        *stime = nsecs_to_cputime(st);
 }
 
 static inline void task_cputime_t_scaled(struct task_struct *t,
                                          cputime_t *utimescaled,
                                          cputime_t *stimescaled)
 {
-        task_cputime_scaled(t, utimescaled, stimescaled);
+        u64 ut, st;
+
+        task_cputime_scaled(t, &ut, &st);
+        *utimescaled = nsecs_to_cputime(ut);
+        *stimescaled = nsecs_to_cputime(st);
 }
 
-extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
-extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
+extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
 
 /*
  * Per process flags
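
The wrappers above are the heart of the transition: the real accessors
now return nanoseconds, and the *_t() variants down-convert for callers
that still expect cputime_t. A self-contained userspace model of that
shape (HZ, the tick-based cputime_t and the _model helpers are all
illustrative assumptions):

  #include <stdio.h>
  #include <stdint.h>

  #define HZ            100ULL
  #define NSEC_PER_SEC  1000000000ULL

  typedef uint64_t u64;
  typedef unsigned long cputime_t;  /* ticks, as on jiffies configs */

  static cputime_t nsecs_to_cputime(u64 ns)
  {
      return (cputime_t)(ns / (NSEC_PER_SEC / HZ));
  }

  /* New-style accessor: nanoseconds out. */
  static void task_cputime_model(u64 *utime, u64 *stime)
  {
      *utime = 123456789ULL;  /* sample values */
      *stime = 987654321ULL;
  }

  /* Transition wrapper, mirroring task_cputime_t(): fetch in nsecs,
   * then down-convert for not-yet-converted callers. */
  static void task_cputime_t_model(cputime_t *utime, cputime_t *stime)
  {
      u64 ut, st;

      task_cputime_model(&ut, &st);
      *utime = nsecs_to_cputime(ut);
      *stime = nsecs_to_cputime(st);
  }

  int main(void)
  {
      cputime_t ut, st;

      task_cputime_t_model(&ut, &st);
      printf("ut=%lu st=%lu ticks\n", ut, st);  /* ut=12 st=98 */
      return 0;
  }
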
@@ -3522,9 +3530,14 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times);
 
 static inline void thread_group_cputime_t(struct task_struct *tsk,
-                                          struct task_cputime_t *times)
+                                          struct task_cputime_t *cputime)
 {
-        thread_group_cputime(tsk, (struct task_cputime *)times);
+        struct task_cputime times;
+
+        thread_group_cputime(tsk, &times);
+        cputime->utime = nsecs_to_cputime(times.utime);
+        cputime->stime = nsecs_to_cputime(times.stime);
+        cputime->sum_exec_runtime = times.sum_exec_runtime;
 }
 
 /*
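
Note what the removed cast implied: thread_group_cputime_t() used to
reinterpret a struct task_cputime_t * as a struct task_cputime *, which
was only safe while the two structs had identical layouts. With
utime/stime stored as u64 while the _t variant keeps cputime_t (an
unsigned long, so 4 bytes on 32-bit), the layouts can diverge, hence
the explicit field-by-field conversion. A standalone illustration with
hypothetical local definitions:

  #include <stdio.h>
  #include <stdint.h>

  typedef unsigned long cputime_t;  /* ticks on jiffies-based configs */
  typedef uint64_t u64;

  struct task_cputime {             /* new: nanoseconds */
      u64 utime;
      u64 stime;
      unsigned long long sum_exec_runtime;
  };

  struct task_cputime_t {           /* legacy: cputime_t */
      cputime_t utime;
      cputime_t stime;
      unsigned long long sum_exec_runtime;
  };

  int main(void)
  {
      printf("sizeof new=%zu legacy=%zu\n",
             sizeof(struct task_cputime), sizeof(struct task_cputime_t));
      /* On 32-bit the sizes differ, so the old pointer cast would be
       * invalid; hence the explicit field-by-field conversion above. */
      return 0;
  }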