sched/accounting: Change cpustat fields to an array

This patch changes the fields in cpustat from a structure to a u64 array.
The math gets easier, and the code is more flexible.
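For illustration only (not part of the patch): a minimal userspace sketch of the
resulting pattern. The enum below mirrors the CPUTIME_* index names this series
adds in kernel_stat.h (the exact ordering there may differ); the array, helper
and main() are made up for the example.

  #include <stdint.h>
  #include <stdio.h>

  enum cpu_usage_stat {
  	CPUTIME_USER, CPUTIME_NICE, CPUTIME_SYSTEM, CPUTIME_SOFTIRQ,
  	CPUTIME_IRQ, CPUTIME_IDLE, CPUTIME_IOWAIT, CPUTIME_STEAL,
  	CPUTIME_GUEST, CPUTIME_GUEST_NICE, NR_STATS,
  };

  /* One u64 slot per bucket instead of ten named struct fields. */
  static uint64_t cpustat[NR_STATS];

  /* Callers just pass an index; no pointer-to-member plumbing needed. */
  static void account_field(int index, uint64_t delta)
  {
  	cpustat[index] += delta;
  }

  int main(void)
  {
  	uint64_t total = 0;
  	int i;

  	account_field(CPUTIME_USER, 100);
  	account_field(CPUTIME_IRQ, 7);

  	/* "Math gets easier": aggregating is a plain loop over the array. */
  	for (i = 0; i < NR_STATS; i++)
  		total += cpustat[i];

  	printf("total: %llu\n", (unsigned long long)total);
  	return 0;
  }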

Signed-off-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1322498719-2255-2-git-send-email-glommer@parallels.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Glauber Costa, 2011-11-28 14:45:17 -02:00 (committed by Ingo Molnar)
parent 786d6dc7ae, commit 3292beb340
9 changed files with 142 additions and 141 deletions

@@ -896,14 +896,14 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 static int irqtime_account_hi_update(void)
 {
-	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
 	unsigned long flags;
 	u64 latest_ns;
 	int ret = 0;
 
 	local_irq_save(flags);
 	latest_ns = this_cpu_read(cpu_hardirq_time);
-	if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
+	if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat[CPUTIME_IRQ]))
 		ret = 1;
 	local_irq_restore(flags);
 	return ret;
@@ -911,14 +911,14 @@ static int irqtime_account_hi_update(void)
 
 static int irqtime_account_si_update(void)
 {
-	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
 	unsigned long flags;
 	u64 latest_ns;
 	int ret = 0;
 
 	local_irq_save(flags);
 	latest_ns = this_cpu_read(cpu_softirq_time);
-	if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
+	if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat[CPUTIME_SOFTIRQ]))
 		ret = 1;
 	local_irq_restore(flags);
 	return ret;
@@ -2500,8 +2500,10 @@ unlock:
 #endif
 
 DEFINE_PER_CPU(struct kernel_stat, kstat);
+DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 
 EXPORT_PER_CPU_SYMBOL(kstat);
+EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
 /*
  * Return any ns on the sched_clock that have not yet been accounted in
@@ -2563,8 +2565,9 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 void account_user_time(struct task_struct *p, cputime_t cputime,
 		       cputime_t cputime_scaled)
 {
-	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-	cputime64_t tmp;
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
+	u64 tmp;
+	int index;
 
 	/* Add user time to process. */
 	p->utime = cputime_add(p->utime, cputime);
@@ -2573,10 +2576,9 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
 
 	/* Add user time to cpustat. */
 	tmp = cputime_to_cputime64(cputime);
-	if (TASK_NICE(p) > 0)
-		cpustat->nice = cputime64_add(cpustat->nice, tmp);
-	else
-		cpustat->user = cputime64_add(cpustat->user, tmp);
+	index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
+
+	cpustat[index] += tmp;
 	cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
 
 	/* Account for user time used */
@@ -2592,8 +2594,8 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
 static void account_guest_time(struct task_struct *p, cputime_t cputime,
 			       cputime_t cputime_scaled)
 {
-	cputime64_t tmp;
-	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	u64 tmp;
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
 
 	tmp = cputime_to_cputime64(cputime);
 
@@ -2605,11 +2607,11 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
 
 	/* Add guest time to cpustat. */
 	if (TASK_NICE(p) > 0) {
-		cpustat->nice = cputime64_add(cpustat->nice, tmp);
-		cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
+		cpustat[CPUTIME_NICE] += tmp;
+		cpustat[CPUTIME_GUEST_NICE] += tmp;
 	} else {
-		cpustat->user = cputime64_add(cpustat->user, tmp);
-		cpustat->guest = cputime64_add(cpustat->guest, tmp);
+		cpustat[CPUTIME_USER] += tmp;
+		cpustat[CPUTIME_GUEST] += tmp;
 	}
 }
 
@@ -2622,9 +2624,10 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
  */
 static inline
 void __account_system_time(struct task_struct *p, cputime_t cputime,
-			cputime_t cputime_scaled, cputime64_t *target_cputime64)
+			cputime_t cputime_scaled, int index)
 {
-	cputime64_t tmp = cputime_to_cputime64(cputime);
+	u64 tmp = cputime_to_cputime64(cputime);
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
 
 	/* Add system time to process. */
 	p->stime = cputime_add(p->stime, cputime);
@@ -2632,7 +2635,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime,
 	account_group_system_time(p, cputime);
 
 	/* Add system time to cpustat. */
-	*target_cputime64 = cputime64_add(*target_cputime64, tmp);
+	cpustat[index] += tmp;
 	cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
 
 	/* Account for system time used */
@@ -2649,8 +2652,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime,
 void account_system_time(struct task_struct *p, int hardirq_offset,
 			 cputime_t cputime, cputime_t cputime_scaled)
 {
-	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-	cputime64_t *target_cputime64;
+	int index;
 
 	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
 		account_guest_time(p, cputime, cputime_scaled);
@@ -2658,13 +2660,13 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 	}
 
 	if (hardirq_count() - hardirq_offset)
-		target_cputime64 = &cpustat->irq;
+		index = CPUTIME_IRQ;
 	else if (in_serving_softirq())
-		target_cputime64 = &cpustat->softirq;
+		index = CPUTIME_SOFTIRQ;
 	else
-		target_cputime64 = &cpustat->system;
+		index = CPUTIME_SYSTEM;
 
-	__account_system_time(p, cputime, cputime_scaled, target_cputime64);
+	__account_system_time(p, cputime, cputime_scaled, index);
 }
 
 /*
@@ -2673,10 +2675,10 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
  */
 void account_steal_time(cputime_t cputime)
 {
-	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-	cputime64_t cputime64 = cputime_to_cputime64(cputime);
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
+	u64 cputime64 = cputime_to_cputime64(cputime);
 
-	cpustat->steal = cputime64_add(cpustat->steal, cputime64);
+	cpustat[CPUTIME_STEAL] += cputime64;
 }
 
 /*
@@ -2685,14 +2687,14 @@ void account_steal_time(cputime_t cputime)
  */
 void account_idle_time(cputime_t cputime)
 {
-	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-	cputime64_t cputime64 = cputime_to_cputime64(cputime);
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
+	u64 cputime64 = cputime_to_cputime64(cputime);
 	struct rq *rq = this_rq();
 
 	if (atomic_read(&rq->nr_iowait) > 0)
-		cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
+		cpustat[CPUTIME_IOWAIT] += cputime64;
 	else
-		cpustat->idle = cputime64_add(cpustat->idle, cputime64);
+		cpustat[CPUTIME_IDLE] += cputime64;
 }
 
 static __always_inline bool steal_account_process_tick(void)
@@ -2742,16 +2744,16 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 						struct rq *rq)
 {
 	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
-	cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
-	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	u64 tmp = cputime_to_cputime64(cputime_one_jiffy);
+	u64 *cpustat = kcpustat_this_cpu->cpustat;
 
 	if (steal_account_process_tick())
 		return;
 
 	if (irqtime_account_hi_update()) {
-		cpustat->irq = cputime64_add(cpustat->irq, tmp);
+		cpustat[CPUTIME_IRQ] += tmp;
 	} else if (irqtime_account_si_update()) {
-		cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
+		cpustat[CPUTIME_SOFTIRQ] += tmp;
 	} else if (this_cpu_ksoftirqd() == p) {
 		/*
 		 * ksoftirqd time do not get accounted in cpu_softirq_time.
@@ -2759,7 +2761,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 		 * Also, p->stime needs to be updated for ksoftirqd.
 		 */
 		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-					&cpustat->softirq);
+					CPUTIME_SOFTIRQ);
 	} else if (user_tick) {
 		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
 	} else if (p == rq->idle) {
@@ -2768,7 +2770,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
 	} else {
 		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-					&cpustat->system);
+					CPUTIME_SYSTEM);
 	}
 }