perfcounters: fix task clock counter
Impact: fix per task clock counter precision

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -638,7 +638,7 @@ static inline int cpu_of(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
-static inline void update_rq_clock(struct rq *rq)
+inline void update_rq_clock(struct rq *rq)
 {
 	rq->clock = sched_clock_cpu(cpu_of(rq));
 }
@@ -969,6 +969,26 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void curr_rq_lock_irq_save(unsigned long *flags)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	local_irq_save(*flags);
+	rq = cpu_rq(smp_processor_id());
+	spin_lock(&rq->lock);
+}
+
+void curr_rq_unlock_irq_restore(unsigned long *flags)
+	__releases(rq->lock)
+{
+	struct rq *rq;
+
+	rq = cpu_rq(smp_processor_id());
+	spin_unlock(&rq->lock);
+	local_irq_restore(*flags);
+}
+
 void task_rq_unlock_wait(struct task_struct *p)
 {
 	struct rq *rq = task_rq(p);
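
[Reviewer note: the new pair above locks the runqueue of whatever task is currently running on this CPU, with IRQs disabled — exactly the state that __task_delta_exec(), added further down in this patch, asserts via WARN_ON_ONCE(!runqueue_is_locked()). A minimal usage sketch follows; the caller name is hypothetical, and the real callers are presumably in the perf counter code, which this hunk does not show:

/* Hypothetical caller, for illustration only: */
static u64 sample_current_runtime(void)
{
	unsigned long flags;
	u64 ns;

	curr_rq_lock_irq_save(&flags);
	/*
	 * IRQs off and this CPU's rq->lock held: 'current' cannot be
	 * scheduled out while we sample it.
	 */
	ns = current->se.sum_exec_runtime + __task_delta_exec(current, 1);
	curr_rq_unlock_irq_restore(&flags);

	return ns;
}
]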
@@ -2558,7 +2578,6 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 		    struct task_struct *next)
 {
 	fire_sched_out_preempt_notifiers(prev, next);
-	perf_counter_task_sched_out(prev, cpu_of(rq));
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
 }
@@ -4089,6 +4108,29 @@ DEFINE_PER_CPU(struct kernel_stat, kstat);
 
 EXPORT_PER_CPU_SYMBOL(kstat);
 
+/*
+ * Return any ns on the sched_clock that have not yet been banked in
+ * @p in case that task is currently running.
+ */
+unsigned long long __task_delta_exec(struct task_struct *p, int update)
+{
+	s64 delta_exec;
+	struct rq *rq;
+
+	rq = task_rq(p);
+	WARN_ON_ONCE(!runqueue_is_locked());
+	WARN_ON_ONCE(!task_current(rq, p));
+
+	if (update)
+		update_rq_clock(rq);
+
+	delta_exec = rq->clock - p->se.exec_start;
+
+	WARN_ON_ONCE(delta_exec < 0);
+
+	return delta_exec;
+}
+
 /*
  * Return any ns on the sched_clock that have not yet been banked in
  * @p in case that task is currently running.
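
[Reviewer note: this function is the core of the precision fix. p->se.sum_exec_runtime is only banked at scheduler events (ticks and context switches), so a task clock read between events could lag by up to a full tick; __task_delta_exec() returns the nanoseconds accrued since exec_start that have not been banked yet, and update=1 refreshes rq->clock first so the delta is measured against a fresh clock. For example, with exec_start at 4,000,000 ns and rq->clock at 4,750,000 ns, a read gains 750,000 ns over sum_exec_runtime alone. A sketch of the assumed composition, with a hypothetical name — the actual counter read path lives outside this file:

/*
 * Assumed composition, hypothetical name. Caller must hold the
 * task's runqueue lock, as __task_delta_exec() asserts.
 */
static u64 precise_task_clock_ns(struct task_struct *p)
{
	return p->se.sum_exec_runtime + __task_delta_exec(p, 1);
}
]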
@@ -4316,13 +4358,13 @@ void scheduler_tick(void)
 	update_rq_clock(rq);
 	update_cpu_load(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
+	perf_counter_task_tick(curr, cpu);
 	spin_unlock(&rq->lock);
 
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
 	trigger_load_balance(rq, cpu);
 #endif
-	perf_counter_task_tick(curr, cpu);
 }
 
 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
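
[Reviewer note: perf_counter_task_tick() moves inside the rq->lock critical section, right after update_rq_clock() and the class tick, so per task counters are sampled under the lock (satisfying the assertion in __task_delta_exec()) and against a freshly updated clock. Condensed ordering after this change — a sketch, not the literal function body:

	spin_lock(&rq->lock);
	update_rq_clock(rq);			/* rq->clock is now fresh */
	update_cpu_load(rq);
	curr->sched_class->task_tick(rq, curr, 0);
	perf_counter_task_tick(curr, cpu);	/* sampled under rq->lock */
	spin_unlock(&rq->lock);			/* old code sampled after here */
]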
@@ -4512,6 +4554,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
+		perf_counter_task_sched_out(prev, cpu);
 
 		rq->nr_switches++;
 		rq->curr = next;