Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer core changes from Ingo Molnar:
 "Continued cleanups of the core time and NTP code, plus more nohz work
  preparing for tick-less userspace execution."

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  time: Rework timekeeping functions to take timekeeper ptr as argument
  time: Move xtime_nsec adjustment underflow handling timekeeping_adjust
  time: Move arch_gettimeoffset() usage into timekeeping_get_ns()
  time: Refactor accumulation of nsecs to secs
  time: Condense timekeeper.xtime into xtime_sec
  time: Explicitly use u32 instead of int for shift values
  time: Whitespace cleanups per Ingo's requests
  nohz: Move next idle expiry time record into idle logic area
  nohz: Move ts->idle_calls incrementation into strict idle logic
  nohz: Rename ts->idle_tick to ts->last_tick
  nohz: Make nohz API agnostic against idle ticks cputime accounting
  nohz: Separate idle sleeping time accounting from nohz logic
  timers: Improve get_next_timer_interrupt()
  timers: Add accounting of non deferrable timers
  timers: Consolidate base->next_timer update
  timers: Create detach_if_pending() and use it
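Most of the tick-sched.c churn below reshapes the idle-entry path: the "may we stop the tick?" checks move into a can_stop_idle_tick() predicate, tick_nohz_stop_sched_tick() now receives `now` and `cpu` from its caller and returns the expiry it programmed (a zero tv64 meaning the tick was left running), and the idle statistics move into the new __tick_nohz_idle_enter() wrapper. A minimal userspace sketch of that control-flow shape; the names mirror the kernel's, but the types and bodies are simplified stand-ins, not kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in; the real ktime_t lives in <linux/ktime.h>. */
typedef struct { int64_t tv64; } ktime_t;

struct tick_sched {
        int tick_stopped;
        unsigned long idle_calls, idle_sleeps;
        ktime_t idle_expires;
};

/* Mirrors can_stop_idle_tick(): a pure predicate with no side effects
 * (the kernel checks cpu_online(), nohz_mode, need_resched(), softirqs). */
static bool can_stop_idle_tick(struct tick_sched *ts)
{
        (void)ts;
        return true;
}

/* Mirrors tick_nohz_stop_sched_tick(): returns the programmed expiry,
 * or tv64 == 0 when the tick was left running. */
static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts)
{
        ktime_t ret = { .tv64 = 1000000 }; /* pretend: next event in 1 ms */

        ts->tick_stopped = 1;
        return ret;
}

/* Mirrors __tick_nohz_idle_enter(): the caller now owns the statistics. */
static void __tick_nohz_idle_enter(struct tick_sched *ts)
{
        if (can_stop_idle_tick(ts)) {
                ktime_t expires;

                ts->idle_calls++;
                expires = tick_nohz_stop_sched_tick(ts);
                if (expires.tv64 > 0LL) { /* count only real stops */
                        ts->idle_sleeps++;
                        ts->idle_expires = expires;
                }
        }
}

int main(void)
{
        struct tick_sched ts = { 0 };

        __tick_nohz_idle_enter(&ts);
        printf("calls=%lu sleeps=%lu\n", ts.idle_calls, ts.idle_sleeps);
        return 0;
}

Keeping the predicate side-effect free is what lets both tick_nohz_idle_enter() and tick_nohz_irq_exit() funnel through one entry path without double-counting sleeps.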
@@ -271,50 +271,15 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 
-static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
+static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
+                                         ktime_t now, int cpu)
 {
         unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
+        ktime_t last_update, expires, ret = { .tv64 = 0 };
         unsigned long rcu_delta_jiffies;
-        ktime_t last_update, expires, now;
         struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
         u64 time_delta;
-        int cpu;
-
-        cpu = smp_processor_id();
-        ts = &per_cpu(tick_cpu_sched, cpu);
-
-        now = tick_nohz_start_idle(cpu, ts);
-
-        /*
-         * If this cpu is offline and it is the one which updates
-         * jiffies, then give up the assignment and let it be taken by
-         * the cpu which runs the tick timer next. If we don't drop
-         * this here the jiffies might be stale and do_timer() never
-         * invoked.
-         */
-        if (unlikely(!cpu_online(cpu))) {
-                if (cpu == tick_do_timer_cpu)
-                        tick_do_timer_cpu = TICK_DO_TIMER_NONE;
-        }
-
-        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
-                return;
-
-        if (need_resched())
-                return;
-
-        if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
-                static int ratelimit;
-
-                if (ratelimit < 10) {
-                        printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
-                               (unsigned int) local_softirq_pending());
-                        ratelimit++;
-                }
-                return;
-        }
-
-        ts->idle_calls++;
+
         /* Read jiffies and the time when jiffies were updated last */
         do {
                 seq = read_seqbegin(&xtime_lock);
@@ -397,6 +362,8 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
         if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
                 goto out;
 
+        ret = expires;
+
         /*
          * nohz_stop_sched_tick can be called several times before
          * the nohz_restart_sched_tick is called. This happens when
@@ -408,16 +375,10 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
                 select_nohz_load_balancer(1);
                 calc_load_enter_idle();
 
-                ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
+                ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
                 ts->tick_stopped = 1;
-                ts->idle_jiffies = last_jiffies;
         }
 
-        ts->idle_sleeps++;
-
-        /* Mark expires */
-        ts->idle_expires = expires;
-
         /*
          * If the expiration time == KTIME_MAX, then
          * in this case we simply stop the tick timer.
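With this hunk the stop primitive no longer bumps ts->idle_sleeps or records ts->idle_expires itself; those per-sleep statistics move to the caller (see __tick_nohz_idle_enter() in the next hunk), which counts a sleep only when a non-zero expiry comes back.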
@@ -448,6 +409,65 @@ out:
         ts->next_jiffies = next_jiffies;
         ts->last_jiffies = last_jiffies;
         ts->sleep_length = ktime_sub(dev->next_event, now);
+
+        return ret;
 }
+
+static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
+{
+        /*
+         * If this cpu is offline and it is the one which updates
+         * jiffies, then give up the assignment and let it be taken by
+         * the cpu which runs the tick timer next. If we don't drop
+         * this here the jiffies might be stale and do_timer() never
+         * invoked.
+         */
+        if (unlikely(!cpu_online(cpu))) {
+                if (cpu == tick_do_timer_cpu)
+                        tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+        }
+
+        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
+                return false;
+
+        if (need_resched())
+                return false;
+
+        if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
+                static int ratelimit;
+
+                if (ratelimit < 10) {
+                        printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+                               (unsigned int) local_softirq_pending());
+                        ratelimit++;
+                }
+                return false;
+        }
+
+        return true;
+}
+
+static void __tick_nohz_idle_enter(struct tick_sched *ts)
+{
+        ktime_t now, expires;
+        int cpu = smp_processor_id();
+
+        now = tick_nohz_start_idle(cpu, ts);
+
+        if (can_stop_idle_tick(cpu, ts)) {
+                int was_stopped = ts->tick_stopped;
+
+                ts->idle_calls++;
+
+                expires = tick_nohz_stop_sched_tick(ts, now, cpu);
+                if (expires.tv64 > 0LL) {
+                        ts->idle_sleeps++;
+                        ts->idle_expires = expires;
+                }
+
+                if (!was_stopped && ts->tick_stopped)
+                        ts->idle_jiffies = ts->last_jiffies;
+        }
+}
 
 /**
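Note the was_stopped bookkeeping above: ts->idle_jiffies is seeded from ts->last_jiffies only on the transition from ticking to stopped, so re-entering this path while the tick is already stopped (e.g. from irq exit) does not reset the idle baseline.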
@@ -485,7 +505,7 @@ void tick_nohz_idle_enter(void)
          * update of the idle time accounting in tick_nohz_start_idle().
          */
         ts->inidle = 1;
-        tick_nohz_stop_sched_tick(ts);
+        __tick_nohz_idle_enter(ts);
 
         local_irq_enable();
 }
@@ -505,7 +525,7 @@ void tick_nohz_irq_exit(void)
         if (!ts->inidle)
                 return;
 
-        tick_nohz_stop_sched_tick(ts);
+        __tick_nohz_idle_enter(ts);
 }
 
 /**
@@ -523,7 +543,7 @@ ktime_t tick_nohz_get_sleep_length(void)
 static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 {
         hrtimer_cancel(&ts->sched_timer);
-        hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);
+        hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
 
         while (1) {
                 /* Forward the time to expire in the future */
@@ -546,6 +566,41 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
         }
 }
 
+static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
+{
+        /* Update jiffies first */
+        select_nohz_load_balancer(0);
+        tick_do_update_jiffies64(now);
+        update_cpu_load_nohz();
+
+        touch_softlockup_watchdog();
+        /*
+         * Cancel the scheduled timer and restore the tick
+         */
+        ts->tick_stopped = 0;
+        ts->idle_exittime = now;
+
+        tick_nohz_restart(ts, now);
+}
+
+static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
+{
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+        unsigned long ticks;
+        /*
+         * We stopped the tick in idle. Update process times would miss the
+         * time we slept as update_process_times does only a 1 tick
+         * accounting. Enforce that this is accounted to idle !
+         */
+        ticks = jiffies - ts->idle_jiffies;
+        /*
+         * We might be one off. Do not randomly account a huge number of ticks!
+         */
+        if (ticks && ticks < LONG_MAX)
+                account_idle_ticks(ticks);
+#endif
+}
+
 /**
  * tick_nohz_idle_exit - restart the idle tick from the idle task
  *
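The `if (ticks && ticks < LONG_MAX)` test in the new tick_nohz_account_idle_ticks() is an unsigned-wraparound guard: `ticks` is an unsigned long, so if jiffies were ever behind ts->idle_jiffies the subtraction would wrap to an enormous value, which the LONG_MAX bound rejects instead of accounting ("do not randomly account a huge number of ticks"). A tiny standalone illustration of the same guard, using hypothetical values rather than kernel state:

#include <limits.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical: the idle snapshot raced ahead of "now". */
        unsigned long idle_jiffies = 1005, jiffies_now = 1000;
        unsigned long ticks = jiffies_now - idle_jiffies; /* wraps to a huge value */

        /* Same test as tick_nohz_account_idle_ticks(): a wrapped delta
         * exceeds LONG_MAX and is skipped rather than accounted. */
        if (ticks && ticks < LONG_MAX)
                printf("account %lu idle ticks\n", ticks);
        else
                printf("skip bogus delta %lu\n", ticks);
        return 0;
}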
@@ -557,9 +612,6 @@ void tick_nohz_idle_exit(void)
 {
         int cpu = smp_processor_id();
         struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-        unsigned long ticks;
-#endif
         ktime_t now;
 
         local_irq_disable();
@@ -574,40 +626,11 @@ void tick_nohz_idle_exit(void)
 
         if (ts->idle_active)
                 tick_nohz_stop_idle(cpu, now);
 
-        if (!ts->tick_stopped) {
-                local_irq_enable();
-                return;
+        if (ts->tick_stopped) {
+                tick_nohz_restart_sched_tick(ts, now);
+                tick_nohz_account_idle_ticks(ts);
         }
 
-        /* Update jiffies first */
-        select_nohz_load_balancer(0);
-        tick_do_update_jiffies64(now);
-        update_cpu_load_nohz();
-
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-        /*
-         * We stopped the tick in idle. Update process times would miss the
-         * time we slept as update_process_times does only a 1 tick
-         * accounting. Enforce that this is accounted to idle !
-         */
-        ticks = jiffies - ts->idle_jiffies;
-        /*
-         * We might be one off. Do not randomly account a huge number of ticks!
-         */
-        if (ticks && ticks < LONG_MAX)
-                account_idle_ticks(ticks);
-#endif
-
-        calc_load_exit_idle();
-        touch_softlockup_watchdog();
-        /*
-         * Cancel the scheduled timer and restore the tick
-         */
-        ts->tick_stopped = 0;
-        ts->idle_exittime = now;
-
-        tick_nohz_restart(ts, now);
-
         local_irq_enable();
 }
@@ -811,7 +834,8 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
          */
         if (ts->tick_stopped) {
                 touch_softlockup_watchdog();
-                ts->idle_jiffies++;
+                if (idle_cpu(cpu))
+                        ts->idle_jiffies++;
         }
         update_process_times(user_mode(regs));
         profile_tick(CPU_PROFILING);
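The last hunk decouples the idle-jiffies statistic from the tick-stopped state: ts->idle_jiffies now advances only when idle_cpu(cpu) is true, so a stopped tick no longer implies the CPU was idle. This is groundwork for the tick-less userspace execution mentioned in the pull message.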