Merge branch 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  timers, init: Limit the number of per cpu calibration bootup messages
  posix-cpu-timers: optimize and document timer_create callback
  clockevents: Add missing include to pacify sparse
  x86: vmiclock: Fix printk format
  x86: Fix printk format due to variable type change
  sparc: fix printk for change of variable type
  clocksource/events: Fix fallout of generic code changes
  nohz: Allow 32-bit machines to sleep for more than 2.15 seconds
  nohz: Track last do_timer() cpu
  nohz: Prevent clocksource wrapping during idle
  nohz: Type cast printk argument
  mips: Use generic mult/shift factor calculation for clocks
  clocksource: Provide a generic mult/shift factor calculation
  clockevents: Use u32 for mult and shift factors
  nohz: Introduce arch_needs_cpu
  nohz: Reuse ktime in sub-functions of tick_check_idle.
  time: Remove xtime_cache
  time: Implement logarithmic time accumulation
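Several of the entries above (clocksource: Provide a generic mult/shift factor calculation, clockevents: Use u32 for mult and shift factors, and the mips conversion) revolve around one conversion: a raw cycle count is turned into nanoseconds as ns = (cycles * mult) >> shift, and the new helper picks the mult/shift pair that maximizes accuracy without letting the 64-bit multiplication overflow over the interval of interest. The following is a simplified userspace sketch of that calculation, not the kernel's clocks_calc_mult_shift(); the calc_mult_shift() name and the 24 MHz example values are only for illustration.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: pick mult/shift so that (cycles * mult) >> shift
 * converts "from_hz" cycles into "to_hz" units (here: nanoseconds)
 * without overflowing 64 bits for intervals up to maxsec seconds.
 */
static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
			    uint32_t from_hz, uint32_t to_hz, uint32_t maxsec)
{
	uint64_t tmp;
	uint32_t sft, sftacc = 32;

	/* How many bits does the largest expected cycle count leave for mult? */
	tmp = ((uint64_t)maxsec * from_hz) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/* Largest shift whose rounded multiplier still fits that budget. */
	for (sft = 32; sft > 0; sft--) {
		tmp = (uint64_t)to_hz << sft;
		tmp += from_hz / 2;
		tmp /= from_hz;
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = (uint32_t)tmp;
	*shift = sft;
}

int main(void)
{
	uint32_t mult, shift;

	/* Example: 24 MHz counter to nanoseconds, valid for ~600 s intervals. */
	calc_mult_shift(&mult, &shift, 24000000, 1000000000, 600);
	printf("mult=%u shift=%u, one second of cycles -> %llu ns\n",
	       mult, shift,
	       (unsigned long long)(((uint64_t)24000000 * mult) >> shift));
	return 0;
}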
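The last entry, time: Implement logarithmic time accumulation, changes how timekeeping catches up after a long tickless sleep: rather than accumulating one tick interval per loop pass, it accumulates in power-of-two multiples of the interval, so the catch-up work grows with the logarithm of the idle time instead of linearly. Below is a rough standalone sketch of the idea; the accumulate() helper and the plain nanosecond units are illustrative and stand in for the kernel's cycle and NTP bookkeeping.

#include <stdint.h>
#include <stdio.h>

/* Consume "offset" in chunks of interval << shift, largest chunks first. */
static uint64_t accumulate(uint64_t offset, uint64_t interval, uint64_t *clock_ns)
{
	unsigned int shift = 0;

	/* Start with the biggest power-of-two multiple that fits. */
	while ((offset >> (shift + 1)) >= interval)
		shift++;

	while (offset >= interval) {
		while (offset < (interval << shift))
			shift--;		/* shrink the chunk as offset drains */
		offset -= interval << shift;
		*clock_ns += interval << shift;
	}
	return offset;				/* remainder, less than one interval */
}

int main(void)
{
	uint64_t clock_ns = 0;
	/* 10 ms tick interval, 30 s of accumulated idle time. */
	uint64_t left = accumulate(30000000000ULL, 10000000ULL, &clock_ns);

	printf("accumulated %llu ns, %llu ns left over\n",
	       (unsigned long long)clock_ns, (unsigned long long)left);
	return 0;
}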
@@ -134,18 +134,13 @@ __setup("nohz=", setup_tick_nohz);
* value. We do this unconditionally on any cpu, as we don't know whether the
* cpu, which has the update task assigned is in a long sleep.
*/
static void tick_nohz_update_jiffies(void)
static void tick_nohz_update_jiffies(ktime_t now)
{
int cpu = smp_processor_id();
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
unsigned long flags;
ktime_t now;

if (!ts->tick_stopped)
return;

cpumask_clear_cpu(cpu, nohz_cpu_mask);
now = ktime_get();
ts->idle_waketime = now;

local_irq_save(flags);
@@ -155,20 +150,17 @@ static void tick_nohz_update_jiffies(void)
touch_softlockup_watchdog();
}

static void tick_nohz_stop_idle(int cpu)
static void tick_nohz_stop_idle(int cpu, ktime_t now)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
ktime_t delta;

if (ts->idle_active) {
ktime_t now, delta;
now = ktime_get();
delta = ktime_sub(now, ts->idle_entrytime);
ts->idle_lastupdate = now;
ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
ts->idle_active = 0;
delta = ktime_sub(now, ts->idle_entrytime);
ts->idle_lastupdate = now;
ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
ts->idle_active = 0;

sched_clock_idle_wakeup_event(0);
}
sched_clock_idle_wakeup_event(0);
}

static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
@@ -216,6 +208,7 @@ void tick_nohz_stop_sched_tick(int inidle)
struct tick_sched *ts;
ktime_t last_update, expires, now;
struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
u64 time_delta;
int cpu;

local_irq_save(flags);
@@ -263,7 +256,7 @@ void tick_nohz_stop_sched_tick(int inidle)

if (ratelimit < 10) {
printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
local_softirq_pending());
(unsigned int) local_softirq_pending());
ratelimit++;
}
goto end;
@@ -275,14 +268,18 @@ void tick_nohz_stop_sched_tick(int inidle)
seq = read_seqbegin(&xtime_lock);
last_update = last_jiffies_update;
last_jiffies = jiffies;
time_delta = timekeeping_max_deferment();
} while (read_seqretry(&xtime_lock, seq));

/* Get the next timer wheel timer */
next_jiffies = get_next_timer_interrupt(last_jiffies);
delta_jiffies = next_jiffies - last_jiffies;

if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu))
if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
arch_needs_cpu(cpu)) {
next_jiffies = last_jiffies + 1;
delta_jiffies = 1;
} else {
/* Get the next timer wheel timer */
next_jiffies = get_next_timer_interrupt(last_jiffies);
delta_jiffies = next_jiffies - last_jiffies;
}
/*
* Do not stop the tick, if we are only one off
* or if the cpu is required for rcu
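The if/else rewrite above comes from nohz: Introduce arch_needs_cpu: besides RCU and pending printk output, an architecture can now veto stopping the tick for one more jiffy. The generic fallback is just a macro that never vetoes, roughly as sketched here; s390 is the architecture this hook was added for, and its actual override lives in arch code and is not shown.

/* Generic default: never veto stopping the tick. An architecture can
 * define its own arch_needs_cpu(cpu) returning nonzero while it still
 * expects imminent work, which keeps the periodic tick alive. */
#ifndef arch_needs_cpu
#define arch_needs_cpu(cpu)	(0)
#endif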
@@ -293,23 +290,52 @@ void tick_nohz_stop_sched_tick(int inidle)
/* Schedule the tick, if we are at least one jiffie off */
if ((long)delta_jiffies >= 1) {

/*
* calculate the expiry time for the next timer wheel
* timer
*/
expires = ktime_add_ns(last_update, tick_period.tv64 *
delta_jiffies);

/*
* If this cpu is the one which updates jiffies, then
* give up the assignment and let it be taken by the
* cpu which runs the tick timer next, which might be
* this cpu as well. If we don't drop this here the
* jiffies might be stale and do_timer() never
* invoked.
* invoked. Keep track of the fact that it was the one
* which had the do_timer() duty last. If this cpu is
* the one which had the do_timer() duty last, we
* limit the sleep time to the timekeeping
* max_deferement value which we retrieved
* above. Otherwise we can sleep as long as we want.
*/
if (cpu == tick_do_timer_cpu)
if (cpu == tick_do_timer_cpu) {
tick_do_timer_cpu = TICK_DO_TIMER_NONE;
ts->do_timer_last = 1;
} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
time_delta = KTIME_MAX;
ts->do_timer_last = 0;
} else if (!ts->do_timer_last) {
time_delta = KTIME_MAX;
}

/*
* calculate the expiry time for the next timer wheel
* timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
* that there is no timer pending or at least extremely
* far into the future (12 days for HZ=1000). In this
* case we set the expiry to the end of time.
*/
if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
/*
* Calculate the time delta for the next timer event.
* If the time delta exceeds the maximum time delta
* permitted by the current clocksource then adjust
* the time delta accordingly to ensure the
* clocksource does not wrap.
*/
time_delta = min_t(u64, time_delta,
tick_period.tv64 * delta_jiffies);
}

if (time_delta < KTIME_MAX)
expires = ktime_add_ns(last_update, time_delta);
else
expires.tv64 = KTIME_MAX;

if (delta_jiffies > 1)
cpumask_set_cpu(cpu, nohz_cpu_mask);
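The time_delta clamping above implements nohz: Prevent clocksource wrapping during idle: the CPU that last had the do_timer() duty may only defer the tick for as long as the clocksource can still represent the elapsed interval, which timekeeping_max_deferment() reports. That bound has roughly the shape sketched below, under simplified assumptions: standalone C with a made-up max_deferment_ns() helper, not the kernel's clocksource_max_deferment(). The counter must not wrap past its mask, cycles * mult must not overflow 64 bits, and a small safety margin is kept.

#include <stdint.h>
#include <stdio.h>

static unsigned int fls_u64(uint64_t x)
{
	unsigned int n = 0;

	while (x) {
		x >>= 1;
		n++;
	}
	return n;
}

/* Longest idle interval (in ns) the clocksource can represent. */
static uint64_t max_deferment_ns(uint64_t mask, uint32_t mult, uint32_t shift)
{
	/* Largest cycle count whose product with mult stays below 2^63. */
	uint64_t max_cycles = 1ULL << (63 - fls_u64(mult));
	uint64_t max_ns;

	/* The hardware counter itself wraps at mask. */
	if (max_cycles > mask)
		max_cycles = mask;

	/* Convert to nanoseconds and keep a 1/32 (~3%) safety margin. */
	max_ns = (max_cycles * mult) >> shift;
	return max_ns - (max_ns >> 5);
}

int main(void)
{
	/* 32-bit counter at 24 MHz, using the mult/shift pair from the
	 * earlier 24 MHz example. */
	printf("max deferment: %llu ns\n",
	       (unsigned long long)max_deferment_ns(0xffffffffULL, 699050667, 24));
	return 0;
}

For a 32-bit counter at 24 MHz this works out to a bit under three minutes, which is the kind of limit the hunk above feeds into time_delta for the do_timer() CPU.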
@@ -342,22 +368,19 @@ void tick_nohz_stop_sched_tick(int inidle)

ts->idle_sleeps++;

/* Mark expires */
ts->idle_expires = expires;

/*
* delta_jiffies >= NEXT_TIMER_MAX_DELTA signals that
* there is no timer pending or at least extremly far
* into the future (12 days for HZ=1000). In this case
* we simply stop the tick timer:
* If the expiration time == KTIME_MAX, then
* in this case we simply stop the tick timer.
*/
if (unlikely(delta_jiffies >= NEXT_TIMER_MAX_DELTA)) {
ts->idle_expires.tv64 = KTIME_MAX;
if (unlikely(expires.tv64 == KTIME_MAX)) {
if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
hrtimer_cancel(&ts->sched_timer);
goto out;
}

/* Mark expiries */
ts->idle_expires = expires;

if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
hrtimer_start(&ts->sched_timer, expires,
HRTIMER_MODE_ABS_PINNED);
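A note on the "12 days for HZ=1000" remark repeated above: NEXT_TIMER_MAX_DELTA is ((1 << 30) - 1) jiffies, and 1073741823 jiffies at HZ=1000 is roughly 1.07 million seconds, about 12.4 days. The old code tested delta_jiffies against that constant here, while the new code has already folded it into expires and only needs to check for expires.tv64 == KTIME_MAX.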
@@ -436,7 +459,11 @@ void tick_nohz_restart_sched_tick(void)
ktime_t now;

local_irq_disable();
tick_nohz_stop_idle(cpu);
if (ts->idle_active || (ts->inidle && ts->tick_stopped))
now = ktime_get();

if (ts->idle_active)
tick_nohz_stop_idle(cpu, now);

if (!ts->inidle || !ts->tick_stopped) {
ts->inidle = 0;
@@ -450,7 +477,6 @@ void tick_nohz_restart_sched_tick(void)

/* Update jiffies first */
select_nohz_load_balancer(0);
now = ktime_get();
tick_do_update_jiffies64(now);
cpumask_clear_cpu(cpu, nohz_cpu_mask);
@@ -584,22 +610,18 @@ static void tick_nohz_switch_to_nohz(void)
* timer and do not touch the other magic bits which need to be done
* when idle is left.
*/
static void tick_nohz_kick_tick(int cpu)
static void tick_nohz_kick_tick(int cpu, ktime_t now)
{
#if 0
/* Switch back to 2.6.27 behaviour */

struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
ktime_t delta, now;

if (!ts->tick_stopped)
return;
ktime_t delta;

/*
* Do not touch the tick device, when the next expiry is either
* already reached or less/equal than the tick period.
*/
now = ktime_get();
delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
if (delta.tv64 <= tick_period.tv64)
return;
@@ -608,9 +630,26 @@ static void tick_nohz_kick_tick(int cpu)
#endif
}

static inline void tick_check_nohz(int cpu)
{
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
ktime_t now;

if (!ts->idle_active && !ts->tick_stopped)
return;
now = ktime_get();
if (ts->idle_active)
tick_nohz_stop_idle(cpu, now);
if (ts->tick_stopped) {
tick_nohz_update_jiffies(now);
tick_nohz_kick_tick(cpu, now);
}
}

#else

static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_check_nohz(int cpu) { }

#endif /* NO_HZ */
@@ -620,11 +659,7 @@ static inline void tick_nohz_switch_to_nohz(void) { }
void tick_check_idle(int cpu)
{
tick_check_oneshot_broadcast(cpu);
#ifdef CONFIG_NO_HZ
tick_nohz_stop_idle(cpu);
tick_nohz_update_jiffies();
tick_nohz_kick_tick(cpu);
#endif
tick_check_nohz(cpu);
}

/*