Merge commit 'v3.3-rc6' into next
kernel/time/Kconfig
@@ -25,7 +25,7 @@ config HIGH_RES_TIMERS
 config GENERIC_CLOCKEVENTS_BUILD
 	bool
 	default y
-	depends on GENERIC_CLOCKEVENTS || GENERIC_CLOCKEVENTS_MIGR
+	depends on GENERIC_CLOCKEVENTS
 
 config GENERIC_CLOCKEVENTS_MIN_ADJUST
 	bool
kernel/time/alarmtimer.c
@@ -195,7 +195,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
 		struct alarm *alarm;
 		ktime_t expired = next->expires;
 
-		if (expired.tv64 >= now.tv64)
+		if (expired.tv64 > now.tv64)
 			break;
 
 		alarm = container_of(next, struct alarm, node);
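
The comparison change above is a boundary fix: with ">=", an alarm whose expiry equals "now" was deferred to the next pass instead of firing. A toy demonstration of the two predicates (values invented, not from a real timer queue):

#include <stdio.h>

int main(void)
{
	long long now = 1000, expires = 1000;	/* expiry exactly at "now" */

	/* break means "not expired yet, stop processing the queue" */
	printf(">= : %s\n", expires >= now ? "defer (break)" : "fire");
	printf(">  : %s\n", expires >  now ? "defer (break)" : "fire");
	return 0;
}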
kernel/time/clockevents.c
@@ -17,7 +17,6 @@
 #include <linux/module.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
-#include <linux/sysdev.h>
 
 #include "tick-internal.h"
 
kernel/time/clocksource.c
@@ -23,8 +23,8 @@
  * o Allow clocksource drivers to be unregistered
  */
 
+#include <linux/device.h>
 #include <linux/clocksource.h>
-#include <linux/sysdev.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
@@ -491,6 +491,22 @@ void clocksource_touch_watchdog(void)
 	clocksource_resume_watchdog();
 }
 
+/**
+ * clocksource_max_adjustment - Returns max adjustment amount
+ * @cs: Pointer to clocksource
+ *
+ */
+static u32 clocksource_max_adjustment(struct clocksource *cs)
+{
+	u64 ret;
+	/*
+	 * We won't try to correct for more than 11% adjustments (110,000 ppm).
+	 */
+	ret = (u64)cs->mult * 11;
+	do_div(ret, 100);
+	return (u32)ret;
+}
+
 /**
  * clocksource_max_deferment - Returns max time the clocksource can be deferred
  * @cs: Pointer to clocksource
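
For illustration, the 11% bound can be reproduced in a standalone program; the mult value below is invented, not taken from any real clocksource:

#include <stdint.h>
#include <stdio.h>

static uint32_t max_adjustment(uint32_t mult)
{
	/* allow at most an 11% (110,000 ppm) correction of mult */
	return (uint32_t)(((uint64_t)mult * 11) / 100);
}

int main(void)
{
	uint32_t mult = 4194304;	/* e.g. a 2^22 multiplier */

	printf("mult=%u maxadj=%u\n", mult, max_adjustment(mult));
	return 0;
}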
@@ -503,25 +519,28 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
 	/*
 	 * Calculate the maximum number of cycles that we can pass to the
 	 * cyc2ns function without overflowing a 64-bit signed result. The
-	 * maximum number of cycles is equal to ULLONG_MAX/cs->mult which
-	 * is equivalent to the below.
-	 * max_cycles < (2^63)/cs->mult
-	 * max_cycles < 2^(log2((2^63)/cs->mult))
-	 * max_cycles < 2^(log2(2^63) - log2(cs->mult))
-	 * max_cycles < 2^(63 - log2(cs->mult))
-	 * max_cycles < 1 << (63 - log2(cs->mult))
+	 * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
+	 * which is equivalent to the below.
+	 * max_cycles < (2^63)/(cs->mult + cs->maxadj)
+	 * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
+	 * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
+	 * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
+	 * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
 	 * Please note that we add 1 to the result of the log2 to account for
 	 * any rounding errors, ensure the above inequality is satisfied and
 	 * no overflow will occur.
 	 */
-	max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
+	max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
 
 	/*
 	 * The actual maximum number of cycles we can defer the clocksource is
 	 * determined by the minimum of max_cycles and cs->mask.
+	 * Note: Here we subtract the maxadj to make sure we don't sleep for
+	 * too long if there's a large negative adjustment.
 	 */
 	max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
-	max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
+	max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
+					cs->shift);
 
 	/*
 	 * To ensure that the clocksource does not wrap whilst we are idle,
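
The bound derived in the comment can be sanity-checked outside the kernel. The sketch below mimics the calculation with made-up mult/maxadj/shift values and a hand-rolled ilog2(); the kernel uses its own ilog2() helper and clocksource_cyc2ns():

#include <stdint.h>
#include <stdio.h>

static int ilog2_u64(uint64_t x)
{
	int l = -1;

	while (x) {
		x >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	uint32_t mult = 4194304, maxadj = 461373, shift = 22; /* toy values */
	uint64_t max_cycles, max_nsecs;

	/* 1 << (63 - (log2(mult + maxadj) + 1)) cycles cannot overflow s64 */
	max_cycles = 1ULL << (63 - (ilog2_u64(mult + maxadj) + 1));
	/* convert with the worst-case (smallest) adjusted mult */
	max_nsecs = (max_cycles * (mult - maxadj)) >> shift;

	printf("max_cycles=%llu max_nsecs=%llu\n",
	       (unsigned long long)max_cycles,
	       (unsigned long long)max_nsecs);
	return 0;
}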
@@ -529,7 +548,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
 	 * note a margin of 12.5% is used because this can be computed with
 	 * a shift, versus say 10% which would require division.
 	 */
-	return max_nsecs - (max_nsecs >> 5);
+	return max_nsecs - (max_nsecs >> 3);
 }
 
 #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
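
The 12.5% figure exists purely so the margin is a single shift instead of a division; a one-liner shows the arithmetic (the nanosecond value is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_nsecs = 1000000000ULL;	/* 1s, arbitrary */

	/* max_nsecs - max_nsecs/8 == 87.5% of the raw maximum */
	printf("deferment limit: %llu ns\n",
	       (unsigned long long)(max_nsecs - (max_nsecs >> 3)));
	return 0;
}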
@@ -628,7 +647,7 @@ static void clocksource_enqueue(struct clocksource *cs)
 
 /**
  * __clocksource_updatefreq_scale - Used to update clocksource with new freq
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
  * @scale: Scale factor multiplied against freq to get clocksource hz
  * @freq: clocksource frequency (cycles per second) divided by scale
  *
@@ -640,7 +659,6 @@ static void clocksource_enqueue(struct clocksource *cs)
 void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
 	u64 sec;
-
 	/*
 	 * Calc the maximum number of seconds which we can run before
 	 * wrapping around. For clocksources which have a mask > 32bit
@@ -651,7 +669,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 	 * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
 	 * margin as we do in clocksource_max_deferment()
 	 */
-	sec = (cs->mask - (cs->mask >> 5));
+	sec = (cs->mask - (cs->mask >> 3));
 	do_div(sec, freq);
 	do_div(sec, scale);
 	if (!sec)
@@ -661,13 +679,27 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 
 	clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
			       NSEC_PER_SEC / scale, sec * scale);
 
+	/*
+	 * Since mult may be adjusted by ntp, add a safety margin for
+	 * clocksources that have large mults, to avoid overflow.
+	 *
+	 */
+	cs->maxadj = clocksource_max_adjustment(cs);
+	while ((cs->mult + cs->maxadj < cs->mult)
+		|| (cs->mult - cs->maxadj > cs->mult)) {
+		cs->mult >>= 1;
+		cs->shift--;
+		cs->maxadj = clocksource_max_adjustment(cs);
+	}
+
 	cs->max_idle_ns = clocksource_max_deferment(cs);
 }
 EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
 
 /**
  * __clocksource_register_scale - Used to install new clocksources
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
  * @scale: Scale factor multiplied against freq to get clocksource hz
  * @freq: clocksource frequency (cycles per second) divided by scale
  *
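
The while loop above halves mult (and decrements shift) until mult plus or minus maxadj can no longer wrap. A standalone sketch of the same guard, started from an intentionally oversized mult (all values invented):

#include <stdint.h>
#include <stdio.h>

static uint32_t max_adjustment(uint32_t mult)
{
	return (uint32_t)(((uint64_t)mult * 11) / 100);
}

int main(void)
{
	uint32_t mult = 0xf0000000, shift = 31, maxadj;

	maxadj = max_adjustment(mult);
	/* u32 wraparound in either direction means the sum/difference overflowed */
	while ((mult + maxadj < mult) || (mult - maxadj > mult)) {
		mult >>= 1;
		shift--;
		maxadj = max_adjustment(mult);
	}
	printf("mult=%#x shift=%u maxadj=%#x\n", mult, shift, maxadj);
	return 0;
}

Halving mult while decrementing shift keeps the mult/2^shift frequency ratio unchanged, so only precision (not rate) is traded away.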
@@ -695,12 +727,18 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
 
 /**
  * clocksource_register - Used to install new clocksources
- * @t: clocksource to be registered
+ * @cs: clocksource to be registered
  *
  * Returns -EBUSY if registration fails, zero otherwise.
  */
 int clocksource_register(struct clocksource *cs)
 {
+	/* calculate max adjustment for given mult/shift */
+	cs->maxadj = clocksource_max_adjustment(cs);
+	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
+		"Clocksource %s might overflow on 11%% adjustment\n",
+		cs->name);
+
 	/* calculate max idle time permitted for this clocksource */
 	cs->max_idle_ns = clocksource_max_deferment(cs);
 
@@ -723,6 +761,8 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
 
 /**
  * clocksource_change_rating - Change the rating of a registered clocksource
+ * @cs: clocksource to be changed
+ * @rating: new rating
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
@@ -734,6 +774,7 @@ EXPORT_SYMBOL(clocksource_change_rating);
 
 /**
  * clocksource_unregister - remove a registered clocksource
+ * @cs: clocksource to be unregistered
  */
 void clocksource_unregister(struct clocksource *cs)
 {
@@ -749,13 +790,14 @@ EXPORT_SYMBOL(clocksource_unregister);
 /**
  * sysfs_show_current_clocksources - sysfs interface for current clocksource
  * @dev: unused
+ * @attr: unused
  * @buf: char buffer to be filled with clocksource list
  *
  * Provides sysfs interface for listing current clocksource.
  */
 static ssize_t
-sysfs_show_current_clocksources(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *buf)
+sysfs_show_current_clocksources(struct device *dev,
+				struct device_attribute *attr, char *buf)
 {
 	ssize_t count = 0;
 
@@ -769,14 +811,15 @@ sysfs_show_current_clocksources(struct sys_device *dev,
 /**
  * sysfs_override_clocksource - interface for manually overriding clocksource
  * @dev: unused
+ * @attr: unused
  * @buf: name of override clocksource
  * @count: length of buffer
  *
  * Takes input from sysfs interface for manually overriding the default
  * clocksource selection.
  */
-static ssize_t sysfs_override_clocksource(struct sys_device *dev,
-					  struct sysdev_attribute *attr,
+static ssize_t sysfs_override_clocksource(struct device *dev,
+					  struct device_attribute *attr,
					  const char *buf, size_t count)
 {
 	size_t ret = count;
@@ -804,13 +847,14 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
 /**
  * sysfs_show_available_clocksources - sysfs interface for listing clocksource
  * @dev: unused
+ * @attr: unused
  * @buf: char buffer to be filled with clocksource list
  *
  * Provides sysfs interface for listing registered clocksources
  */
 static ssize_t
-sysfs_show_available_clocksources(struct sys_device *dev,
-				  struct sysdev_attribute *attr,
+sysfs_show_available_clocksources(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
 {
 	struct clocksource *src;
@@ -839,35 +883,36 @@ sysfs_show_available_clocksources(struct sys_device *dev,
 
 /*
  * Sysfs setup bits:
  */
-static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
+static DEVICE_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
		   sysfs_override_clocksource);
 
-static SYSDEV_ATTR(available_clocksource, 0444,
+static DEVICE_ATTR(available_clocksource, 0444,
		   sysfs_show_available_clocksources, NULL);
 
-static struct sysdev_class clocksource_sysclass = {
+static struct bus_type clocksource_subsys = {
 	.name = "clocksource",
+	.dev_name = "clocksource",
 };
 
-static struct sys_device device_clocksource = {
+static struct device device_clocksource = {
 	.id	= 0,
-	.cls	= &clocksource_sysclass,
+	.bus	= &clocksource_subsys,
 };
 
 static int __init init_clocksource_sysfs(void)
 {
-	int error = sysdev_class_register(&clocksource_sysclass);
+	int error = subsys_system_register(&clocksource_subsys, NULL);
 
 	if (!error)
-		error = sysdev_register(&device_clocksource);
+		error = device_register(&device_clocksource);
 	if (!error)
-		error = sysdev_create_file(
+		error = device_create_file(
 				&device_clocksource,
-				&attr_current_clocksource);
+				&dev_attr_current_clocksource);
 	if (!error)
-		error = sysdev_create_file(
+		error = device_create_file(
 				&device_clocksource,
-				&attr_available_clocksource);
+				&dev_attr_available_clocksource);
 	return error;
 }
 
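
With the conversion from sysdev to a regular subsystem, the attribute files surface under /sys/devices/system/clocksource/clocksource0/. A minimal userspace reader, with error handling kept deliberately thin:

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/devices/system/clocksource/clocksource0/current_clocksource", "r");

	if (!f)
		return 1;	/* file absent on kernels without this sysfs layout */
	if (fgets(buf, sizeof(buf), f))
		printf("current clocksource: %s", buf);
	fclose(f);
	return 0;
}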
kernel/time/tick-broadcast.c
@@ -71,7 +71,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
	    (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;
 
-	clockevents_exchange_device(NULL, dev);
+	clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_get_broadcast_mask()))
		tick_broadcast_start_periodic(dev);
kernel/time/tick-sched.c
@@ -275,42 +275,17 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 
-/**
- * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
- *
- * When the next event is more than a tick into the future, stop the idle tick
- * Called either from the idle loop or from irq_exit() when an idle period was
- * just interrupted by an interrupt which did not cause a reschedule.
- */
-void tick_nohz_stop_sched_tick(int inidle)
+static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
 {
-	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
-	struct tick_sched *ts;
+	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
	ktime_t last_update, expires, now;
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	u64 time_delta;
	int cpu;
 
-	local_irq_save(flags);
-
	cpu = smp_processor_id();
	ts = &per_cpu(tick_cpu_sched, cpu);
-
-	/*
-	 * Call to tick_nohz_start_idle stops the last_update_time from being
-	 * updated. Thus, it must not be called in the event we are called from
-	 * irq_exit() with the prior state different than idle.
-	 */
-	if (!inidle && !ts->inidle)
-		goto end;
-
-	/*
-	 * Set ts->inidle unconditionally. Even if the system did not
-	 * switch to NOHZ mode the cpu frequency governers rely on the
-	 * update of the idle time accounting in tick_nohz_start_idle().
-	 */
-	ts->inidle = 1;
-
	now = tick_nohz_start_idle(cpu, ts);
 
	/*
@@ -326,10 +301,10 @@ void tick_nohz_stop_sched_tick(int inidle)
	}
 
	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
-		goto end;
+		return;
 
	if (need_resched())
-		goto end;
+		return;
 
	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;
@@ -339,7 +314,7 @@ void tick_nohz_stop_sched_tick(int inidle)
			       (unsigned int) local_softirq_pending());
			ratelimit++;
		}
-		goto end;
+		return;
	}
 
	ts->idle_calls++;
@@ -434,7 +409,6 @@ void tick_nohz_stop_sched_tick(int inidle)
			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
			ts->tick_stopped = 1;
			ts->idle_jiffies = last_jiffies;
-			rcu_enter_nohz();
		}
 
		ts->idle_sleeps++;
@@ -472,8 +446,64 @@ out:
	ts->next_jiffies = next_jiffies;
	ts->last_jiffies = last_jiffies;
	ts->sleep_length = ktime_sub(dev->next_event, now);
-end:
-	local_irq_restore(flags);
 }
 
+/**
+ * tick_nohz_idle_enter - stop the idle tick from the idle task
+ *
+ * When the next event is more than a tick into the future, stop the idle tick
+ * Called when we start the idle loop.
+ *
+ * The arch is responsible for calling:
+ *
+ * - rcu_idle_enter() after its last use of RCU before the CPU is put
+ *   to sleep.
+ * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
+ */
+void tick_nohz_idle_enter(void)
+{
+	struct tick_sched *ts;
+
+	WARN_ON_ONCE(irqs_disabled());
+
+	/*
+	 * Update the idle state in the scheduler domain hierarchy
+	 * when tick_nohz_stop_sched_tick() is called from the idle loop.
+	 * State will be updated to busy during the first busy tick after
+	 * exiting idle.
+	 */
+	set_cpu_sd_state_idle();
+
+	local_irq_disable();
+
+	ts = &__get_cpu_var(tick_cpu_sched);
+	/*
+	 * Set ts->inidle unconditionally. Even if the system did not
+	 * switch to NOHZ mode the cpu frequency governors rely on the
+	 * update of the idle time accounting in tick_nohz_start_idle().
+	 */
+	ts->inidle = 1;
+	tick_nohz_stop_sched_tick(ts);
+
+	local_irq_enable();
+}
+
+/**
+ * tick_nohz_irq_exit - update next tick event from interrupt exit
+ *
+ * When an interrupt fires while we are idle and it doesn't cause
+ * a reschedule, it may still add, modify or delete a timer, enqueue
+ * an RCU callback, etc...
+ * So we need to re-calculate and reprogram the next tick event.
+ */
+void tick_nohz_irq_exit(void)
+{
+	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+
+	if (!ts->inidle)
+		return;
+
+	tick_nohz_stop_sched_tick(ts);
+}
+
 /**
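
A runnable toy model of the contract the new kernel-doc spells out: the arch idle loop calls tick_nohz_idle_enter() once, brackets each sleep with rcu_idle_enter()/rcu_idle_exit(), and calls tick_nohz_idle_exit() on wakeup. Every function below is a tracing stand-in, not the kernel's implementation:

#include <stdio.h>

static void tick_nohz_idle_enter(void) { puts("tick_nohz_idle_enter"); }
static void tick_nohz_idle_exit(void)  { puts("tick_nohz_idle_exit"); }
static void rcu_idle_enter(void)       { puts("  rcu_idle_enter"); }
static void rcu_idle_exit(void)        { puts("  rcu_idle_exit"); }
static void cpu_sleep(void)            { puts("    wfi/hlt"); }

int main(void)
{
	int sleeps;

	tick_nohz_idle_enter();
	for (sleeps = 0; sleeps < 2; sleeps++) {	/* until need_resched() */
		rcu_idle_enter();	/* last RCU use is behind us */
		cpu_sleep();		/* arch low-power instruction */
		rcu_idle_exit();	/* before the first RCU use */
	}
	tick_nohz_idle_exit();
	return 0;
}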
@@ -515,11 +545,13 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 }
 
 /**
- * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
+ * tick_nohz_idle_exit - restart the idle tick from the idle task
  *
  * Restart the idle tick when the CPU is woken up from idle
+ * This also exits the RCU extended quiescent state. The CPU
+ * can use RCU again after this function is called.
  */
-void tick_nohz_restart_sched_tick(void)
+void tick_nohz_idle_exit(void)
 {
	int cpu = smp_processor_id();
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
@@ -529,6 +561,7 @@ void tick_nohz_restart_sched_tick(void)
	ktime_t now;
 
	local_irq_disable();
+
	if (ts->idle_active || (ts->inidle && ts->tick_stopped))
		now = ktime_get();
 
@@ -543,8 +576,6 @@ void tick_nohz_restart_sched_tick(void)
 
	ts->inidle = 0;
 
-	rcu_exit_nohz();
-
	/* Update jiffies first */
	select_nohz_load_balancer(0);
	tick_do_update_jiffies64(now);
kernel/time/timekeeping.c
@@ -131,7 +131,7 @@ static inline s64 timekeeping_get_ns_raw(void)
	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
 
-	/* return delta convert to nanoseconds using ntp adjusted mult. */
+	/* return delta converted to nanoseconds. */
	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
 }
 
@@ -249,6 +249,8 @@ ktime_t ktime_get(void)
		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
		nsecs += timekeeping_get_ns();
+		/* If arch requires, add in gettimeoffset() */
+		nsecs += arch_gettimeoffset();
 
	} while (read_seqretry(&xtime_lock, seq));
	/*
@@ -280,6 +282,8 @@ void ktime_get_ts(struct timespec *ts)
		*ts = xtime;
		tomono = wall_to_monotonic;
		nsecs = timekeeping_get_ns();
+		/* If arch requires, add in gettimeoffset() */
+		nsecs += arch_gettimeoffset();
 
	} while (read_seqretry(&xtime_lock, seq));
 
@@ -802,14 +806,44 @@ static void timekeeping_adjust(s64 offset)
	s64 error, interval = timekeeper.cycle_interval;
	int adj;
 
+	/*
+	 * The point of this is to check if the error is greater than half
+	 * an interval.
+	 *
+	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
+	 *
+	 * Note we subtract one in the shift, so that error is really error*2.
+	 * This "saves" dividing(shifting) interval twice, but keeps the
+	 * (error > interval) comparison as still measuring if error is
+	 * larger than half an interval.
+	 *
+	 * Note: It does not "save" on aggravation when reading the code.
+	 */
	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
	if (error > interval) {
+		/*
+		 * We now divide error by 4 (via shift), which checks if
+		 * the error is greater than twice the interval.
+		 * If it is greater, we need a bigadjust; if it's smaller,
+		 * we can adjust by 1.
+		 */
		error >>= 2;
+		/*
+		 * XXX - In update_wall_time, we round up to the next
+		 * nanosecond, and store the amount rounded up into
+		 * the error. This causes the likely below to be unlikely.
+		 *
+		 * The proper fix is to avoid rounding up by using
+		 * the high precision timekeeper.xtime_nsec instead of
+		 * xtime.tv_nsec everywhere. Fixing this will take some
+		 * time.
+		 */
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
+		/* See comment above, this is just switched for the negative */
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
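
The shift-by-one-less trick in isolation: shifting the error down by (shift - 1) leaves it doubled, so comparing it against the full interval is the same as comparing the true error against half an interval, with no division. Toy numbers, units glossed over:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t ntp_error = 3 << 10;	/* true error of 3, in 2^-10 shifted units */
	int64_t interval = 5;
	int shift = 10;

	/* shift one bit less, so "error" below is really error*2 */
	int64_t error = ntp_error >> (shift - 1);

	if (error > interval)	/* i.e. true error > interval/2 */
		printf("error exceeds half an interval\n");
	else
		printf("error within half an interval\n");
	return 0;
}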
@@ -817,9 +851,65 @@ static void timekeeping_adjust(s64 offset)
			offset = -offset;
		} else
			adj = timekeeping_bigadjust(error, &interval, &offset);
-	} else
+	} else /* No adjustment needed */
		return;
 
+	WARN_ONCE(timekeeper.clock->maxadj &&
+			(timekeeper.mult + adj > timekeeper.clock->mult +
+						timekeeper.clock->maxadj),
+			"Adjusting %s more than 11%% (%ld vs %ld)\n",
+			timekeeper.clock->name, (long)timekeeper.mult + adj,
+			(long)timekeeper.clock->mult +
+				timekeeper.clock->maxadj);
+	/*
+	 * So the following can be confusing.
+	 *
+	 * To keep things simple, let's assume adj == 1 for now.
+	 *
+	 * When adj != 1, remember that the interval and offset values
+	 * have been appropriately scaled so the math is the same.
+	 *
+	 * The basic idea here is that we're increasing the multiplier
+	 * by one; this causes the xtime_interval to be incremented by
+	 * one cycle_interval. This is because:
+	 *	xtime_interval = cycle_interval * mult
+	 * So if mult is being incremented by one:
+	 *	xtime_interval = cycle_interval * (mult + 1)
+	 * It's the same as:
+	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
+	 * Which can be shortened to:
+	 *	xtime_interval += cycle_interval
+	 *
+	 * So offset stores the non-accumulated cycles. Thus the current
+	 * time (in shifted nanoseconds) is:
+	 *	now = (offset * adj) + xtime_nsec
+	 * Now, even though we're adjusting the clock frequency, we have
+	 * to keep time consistent. In other words, we can't jump back
+	 * in time, and we also want to avoid jumping forward in time.
+	 *
+	 * So given the same offset value, we need the time to be the same
+	 * both before and after the freq adjustment.
+	 *	now = (offset * adj_1) + xtime_nsec_1
+	 *	now = (offset * adj_2) + xtime_nsec_2
+	 * So:
+	 *	(offset * adj_1) + xtime_nsec_1 =
+	 *		(offset * adj_2) + xtime_nsec_2
+	 * And we know:
+	 *	adj_2 = adj_1 + 1
+	 * So:
+	 *	(offset * adj_1) + xtime_nsec_1 =
+	 *		(offset * (adj_1+1)) + xtime_nsec_2
+	 *	(offset * adj_1) + xtime_nsec_1 =
+	 *		(offset * adj_1) + offset + xtime_nsec_2
+	 * Canceling the sides:
+	 *	xtime_nsec_1 = offset + xtime_nsec_2
+	 * Which gives us:
+	 *	xtime_nsec_2 = xtime_nsec_1 - offset
+	 * Which simplifies to:
+	 *	xtime_nsec -= offset
+	 *
+	 * XXX - TODO: Doc ntp_error calculation.
+	 */
	timekeeper.mult += adj;
	timekeeper.xtime_interval += interval;
	timekeeper.xtime_nsec -= offset;
 
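
The derivation's punchline can be checked numerically: bumping adj by one while subtracting offset from xtime_nsec leaves "now" unchanged. Arbitrary toy values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t offset = 12345;		/* non-accumulated cycles */
	int64_t adj = 7, xtime_nsec = 999999;

	/* now = (offset * adj) + xtime_nsec, before and after the tweak */
	int64_t before = offset * adj + xtime_nsec;
	int64_t after  = offset * (adj + 1) + (xtime_nsec - offset);

	printf("before=%lld after=%lld\n",
	       (long long)before, (long long)after);
	return 0;
}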