Merge branch 'timers/urgent' into timers/core
Pick up dependent changes.
@@ -4265,6 +4265,11 @@ static void kill_css(struct cgroup_subsys_state *css)
 {
         lockdep_assert_held(&cgroup_mutex);
 
+        if (css->flags & CSS_DYING)
+                return;
+
+        css->flags |= CSS_DYING;
+
         /*
          * This must happen before css is disassociated with its cgroup.
          * See seq_css() for details.
@@ -176,9 +176,9 @@ typedef enum {
 } cpuset_flagbits_t;
 
 /* convenient tests for these bits */
-static inline bool is_cpuset_online(const struct cpuset *cs)
+static inline bool is_cpuset_online(struct cpuset *cs)
 {
-        return test_bit(CS_ONLINE, &cs->flags);
+        return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
 }
 
 static inline int is_cpu_exclusive(const struct cpuset *cs)
@@ -1658,13 +1658,13 @@ static ssize_t write_cpuhp_target(struct device *dev,
         ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
         mutex_unlock(&cpuhp_state_mutex);
         if (ret)
-                return ret;
+                goto out;
 
         if (st->state < target)
                 ret = do_cpu_up(dev->id, target);
         else
                 ret = do_cpu_down(dev->id, target);
-
+out:
         unlock_device_hotplug();
         return ret ? ret : count;
 }
@@ -7316,6 +7316,21 @@ int perf_event_account_interrupt(struct perf_event *event)
         return __perf_event_account_interrupt(event, 1);
 }
 
+static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
+{
+        /*
+         * Due to interrupt latency (AKA "skid"), we may enter the
+         * kernel before taking an overflow, even if the PMU is only
+         * counting user events.
+         * To avoid leaking information to userspace, we must always
+         * reject kernel samples when exclude_kernel is set.
+         */
+        if (event->attr.exclude_kernel && !user_mode(regs))
+                return false;
+
+        return true;
+}
+
 /*
  * Generic event overflow handling, sampling.
  */
@@ -7336,6 +7351,12 @@ static int __perf_event_overflow(struct perf_event *event,
 
         ret = __perf_event_account_interrupt(event, throttle);
 
+        /*
+         * For security, drop the skid kernel samples if necessary.
+         */
+        if (!sample_is_allowed(event, regs))
+                return ret;
+
         /*
          * XXX event_limit might not quite work as expected on inherited
          * events
@@ -1312,8 +1312,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                 ret = __irq_set_trigger(desc,
                                         new->flags & IRQF_TRIGGER_MASK);
 
-                if (ret)
+                if (ret) {
+                        irq_release_resources(desc);
                         goto out_mask;
+                }
         }
 
         desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
@@ -132,7 +132,7 @@ int freeze_processes(void)
         if (!pm_freezing)
                 atomic_inc(&system_freezing_cnt);
 
-        pm_wakeup_clear(true);
+        pm_wakeup_clear();
         pr_info("Freezing user space processes ... ");
         pm_freezing = true;
         error = try_to_freeze_tasks(true);
@@ -72,8 +72,6 @@ static void freeze_begin(void)
 
 static void freeze_enter(void)
 {
-        trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);
-
         spin_lock_irq(&suspend_freeze_lock);
         if (pm_wakeup_pending())
                 goto out;
@@ -100,27 +98,6 @@ static void freeze_enter(void)
 out:
         suspend_freeze_state = FREEZE_STATE_NONE;
         spin_unlock_irq(&suspend_freeze_lock);
-
-        trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
 }
-
-static void s2idle_loop(void)
-{
-        do {
-                freeze_enter();
-
-                if (freeze_ops && freeze_ops->wake)
-                        freeze_ops->wake();
-
-                dpm_resume_noirq(PMSG_RESUME);
-                if (freeze_ops && freeze_ops->sync)
-                        freeze_ops->sync();
-
-                if (pm_wakeup_pending())
-                        break;
-
-                pm_wakeup_clear(false);
-        } while (!dpm_suspend_noirq(PMSG_SUSPEND));
-}
 
 void freeze_wake(void)
@@ -394,8 +371,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
          * all the devices are suspended.
          */
         if (state == PM_SUSPEND_FREEZE) {
-                s2idle_loop();
-                goto Platform_early_resume;
+                trace_suspend_resume(TPS("machine_suspend"), state, true);
+                freeze_enter();
+                trace_suspend_resume(TPS("machine_suspend"), state, false);
+                goto Platform_wake;
         }
 
         error = disable_nonboot_cpus();
@@ -269,7 +269,6 @@ static struct console *exclusive_console;
 #define MAX_CMDLINECONSOLES 8
 
 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
-static int console_cmdline_cnt;
 
 static int preferred_console = -1;
 int console_set_on_cmdline;
@@ -1906,25 +1905,12 @@ static int __add_preferred_console(char *name, int idx, char *options,
          * See if this tty is not yet registered, and
          * if we have a slot free.
          */
-        for (i = 0, c = console_cmdline; i < console_cmdline_cnt; i++, c++) {
+        for (i = 0, c = console_cmdline;
+             i < MAX_CMDLINECONSOLES && c->name[0];
+             i++, c++) {
                 if (strcmp(c->name, name) == 0 && c->index == idx) {
-                        if (brl_options)
-                                return 0;
-
-                        /*
-                         * Maintain an invariant that will help to find if
-                         * the matching console is preferred, see
-                         * register_console():
-                         *
-                         * The last non-braille console is always
-                         * the preferred one.
-                         */
-                        if (i != console_cmdline_cnt - 1)
-                                swap(console_cmdline[i],
-                                     console_cmdline[console_cmdline_cnt - 1]);
-
-                        preferred_console = console_cmdline_cnt - 1;
-
+                        if (!brl_options)
+                                preferred_console = i;
                         return 0;
                 }
         }
@@ -1937,7 +1923,6 @@ static int __add_preferred_console(char *name, int idx, char *options,
                 braille_set_options(c, brl_options);
 
         c->index = idx;
-        console_cmdline_cnt++;
         return 0;
 }
 /*
@@ -2477,23 +2462,12 @@ void register_console(struct console *newcon)
         }
 
         /*
-         * See if this console matches one we selected on the command line.
-         *
-         * There may be several entries in the console_cmdline array matching
-         * with the same console, one with newcon->match(), another by
-         * name/index:
-         *
-         *      pl011,mmio,0x87e024000000,115200 -- added from SPCR
-         *      ttyAMA0 -- added from command line
-         *
-         * Traverse the console_cmdline array in reverse order to be
-         * sure that if this console is preferred then it will be the first
-         * matching entry. We use the invariant that is maintained in
-         * __add_preferred_console().
+         * See if this console matches one we selected on
+         * the command line.
          */
-        for (i = console_cmdline_cnt - 1; i >= 0; i--) {
-                c = console_cmdline + i;
-
+        for (i = 0, c = console_cmdline;
+             i < MAX_CMDLINECONSOLES && c->name[0];
+             i++, c++) {
                 if (!newcon->match ||
                     newcon->match(newcon, c->name, c->index, c->options) != 0) {
                         /* default matching */
@@ -263,7 +263,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
 /*
  * Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct. Must be called from process context.
+ * srcu_struct.
  * Returns an index that must be passed to the matching srcu_read_unlock().
  */
 int __srcu_read_lock(struct srcu_struct *sp)
@@ -271,7 +271,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
         int idx;
 
         idx = READ_ONCE(sp->completed) & 0x1;
-        __this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
+        this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
         smp_mb(); /* B */ /* Avoid leaking the critical section. */
         return idx;
 }
@@ -281,7 +281,6 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
  * Removes the count for the old reader from the appropriate per-CPU
  * element of the srcu_struct. Note that this may well be a different
  * CPU than that which was incremented by the corresponding srcu_read_lock().
- * Must be called from process context.
  */
 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
@@ -97,8 +97,9 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
 /*
  * Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct. Must be called from process context.
- * Returns an index that must be passed to the matching srcu_read_unlock().
+ * srcu_struct. Can be invoked from irq/bh handlers, but the matching
+ * __srcu_read_unlock() must be in the same handler instance. Returns an
+ * index that must be passed to the matching srcu_read_unlock().
  */
 int __srcu_read_lock(struct srcu_struct *sp)
 {
@@ -112,7 +113,7 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
 
 /*
  * Removes the count for the old reader from the appropriate element of
- * the srcu_struct. Must be called from process context.
+ * the srcu_struct.
  */
 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
@@ -357,7 +357,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
 /*
  * Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct. Must be called from process context.
+ * srcu_struct.
  * Returns an index that must be passed to the matching srcu_read_unlock().
 */
 int __srcu_read_lock(struct srcu_struct *sp)
@@ -365,7 +365,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
         int idx;
 
         idx = READ_ONCE(sp->srcu_idx) & 0x1;
-        __this_cpu_inc(sp->sda->srcu_lock_count[idx]);
+        this_cpu_inc(sp->sda->srcu_lock_count[idx]);
         smp_mb(); /* B */ /* Avoid leaking the critical section. */
         return idx;
 }
@@ -375,7 +375,6 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct. Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
-* Must be called from process context.
 */
 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
@@ -5605,7 +5605,7 @@ void idle_task_exit(void)
         BUG_ON(cpu_online(smp_processor_id()));
 
         if (mm != &init_mm) {
-                switch_mm_irqs_off(mm, &init_mm, current);
+                switch_mm(mm, &init_mm, current);
                 finish_arch_post_lock_switch();
         }
         mmdrop(mm);
@@ -101,9 +101,6 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
         if (sg_policy->next_freq == next_freq)
                 return;
 
-        if (sg_policy->next_freq > next_freq)
-                next_freq = (sg_policy->next_freq + next_freq) >> 1;
-
         sg_policy->next_freq = next_freq;
         sg_policy->last_freq_update_time = time;
 
@@ -3563,7 +3563,7 @@ static inline void check_schedstat_required(void)
                         trace_sched_stat_runtime_enabled()) {
                 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
                              "stat_blocked and stat_runtime require the "
-                             "kernel parameter schedstats=enabled or "
+                             "kernel parameter schedstats=enable or "
                              "kernel.sched_schedstats=1\n");
         }
 #endif
@@ -37,9 +37,11 @@ static int tick_broadcast_forced;
 static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
 
 #ifdef CONFIG_TICK_ONESHOT
+static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
 static void tick_broadcast_clear_oneshot(int cpu);
 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
 #else
+static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
 static inline void tick_broadcast_clear_oneshot(int cpu) { }
 static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
 #endif
@@ -867,7 +869,7 @@ static void tick_broadcast_init_next_event(struct cpumask *mask,
 /**
  * tick_broadcast_setup_oneshot - setup the broadcast device
 */
-void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
+static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
         int cpu = smp_processor_id();
 
@@ -126,7 +126,6 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
 
 /* Functions related to oneshot broadcasting */
 #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
-extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int cpu);
 extern int tick_broadcast_oneshot_active(void);
@@ -134,7 +133,6 @@ extern void tick_check_oneshot_broadcast_this_cpu(void);
 bool tick_broadcast_oneshot_available(void);
 extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
 #else /* !(BROADCAST && ONESHOT): */
-static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
@@ -118,6 +118,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
         tk->offs_boot = ktime_add(tk->offs_boot, delta);
 }
 
+/*
+ * tk_clock_read - atomic clocksource read() helper
+ *
+ * This helper is necessary to use in the read paths because, while the
+ * seqlock ensures we don't return a bad value while structures are updated,
+ * it doesn't protect from potential crashes. There is the possibility that
+ * the tkr's clocksource may change between the read reference, and the
+ * clock reference passed to the read function. This can cause crashes if
+ * the wrong clocksource is passed to the wrong read function.
+ * This isn't necessary to use when holding the timekeeper_lock or doing
+ * a read of the fast-timekeeper tkrs (which is protected by its own locking
+ * and update logic).
+ */
+static inline u64 tk_clock_read(struct tk_read_base *tkr)
+{
+        struct clocksource *clock = READ_ONCE(tkr->clock);
+
+        return clock->read(clock);
+}
+
 #ifdef CONFIG_DEBUG_TIMEKEEPING
 #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
 
@@ -175,7 +195,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
          */
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
-                now = tkr->read(tkr->clock);
+                now = tk_clock_read(tkr);
                 last = tkr->cycle_last;
                 mask = tkr->mask;
                 max = tkr->clock->max_cycles;
@@ -209,7 +229,7 @@ static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
         u64 cycle_now, delta;
 
         /* read clocksource */
-        cycle_now = tkr->read(tkr->clock);
+        cycle_now = tk_clock_read(tkr);
 
         /* calculate the delta since the last update_wall_time */
         delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
@@ -238,12 +258,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
         ++tk->cs_was_changed_seq;
         old_clock = tk->tkr_mono.clock;
         tk->tkr_mono.clock = clock;
-        tk->tkr_mono.read = clock->read;
         tk->tkr_mono.mask = clock->mask;
-        tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
+        tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
 
         tk->tkr_raw.clock = clock;
-        tk->tkr_raw.read = clock->read;
         tk->tkr_raw.mask = clock->mask;
         tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
 
@@ -262,7 +280,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
         /* Go back from cycles -> shifted ns */
         tk->xtime_interval = interval * clock->mult;
         tk->xtime_remainder = ntpinterval - tk->xtime_interval;
-        tk->raw_interval = (interval * clock->mult) >> clock->shift;
+        tk->raw_interval = interval * clock->mult;
 
         /* if changing clocks, convert xtime_nsec shift units */
         if (old_clock) {
@@ -404,7 +422,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 
                 now += timekeeping_delta_to_ns(tkr,
                                 clocksource_delta(
-                                        tkr->read(tkr->clock),
+                                        tk_clock_read(tkr),
                                         tkr->cycle_last,
                                         tkr->mask));
         } while (read_seqcount_retry(&tkf->seq, seq));
@@ -461,6 +479,10 @@ static u64 dummy_clock_read(struct clocksource *cs)
         return cycles_at_suspend;
 }
 
+static struct clocksource dummy_clock = {
+        .read = dummy_clock_read,
+};
+
 /**
  * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
  * @tk: Timekeeper to snapshot.
@@ -477,13 +499,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
         struct tk_read_base *tkr = &tk->tkr_mono;
 
         memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
-        cycles_at_suspend = tkr->read(tkr->clock);
-        tkr_dummy.read = dummy_clock_read;
+        cycles_at_suspend = tk_clock_read(tkr);
+        tkr_dummy.clock = &dummy_clock;
         update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
 
         tkr = &tk->tkr_raw;
         memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
-        tkr_dummy.read = dummy_clock_read;
+        tkr_dummy.clock = &dummy_clock;
         update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
 }
 
@@ -649,11 +671,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
  */
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
-        struct clocksource *clock = tk->tkr_mono.clock;
         u64 cycle_now, delta;
         u64 nsec;
 
-        cycle_now = tk->tkr_mono.read(clock);
+        cycle_now = tk_clock_read(&tk->tkr_mono);
         delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
         tk->tkr_mono.cycle_last = cycle_now;
         tk->tkr_raw.cycle_last = cycle_now;
@@ -929,8 +950,7 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
 
         do {
                 seq = read_seqcount_begin(&tk_core.seq);
-
-                now = tk->tkr_mono.read(tk->tkr_mono.clock);
+                now = tk_clock_read(&tk->tkr_mono);
                 systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
                 systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
                 base_real = ktime_add(tk->tkr_mono.base,
@@ -1108,7 +1128,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
                  * Check whether the system counter value provided by the
                  * device driver is on the current timekeeping interval.
                  */
-                now = tk->tkr_mono.read(tk->tkr_mono.clock);
+                now = tk_clock_read(&tk->tkr_mono);
                 interval_start = tk->tkr_mono.cycle_last;
                 if (!cycle_between(interval_start, cycles, now)) {
                         clock_was_set_seq = tk->clock_was_set_seq;
@@ -1629,7 +1649,7 @@ void timekeeping_resume(void)
          * The less preferred source will only be tried if there is no better
          * usable source. The rtc part is handled separately in rtc core code.
          */
-        cycle_now = tk->tkr_mono.read(clock);
+        cycle_now = tk_clock_read(&tk->tkr_mono);
         if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
                 cycle_now > tk->tkr_mono.cycle_last) {
                 u64 nsec, cyc_delta;
@@ -1976,7 +1996,7 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
                                                 u32 shift, unsigned int *clock_set)
 {
         u64 interval = tk->cycle_interval << shift;
-        u64 raw_nsecs;
+        u64 snsec_per_sec;
 
         /* If the offset is smaller than a shifted interval, do nothing */
         if (offset < interval)
@@ -1991,14 +2011,15 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
         *clock_set |= accumulate_nsecs_to_secs(tk);
 
         /* Accumulate raw time */
-        raw_nsecs = (u64)tk->raw_interval << shift;
-        raw_nsecs += tk->raw_time.tv_nsec;
-        if (raw_nsecs >= NSEC_PER_SEC) {
-                u64 raw_secs = raw_nsecs;
-                raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
-                tk->raw_time.tv_sec += raw_secs;
+        tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
+        tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
+        snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+        while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
+                tk->tkr_raw.xtime_nsec -= snsec_per_sec;
+                tk->raw_time.tv_sec++;
         }
-        tk->raw_time.tv_nsec = raw_nsecs;
+        tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
+        tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
 
         /* Accumulate error between NTP and clock interval */
         tk->ntp_error += tk->ntp_tick << shift;
@@ -2030,7 +2051,7 @@ void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
         offset = real_tk->cycle_interval;
 #else
-        offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+        offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
                                    tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 #endif
 