Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer updates from Thomas Gleixner:
 "This update provides the following changes:

   - The rework of the timer wheel which addresses the shortcomings of
     the current wheel (cascading, slow search for next expiring timer,
     etc). That's the first major change of the wheel in almost 20
     years since Finn implemented it.

   - A large overhaul of the clocksource drivers init functions to
     consolidate the Device Tree initialization

   - Some more Y2038 updates

   - A capability fix for timerfd

   - Yet another clock chip driver

   - The usual pile of updates, comment improvements all over the
     place"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (130 commits)
  tick/nohz: Optimize nohz idle enter
  clockevents: Make clockevents_subsys static
  clocksource/drivers/time-armada-370-xp: Fix return value check
  timers: Implement optimization for same expiry time in mod_timer()
  timers: Split out index calculation
  timers: Only wake softirq if necessary
  timers: Forward the wheel clock whenever possible
  timers/nohz: Remove pointless tick_nohz_kick_tick() function
  timers: Optimize collect_expired_timers() for NOHZ
  timers: Move __run_timers() function
  timers: Remove set_timer_slack() leftovers
  timers: Switch to a non-cascading wheel
  timers: Reduce the CPU index space to 256k
  timers: Give a few structs and members proper names
  hlist: Add hlist_is_singular_node() helper
  signals: Use hrtimer for sigtimedwait()
  timers: Remove the deprecated mod_timer_pinned() API
  timers, net/ipv4/inet: Initialize connection request timers as pinned
  timers, drivers/tty/mips_ejtag: Initialize the poll timer as pinned
  timers, drivers/tty/metag_da: Initialize the poll timer as pinned
  ...
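Below is a minimal, self-contained sketch of the bucketing idea behind the non-cascading wheel mentioned above. It is not taken from kernel/time/timer.c: the constants, the wheel_index() helper and the level layout are illustrative assumptions only. The idea it demonstrates is that each wheel level covers a larger expiry range at a coarser granularity, so a timer is hashed into its final bucket once at enqueue time instead of being cascaded between levels as time advances.

#include <stdio.h>

/*
 * Simplified illustration (not the kernel's actual constants or code) of a
 * level-based, non-cascading timer wheel index calculation.
 */
#define LVL_BITS	6			/* 64 buckets per level (illustrative) */
#define LVL_SIZE	(1UL << LVL_BITS)
#define LVL_MASK	(LVL_SIZE - 1)
#define LVL_CLK_SHIFT	3			/* each level is 8x coarser (illustrative) */
#define LVL_DEPTH	8

/* Granularity (in jiffies) and start of the range covered by level lvl. */
#define LVL_GRAN(lvl)	(1UL << ((lvl) * LVL_CLK_SHIFT))
#define LVL_START(lvl)	(LVL_SIZE << (((lvl) - 1) * LVL_CLK_SHIFT))

/* Map an absolute expiry (in jiffies) to a flat bucket index. */
static unsigned int wheel_index(unsigned long expires, unsigned long clk)
{
	unsigned long delta = expires - clk;
	unsigned int lvl;

	/* Pick the shallowest level whose range still covers the delta. */
	for (lvl = 0; lvl < LVL_DEPTH - 1; lvl++) {
		if (delta < LVL_START(lvl + 1))
			break;
	}
	/* Round the expiry up to the level's granularity, then mask. */
	expires = (expires + LVL_GRAN(lvl) - 1) >> (lvl * LVL_CLK_SHIFT);
	return lvl * LVL_SIZE + (expires & LVL_MASK);
}

int main(void)
{
	unsigned long now = 1000;

	printf("+1 jiffy -> bucket %u\n", wheel_index(now + 1, now));
	printf("+100     -> bucket %u\n", wheel_index(now + 100, now));
	printf("+100000  -> bucket %u\n", wheel_index(now + 100000, now));
	return 0;
}

The trade-off of this scheme is that far-out timers fire with the granularity of their level rather than exactly on time, which is acceptable for timeouts that are normally canceled or re-armed long before they expire.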
@@ -2751,23 +2751,18 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
  *  @ts: upper bound on process time suspension
  */
 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
-		const struct timespec *ts)
+		    const struct timespec *ts)
 {
+	ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
 	struct task_struct *tsk = current;
-	long timeout = MAX_SCHEDULE_TIMEOUT;
 	sigset_t mask = *which;
-	int sig;
+	int sig, ret = 0;
 
 	if (ts) {
 		if (!timespec_valid(ts))
 			return -EINVAL;
-		timeout = timespec_to_jiffies(ts);
-		/*
-		 * We can be close to the next tick, add another one
-		 * to ensure we will wait at least the time asked for.
-		 */
-		if (ts->tv_sec || ts->tv_nsec)
-			timeout++;
+		timeout = timespec_to_ktime(*ts);
+		to = &timeout;
 	}
 
 	/*
@@ -2778,7 +2773,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
 
 	spin_lock_irq(&tsk->sighand->siglock);
 	sig = dequeue_signal(tsk, &mask, info);
-	if (!sig && timeout) {
+	if (!sig && timeout.tv64) {
 		/*
 		 * None ready, temporarily unblock those we're interested
 		 * while we are sleeping in so that we'll be awakened when
@@ -2790,8 +2785,9 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
 		recalc_sigpending();
 		spin_unlock_irq(&tsk->sighand->siglock);
 
-		timeout = freezable_schedule_timeout_interruptible(timeout);
-
+		__set_current_state(TASK_INTERRUPTIBLE);
+		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
+							 HRTIMER_MODE_REL);
 		spin_lock_irq(&tsk->sighand->siglock);
 		__set_task_blocked(tsk, &tsk->real_blocked);
 		sigemptyset(&tsk->real_blocked);
@@ -2801,7 +2797,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
 
 	if (sig)
 		return sig;
-	return timeout ? -EINTR : -EAGAIN;
+	return ret ? -EINTR : -EAGAIN;
 }
 
 /**
@@ -30,7 +30,6 @@
  * struct alarm_base - Alarm timer bases
  * @lock:		Lock for syncrhonized access to the base
  * @timerqueue:		Timerqueue head managing the list of events
- * @timer:		hrtimer used to schedule events while running
  * @gettime:		Function to read the time correlating to the base
  * @base_clockid:	clockid for the base
  */
@@ -645,7 +645,7 @@ void tick_cleanup_dead_cpu(int cpu)
 #endif
 
 #ifdef CONFIG_SYSFS
-struct bus_type clockevents_subsys = {
+static struct bus_type clockevents_subsys = {
 	.name		= "clockevents",
 	.dev_name	= "clockevent",
 };
@@ -669,10 +669,12 @@ static void clocksource_enqueue(struct clocksource *cs)
 {
 	struct list_head *entry = &clocksource_list;
 	struct clocksource *tmp;
 
-	list_for_each_entry(tmp, &clocksource_list, list)
+	list_for_each_entry(tmp, &clocksource_list, list) {
 		/* Keep track of the place, where to insert */
-		if (tmp->rating >= cs->rating)
-			entry = &tmp->list;
+		if (tmp->rating < cs->rating)
+			break;
+		entry = &tmp->list;
+	}
 	list_add(&cs->list, entry);
 }
@@ -177,7 +177,7 @@ hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
 #endif
 }
 
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#ifdef CONFIG_NO_HZ_COMMON
 static inline
 struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
 					 int pinned)
@@ -43,13 +43,13 @@ static int udelay_test_single(struct seq_file *s, int usecs, uint32_t iters)
 	int allowed_error_ns = usecs * 5;
 
 	for (i = 0; i < iters; ++i) {
-		struct timespec ts1, ts2;
+		s64 kt1, kt2;
 		int time_passed;
 
-		ktime_get_ts(&ts1);
+		kt1 = ktime_get_ns();
 		udelay(usecs);
-		ktime_get_ts(&ts2);
-		time_passed = timespec_to_ns(&ts2) - timespec_to_ns(&ts1);
+		kt2 = ktime_get_ns();
+		time_passed = kt2 - kt1;
 
 		if (i == 0 || time_passed < min)
 			min = time_passed;
@@ -87,11 +87,11 @@ static int udelay_test_show(struct seq_file *s, void *v)
 	if (usecs > 0 && iters > 0) {
 		return udelay_test_single(s, usecs, iters);
 	} else if (usecs == 0) {
-		struct timespec ts;
+		struct timespec64 ts;
 
-		ktime_get_ts(&ts);
-		seq_printf(s, "udelay() test (lpj=%ld kt=%ld.%09ld)\n",
-				loops_per_jiffy, ts.tv_sec, ts.tv_nsec);
+		ktime_get_ts64(&ts);
+		seq_printf(s, "udelay() test (lpj=%ld kt=%lld.%09ld)\n",
+				loops_per_jiffy, (s64)ts.tv_sec, ts.tv_nsec);
 		seq_puts(s, "usage:\n");
 		seq_puts(s, "echo USECS [ITERS] > " DEBUGFS_FILENAME "\n");
 		seq_puts(s, "cat " DEBUGFS_FILENAME "\n");
@@ -75,6 +75,7 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 }
 
 static struct clock_event_device ce_broadcast_hrtimer = {
+	.name			= "bc_hrtimer",
 	.set_state_shutdown	= bc_shutdown,
 	.set_next_ktime		= bc_set_next,
 	.features		= CLOCK_EVT_FEAT_ONESHOT |
@@ -164,3 +164,4 @@ static inline void timers_update_migration(bool update_nohz) { }
 DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
 
 extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
+void timer_clear_idle(void);
@@ -31,7 +31,7 @@
 #include <trace/events/timer.h>
 
 /*
- * Per cpu nohz control structure
+ * Per-CPU nohz control structure
  */
 static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
@@ -61,7 +61,7 @@ static void tick_do_update_jiffies64(ktime_t now)
 	if (delta.tv64 < tick_period.tv64)
 		return;
 
-	/* Reevalute with jiffies_lock held */
+	/* Reevaluate with jiffies_lock held */
 	write_seqlock(&jiffies_lock);
 
 	delta = ktime_sub(now, last_jiffies_update);
@@ -116,8 +116,8 @@ static void tick_sched_do_timer(ktime_t now)
 #ifdef CONFIG_NO_HZ_COMMON
 	/*
 	 * Check if the do_timer duty was dropped. We don't care about
-	 * concurrency: This happens only when the cpu in charge went
-	 * into a long sleep. If two cpus happen to assign themself to
+	 * concurrency: This happens only when the CPU in charge went
+	 * into a long sleep. If two CPUs happen to assign themselves to
 	 * this duty, then the jiffies update is still serialized by
 	 * jiffies_lock.
 	 */
@@ -349,7 +349,7 @@ void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 /*
  * Re-evaluate the need for the tick as we switch the current task.
  * It might need the tick due to per task/process properties:
- * perf events, posix cpu timers, ...
+ * perf events, posix CPU timers, ...
  */
 void __tick_nohz_task_switch(void)
 {
@@ -509,8 +509,8 @@ int tick_nohz_tick_stopped(void)
  *
  * In case the sched_tick was stopped on this CPU, we have to check if jiffies
  * must be updated. Otherwise an interrupt handler could use a stale jiffy
- * value. We do this unconditionally on any cpu, as we don't know whether the
- * cpu, which has the update task assigned is in a long sleep.
+ * value. We do this unconditionally on any CPU, as we don't know whether the
+ * CPU, which has the update task assigned is in a long sleep.
  */
 static void tick_nohz_update_jiffies(ktime_t now)
 {
@@ -526,7 +526,7 @@ static void tick_nohz_update_jiffies(ktime_t now)
 }
 
 /*
- * Updates the per cpu time idle statistics counters
+ * Updates the per-CPU time idle statistics counters
  */
 static void
 update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
@@ -566,12 +566,12 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 }
 
 /**
- * get_cpu_idle_time_us - get the total idle time of a cpu
+ * get_cpu_idle_time_us - get the total idle time of a CPU
  * @cpu: CPU number to query
  * @last_update_time: variable to store update time in. Do not update
  * counters if NULL.
  *
- * Return the cummulative idle time (since boot) for a given
+ * Return the cumulative idle time (since boot) for a given
  * CPU, in microseconds.
  *
  * This time is measured via accounting rather than sampling,
@@ -607,12 +607,12 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
 
 /**
- * get_cpu_iowait_time_us - get the total iowait time of a cpu
+ * get_cpu_iowait_time_us - get the total iowait time of a CPU
  * @cpu: CPU number to query
  * @last_update_time: variable to store update time in. Do not update
  * counters if NULL.
  *
- * Return the cummulative iowait time (since boot) for a given
+ * Return the cumulative iowait time (since boot) for a given
  * CPU, in microseconds.
  *
  * This time is measured via accounting rather than sampling,
@@ -700,6 +700,12 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	delta = next_tick - basemono;
 	if (delta <= (u64)TICK_NSEC) {
 		tick.tv64 = 0;
+
+		/*
+		 * Tell the timer code that the base is not idle, i.e. undo
+		 * the effect of get_next_timer_interrupt():
+		 */
+		timer_clear_idle();
 		/*
 		 * We've not stopped the tick yet, and there's a timer in the
 		 * next period, so no point in stopping it either, bail.
@@ -726,14 +732,14 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	}
 
 	/*
-	 * If this cpu is the one which updates jiffies, then give up
-	 * the assignment and let it be taken by the cpu which runs
-	 * the tick timer next, which might be this cpu as well. If we
+	 * If this CPU is the one which updates jiffies, then give up
+	 * the assignment and let it be taken by the CPU which runs
+	 * the tick timer next, which might be this CPU as well. If we
 	 * don't drop this here the jiffies might be stale and
 	 * do_timer() never invoked. Keep track of the fact that it
-	 * was the one which had the do_timer() duty last. If this cpu
+	 * was the one which had the do_timer() duty last. If this CPU
 	 * is the one which had the do_timer() duty last, we limit the
-	 * sleep time to the timekeeping max_deferement value.
+	 * sleep time to the timekeeping max_deferment value.
 	 * Otherwise we can sleep as long as we want.
 	 */
 	delta = timekeeping_max_deferment();
@@ -809,6 +815,12 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 	tick_do_update_jiffies64(now);
 	cpu_load_update_nohz_stop();
 
+	/*
+	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
+	 * the clock forward checks in the enqueue path:
+	 */
+	timer_clear_idle();
+
 	calc_load_exit_idle();
 	touch_softlockup_watchdog_sched();
 	/*
@@ -841,9 +853,9 @@ static void tick_nohz_full_update_tick(struct tick_sched *ts)
 static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 {
 	/*
-	 * If this cpu is offline and it is the one which updates
+	 * If this CPU is offline and it is the one which updates
 	 * jiffies, then give up the assignment and let it be taken by
-	 * the cpu which runs the tick timer next. If we don't drop
+	 * the CPU which runs the tick timer next. If we don't drop
 	 * this here the jiffies might be stale and do_timer() never
 	 * invoked.
 	 */
@@ -896,11 +908,10 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
 	ktime_t now, expires;
 	int cpu = smp_processor_id();
 
-	now = tick_nohz_start_idle(ts);
-
 	if (can_stop_idle_tick(cpu, ts)) {
 		int was_stopped = ts->tick_stopped;
 
+		now = tick_nohz_start_idle(ts);
 		ts->idle_calls++;
 
 		expires = tick_nohz_stop_sched_tick(ts, now, cpu);
@@ -933,11 +944,11 @@ void tick_nohz_idle_enter(void)
 	WARN_ON_ONCE(irqs_disabled());
 
 	/*
-	 * Update the idle state in the scheduler domain hierarchy
-	 * when tick_nohz_stop_sched_tick() is called from the idle loop.
-	 * State will be updated to busy during the first busy tick after
-	 * exiting idle.
-	 */
+	 * Update the idle state in the scheduler domain hierarchy
+	 * when tick_nohz_stop_sched_tick() is called from the idle loop.
+	 * State will be updated to busy during the first busy tick after
+	 * exiting idle.
+	 */
 	set_cpu_sd_state_idle();
 
 	local_irq_disable();
@@ -1092,35 +1103,6 @@ static void tick_nohz_switch_to_nohz(void)
 	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
 }
 
-/*
- * When NOHZ is enabled and the tick is stopped, we need to kick the
- * tick timer from irq_enter() so that the jiffies update is kept
- * alive during long running softirqs. That's ugly as hell, but
- * correctness is key even if we need to fix the offending softirq in
- * the first place.
- *
- * Note, this is different to tick_nohz_restart. We just kick the
- * timer and do not touch the other magic bits which need to be done
- * when idle is left.
- */
-static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
-{
-#if 0
-	/* Switch back to 2.6.27 behaviour */
-	ktime_t delta;
-
-	/*
-	 * Do not touch the tick device, when the next expiry is either
-	 * already reached or less/equal than the tick period.
-	 */
-	delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
-	if (delta.tv64 <= tick_period.tv64)
-		return;
-
-	tick_nohz_restart(ts, now);
-#endif
-}
-
 static inline void tick_nohz_irq_enter(void)
 {
 	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
@@ -1131,10 +1113,8 @@ static inline void tick_nohz_irq_enter(void)
 	now = ktime_get();
 	if (ts->idle_active)
 		tick_nohz_stop_idle(ts, now);
-	if (ts->tick_stopped) {
+	if (ts->tick_stopped)
 		tick_nohz_update_jiffies(now);
-		tick_nohz_kick_tick(ts, now);
-	}
 }
 
 #else
@@ -1211,7 +1191,7 @@ void tick_setup_sched_timer(void)
 	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	ts->sched_timer.function = tick_sched_timer;
 
-	/* Get the next period (per cpu) */
+	/* Get the next period (per-CPU) */
 	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
 
 	/* Offset the tick to avert jiffies_lock contention. */
@@ -67,20 +67,21 @@ static const unsigned short __mon_yday[2][13] = {
 #define SECS_PER_DAY	(SECS_PER_HOUR * 24)
 
 /**
- * time_to_tm - converts the calendar time to local broken-down time
+ * time64_to_tm - converts the calendar time to local broken-down time
  *
 * @totalsecs	the number of seconds elapsed since 00:00:00 on January 1, 1970,
 *		Coordinated Universal Time (UTC).
 * @offset	offset seconds adding to totalsecs.
 * @result	pointer to struct tm variable to receive broken-down time
  */
-void time_to_tm(time_t totalsecs, int offset, struct tm *result)
+void time64_to_tm(time64_t totalsecs, int offset, struct tm *result)
 {
 	long days, rem, y;
+	int remainder;
 	const unsigned short *ip;
 
-	days = totalsecs / SECS_PER_DAY;
-	rem = totalsecs % SECS_PER_DAY;
+	days = div_s64_rem(totalsecs, SECS_PER_DAY, &remainder);
+	rem = remainder;
 	rem += offset;
 	while (rem < 0) {
 		rem += SECS_PER_DAY;
@@ -124,4 +125,4 @@ void time_to_tm(time_t totalsecs, int offset, struct tm *result)
 	result->tm_mon = y;
 	result->tm_mday = days + 1;
 }
-EXPORT_SYMBOL(time_to_tm);
+EXPORT_SYMBOL(time64_to_tm);
@@ -480,10 +480,12 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
 	 * users are removed, this can be killed.
 	 */
 	remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
-	tk->tkr_mono.xtime_nsec -= remainder;
-	tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
-	tk->ntp_error += remainder << tk->ntp_error_shift;
-	tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
+	if (remainder != 0) {
+		tk->tkr_mono.xtime_nsec -= remainder;
+		tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
+		tk->ntp_error += remainder << tk->ntp_error_shift;
+		tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
+	}
 }
 #else
 #define old_vsyscall_fixup(tk)
kernel/time/timer.c (1115 lines changed): file diff suppressed because it is too large.
@@ -279,7 +279,7 @@ static void print_name_offset(struct seq_file *m, unsigned long addr)
 
 static int tstats_show(struct seq_file *m, void *v)
 {
-	struct timespec period;
+	struct timespec64 period;
 	struct entry *entry;
 	unsigned long ms;
 	long events = 0;
@@ -295,11 +295,11 @@ static int tstats_show(struct seq_file *m, void *v)
 
 	time = ktime_sub(time_stop, time_start);
 
-	period = ktime_to_timespec(time);
+	period = ktime_to_timespec64(time);
 	ms = period.tv_nsec / 1000000;
 
 	seq_puts(m, "Timer Stats Version: v0.3\n");
-	seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
+	seq_printf(m, "Sample period: %ld.%03ld s\n", (long)period.tv_sec, ms);
 	if (atomic_read(&overflow_count))
 		seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
 	seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");