Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer and time updates from Thomas Gleixner:
 "A rather large update of timers, timekeeping & co

   - Core timekeeping code is year-2038 safe now for 32bit machines.
     Now we just need to fix all in kernel users and the gazillion of
     user space interfaces which rely on timespec/timeval :)

   - Better cache layout for the timekeeping internal data structures.

   - Proper nanosecond based interfaces for in kernel users.

   - Tree wide cleanup of code which wants nanoseconds but does hoops
     and loops to convert back and forth from timespecs. Some of it
     definitely belongs into the ugly code museum.

   - Consolidation of the timekeeping interface zoo.

   - A fast NMI safe accessor to clock monotonic for tracing. This is a
     long standing request to support correlated user/kernel space
     traces. With proper NTP frequency correction it's also suitable
     for correlation of traces across separate machines.

   - Checkpoint/restart support for timerfd.

   - A few NOHZ[_FULL] improvements in the [hr]timer code.

   - Code move from kernel to kernel/time of all time* related code.

   - New clocksource/event drivers from the ARM universe. I'm really
     impressed that despite an architected timer in the newer chips SoC
     manufacturers insist on inventing new and differently broken SoC
     specific timers.

 [ Ed. "Impressed"? I don't think that word means what you think it means ]

   - Another round of code move from arch to drivers. Looks like most
     of the legacy mess in ARM regarding timers is sorted out except
     for a few obnoxious strongholds.

   - The usual updates and fixlets all over the place"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (114 commits)
  timekeeping: Fixup typo in update_vsyscall_old definition
  clocksource: document some basic timekeeping concepts
  timekeeping: Use cached ntp_tick_length when accumulating error
  timekeeping: Rework frequency adjustments to work better w/ nohz
  timekeeping: Minor fixup for timespec64->timespec assignment
  ftrace: Provide trace clocks monotonic
  timekeeping: Provide fast and NMI safe access to CLOCK_MONOTONIC
  seqcount: Add raw_write_seqcount_latch()
  seqcount: Provide raw_read_seqcount()
  timekeeping: Use tk_read_base as argument for timekeeping_get_ns()
  timekeeping: Create struct tk_read_base and use it in struct timekeeper
  timekeeping: Restructure the timekeeper some more
  clocksource: Get rid of cycle_last
  clocksource: Move cycle_last validation to core code
  clocksource: Make delta calculation a function
  wireless: ath9k: Get rid of timespec conversions
  drm: vmwgfx: Use nsec based interfaces
  drm: i915: Use nsec based interfaces
  timekeeping: Provide ktime_get_raw()
  hangcheck-timer: Use ktime_get_ns()
  ...
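The NMI-safe accessor mentioned above rests on the seqcount latch primitives added in this series (raw_write_seqcount_latch() / raw_read_seqcount()). The following is a minimal sketch of the latch idea, not the kernel's actual ktime_get_mono_fast_ns() implementation: the updater keeps two copies of the timekeeping data and flips between them, so a reader that interrupts the updater, even from NMI context, always finds a copy that is not being modified. All names here (struct tk_fast, tkf_update, tkf_read_ns) are illustrative, not the real kernel symbols.

	/*
	 * Sketch of a seqcount-latch protected reader/updater pair.
	 * Scaling of the counter delta by mult/shift is omitted for
	 * brevity; the real code converts cycles to nanoseconds.
	 */
	struct tk_fast {
		seqcount_t	seq;
		struct {
			u64	base_ns;	/* ns at last update */
			u64	cycle_last;	/* counter at last update */
		} base[2];			/* two copies: the latch */
	};

	static void tkf_update(struct tk_fast *tkf, u64 base_ns, u64 cycle_last)
	{
		raw_write_seqcount_latch(&tkf->seq);	/* readers -> base[1] */
		tkf->base[0].base_ns = base_ns;
		tkf->base[0].cycle_last = cycle_last;
		raw_write_seqcount_latch(&tkf->seq);	/* readers -> base[0] */
		tkf->base[1] = tkf->base[0];
	}

	static u64 tkf_read_ns(struct tk_fast *tkf, u64 (*read_counter)(void))
	{
		unsigned int seq;
		u64 now;

		do {
			seq = raw_read_seqcount(&tkf->seq);
			/* seq & 1 selects the copy not being written */
			now = tkf->base[seq & 1].base_ns +
				(read_counter() - tkf->base[seq & 1].cycle_last);
		} while (read_seqcount_retry(&tkf->seq, seq));

		return now;
	}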
kernel/Makefile:

@@ -3,12 +3,11 @@
 #
 
 obj-y     = fork.o exec_domain.o panic.o \
-	    cpu.o exit.o itimer.o time.o softirq.o resource.o \
-	    sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
+	    cpu.o exit.o softirq.o resource.o \
+	    sysctl.o sysctl_binary.o capability.o ptrace.o user.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
-	    extable.o params.o posix-timers.o \
-	    kthread.o sys_ni.o posix-cpu-timers.o \
-	    hrtimer.o nsproxy.o \
+	    extable.o params.o \
+	    kthread.o sys_ni.o nsproxy.o \
 	    notifier.o ksysfs.o cred.o reboot.o \
 	    async.o range.o groups.o smpboot.o
@@ -110,22 +109,6 @@ targets += config_data.h
 $(obj)/config_data.h: $(obj)/config_data.gz FORCE
 	$(call filechk,ikconfiggz)
 
-$(obj)/time.o: $(obj)/timeconst.h
-
-quiet_cmd_hzfile = HZFILE $@
-      cmd_hzfile = echo "hz=$(CONFIG_HZ)" > $@
-
-targets += hz.bc
-$(obj)/hz.bc: $(objtree)/include/config/hz.h FORCE
-	$(call if_changed,hzfile)
-
-quiet_cmd_bc  = BC      $@
-      cmd_bc  = bc -q $(filter-out FORCE,$^) > $@
-
-targets += timeconst.h
-$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
-	$(call if_changed,bc)
-
 ###############################################################################
 #
 # Roll all the X.509 certificates that we can find together and pull them into
@@ -458,9 +458,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
 	acct_t ac;
 	mm_segment_t fs;
 	unsigned long flim;
-	u64 elapsed;
-	u64 run_time;
-	struct timespec uptime;
+	u64 elapsed, run_time;
 	struct tty_struct *tty;
 	const struct cred *orig_cred;
 
@@ -484,10 +482,8 @@ static void do_acct_process(struct bsd_acct_struct *acct,
 	strlcpy(ac.ac_comm, current->comm, sizeof(ac.ac_comm));
 
 	/* calculate run_time in nsec*/
-	do_posix_clock_monotonic_gettime(&uptime);
-	run_time = (u64)uptime.tv_sec*NSEC_PER_SEC + uptime.tv_nsec;
-	run_time -= (u64)current->group_leader->start_time.tv_sec * NSEC_PER_SEC
-		       + current->group_leader->start_time.tv_nsec;
+	run_time = ktime_get_ns();
+	run_time -= current->group_leader->start_time;
 	/* convert nsec -> AHZ */
 	elapsed = nsec_to_AHZ(run_time);
 #if ACCT_VERSION==3
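The acct.c change above is the template for most conversions in this pull: sample once with ktime_get_ns() and subtract plain u64 nanosecond values instead of reading a struct timespec and open-coding the seconds/nanoseconds arithmetic. A minimal sketch of the resulting pattern (the elapsed_ns() helper is hypothetical, not part of the series):

	#include <linux/ktime.h>

	/*
	 * Hypothetical helper illustrating the pattern: @start was
	 * sampled earlier with ktime_get_ns(). Plain u64 math, no
	 * timespec normalization, and no 32-bit tv_sec overflow in 2038.
	 */
	static inline u64 elapsed_ns(u64 start)
	{
		return ktime_get_ns() - start;
	}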
@@ -2472,7 +2472,7 @@ static void kdb_gmtime(struct timespec *tv, struct kdb_tm *tm)
 static void kdb_sysinfo(struct sysinfo *val)
 {
 	struct timespec uptime;
-	do_posix_clock_monotonic_gettime(&uptime);
+	ktime_get_ts(&uptime);
 	memset(val, 0, sizeof(*val));
 	val->uptime = uptime.tv_sec;
 	val->loads[0] = avenrun[0];
@@ -46,42 +46,25 @@ void __delayacct_tsk_init(struct task_struct *tsk)
 }
 
 /*
- * Start accounting for a delay statistic using
- * its starting timestamp (@start)
+ * Finish delay accounting for a statistic using its timestamps (@start),
+ * accumalator (@total) and @count
  */
-
-static inline void delayacct_start(struct timespec *start)
+static void delayacct_end(u64 *start, u64 *total, u32 *count)
 {
-	do_posix_clock_monotonic_gettime(start);
-}
-
-/*
- * Finish delay accounting for a statistic using
- * its timestamps (@start, @end), accumalator (@total) and @count
- */
-
-static void delayacct_end(struct timespec *start, struct timespec *end,
-				u64 *total, u32 *count)
-{
-	struct timespec ts;
-	s64 ns;
+	s64 ns = ktime_get_ns() - *start;
 	unsigned long flags;
 
-	do_posix_clock_monotonic_gettime(end);
-	ts = timespec_sub(*end, *start);
-	ns = timespec_to_ns(&ts);
-	if (ns < 0)
-		return;
-
-	spin_lock_irqsave(&current->delays->lock, flags);
-	*total += ns;
-	(*count)++;
-	spin_unlock_irqrestore(&current->delays->lock, flags);
+	if (ns > 0) {
+		spin_lock_irqsave(&current->delays->lock, flags);
+		*total += ns;
+		(*count)++;
+		spin_unlock_irqrestore(&current->delays->lock, flags);
+	}
 }
 
 void __delayacct_blkio_start(void)
 {
-	delayacct_start(&current->delays->blkio_start);
+	current->delays->blkio_start = ktime_get_ns();
 }
 
 void __delayacct_blkio_end(void)
@@ -89,35 +72,29 @@ void __delayacct_blkio_end(void)
 	if (current->delays->flags & DELAYACCT_PF_SWAPIN)
 		/* Swapin block I/O */
 		delayacct_end(&current->delays->blkio_start,
-			&current->delays->blkio_end,
 			&current->delays->swapin_delay,
 			&current->delays->swapin_count);
 	else	/* Other block I/O */
 		delayacct_end(&current->delays->blkio_start,
-			&current->delays->blkio_end,
 			&current->delays->blkio_delay,
 			&current->delays->blkio_count);
 }
 
 int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 {
-	s64 tmp;
-	unsigned long t1;
-	unsigned long long t2, t3;
-	unsigned long flags;
-	struct timespec ts;
 	cputime_t utime, stime, stimescaled, utimescaled;
+	unsigned long long t2, t3;
+	unsigned long flags, t1;
+	s64 tmp;
 
-	tmp = (s64)d->cpu_run_real_total;
 	task_cputime(tsk, &utime, &stime);
-	cputime_to_timespec(utime + stime, &ts);
-	tmp += timespec_to_ns(&ts);
+	tmp = (s64)d->cpu_run_real_total;
+	tmp += cputime_to_nsecs(utime + stime);
 	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
 
-	tmp = (s64)d->cpu_scaled_run_real_total;
 	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
-	cputime_to_timespec(utimescaled + stimescaled, &ts);
-	tmp += timespec_to_ns(&ts);
+	tmp = (s64)d->cpu_scaled_run_real_total;
+	tmp += cputime_to_nsecs(utimescaled + stimescaled);
 	d->cpu_scaled_run_real_total =
 		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
 
@@ -169,13 +146,12 @@ __u64 __delayacct_blkio_ticks(struct task_struct *tsk)
 
 void __delayacct_freepages_start(void)
 {
-	delayacct_start(&current->delays->freepages_start);
+	current->delays->freepages_start = ktime_get_ns();
 }
 
 void __delayacct_freepages_end(void)
 {
 	delayacct_end(&current->delays->freepages_start,
-		&current->delays->freepages_end,
 		&current->delays->freepages_delay,
 		&current->delays->freepages_count);
 }
@@ -1261,9 +1261,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	posix_cpu_timers_init(p);
 
-	do_posix_clock_monotonic_gettime(&p->start_time);
-	p->real_start_time = p->start_time;
-	monotonic_to_bootbased(&p->real_start_time);
+	p->start_time = ktime_get_ns();
+	p->real_start_time = ktime_get_boot_ns();
 	p->io_context = NULL;
 	p->audit_context = NULL;
 	if (clone_flags & CLONE_THREAD)
@@ -12,6 +12,11 @@ config CLOCKSOURCE_WATCHDOG
 config ARCH_CLOCKSOURCE_DATA
 	bool
 
+# Clocksources require validation of the clocksource against the last
+# cycle update - x86/TSC misfeature
+config CLOCKSOURCE_VALIDATE_LAST_CYCLE
+	bool
+
 # Timekeeping vsyscall support
 config GENERIC_TIME_VSYSCALL
 	bool
@@ -20,10 +25,6 @@ config GENERIC_TIME_VSYSCALL
 config GENERIC_TIME_VSYSCALL_OLD
 	bool
 
-# ktime_t scalar 64bit nsec representation
-config KTIME_SCALAR
-	bool
-
 # Old style timekeeping
 config ARCH_USES_GETTIMEOFFSET
 	bool
@@ -1,3 +1,4 @@
+obj-y += time.o timer.o hrtimer.o itimer.o posix-timers.o posix-cpu-timers.o
 obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
 obj-y += timeconv.o posix-clock.o alarmtimer.o
 
@@ -12,3 +13,21 @@ obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o
 obj-$(CONFIG_TICK_ONESHOT) += tick-sched.o
 obj-$(CONFIG_TIMER_STATS) += timer_stats.o
 obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o
+obj-$(CONFIG_TEST_UDELAY) += udelay_test.o
+
+$(obj)/time.o: $(obj)/timeconst.h
+
+quiet_cmd_hzfile = HZFILE $@
+      cmd_hzfile = echo "hz=$(CONFIG_HZ)" > $@
+
+targets += hz.bc
+$(obj)/hz.bc: $(objtree)/include/config/hz.h FORCE
+	$(call if_changed,hzfile)
+
+quiet_cmd_bc  = BC      $@
+      cmd_bc  = bc -q $(filter-out FORCE,$^) > $@
+
+targets += timeconst.h
+$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
+	$(call if_changed,bc)
@@ -32,6 +32,7 @@
 #include <linux/kthread.h>
 
 #include "tick-internal.h"
+#include "timekeeping_internal.h"
 
 void timecounter_init(struct timecounter *tc,
 		      const struct cyclecounter *cc,
@@ -249,7 +250,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
 static void clocksource_watchdog(unsigned long data)
 {
 	struct clocksource *cs;
-	cycle_t csnow, wdnow;
+	cycle_t csnow, wdnow, delta;
 	int64_t wd_nsec, cs_nsec;
 	int next_cpu, reset_pending;
 
@@ -282,11 +283,12 @@ static void clocksource_watchdog(unsigned long data)
 			continue;
 		}
 
-		wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
-					     watchdog->mult, watchdog->shift);
+		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
+		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
+					     watchdog->shift);
 
-		cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
-					     cs->mask, cs->mult, cs->shift);
+		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
+		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
 		cs->cs_last = csnow;
 		cs->wd_last = wdnow;
@@ -54,6 +54,8 @@
 
 #include <trace/events/timer.h>
 
+#include "timekeeping.h"
+
 /*
  * The timer bases:
  *
@@ -114,21 +116,18 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id)
  */
 static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
 {
-	ktime_t xtim, mono, boot;
-	struct timespec xts, tom, slp;
-	s32 tai_offset;
+	ktime_t xtim, mono, boot, tai;
+	ktime_t off_real, off_boot, off_tai;
 
-	get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp);
-	tai_offset = timekeeping_get_tai_offset();
+	mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai);
+	boot = ktime_add(mono, off_boot);
+	xtim = ktime_add(mono, off_real);
+	tai = ktime_add(xtim, off_tai);
 
-	xtim = timespec_to_ktime(xts);
-	mono = ktime_add(xtim, timespec_to_ktime(tom));
-	boot = ktime_add(mono, timespec_to_ktime(slp));
 	base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
 	base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
 	base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
-	base->clock_base[HRTIMER_BASE_TAI].softirq_time =
-				ktime_add(xtim, ktime_set(tai_offset, 0));
+	base->clock_base[HRTIMER_BASE_TAI].softirq_time = tai;
 }
 
 /*
@@ -264,60 +263,6 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
  * too large for inlining:
  */
 #if BITS_PER_LONG < 64
-# ifndef CONFIG_KTIME_SCALAR
-/**
- * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
- * @kt:		addend
- * @nsec:	the scalar nsec value to add
- *
- * Returns the sum of kt and nsec in ktime_t format
- */
-ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
-{
-	ktime_t tmp;
-
-	if (likely(nsec < NSEC_PER_SEC)) {
-		tmp.tv64 = nsec;
-	} else {
-		unsigned long rem = do_div(nsec, NSEC_PER_SEC);
-
-		/* Make sure nsec fits into long */
-		if (unlikely(nsec > KTIME_SEC_MAX))
-			return (ktime_t){ .tv64 = KTIME_MAX };
-
-		tmp = ktime_set((long)nsec, rem);
-	}
-
-	return ktime_add(kt, tmp);
-}
-
-EXPORT_SYMBOL_GPL(ktime_add_ns);
-
-/**
- * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
- * @kt:		minuend
- * @nsec:	the scalar nsec value to subtract
- *
- * Returns the subtraction of @nsec from @kt in ktime_t format
- */
-ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
-{
-	ktime_t tmp;
-
-	if (likely(nsec < NSEC_PER_SEC)) {
-		tmp.tv64 = nsec;
-	} else {
-		unsigned long rem = do_div(nsec, NSEC_PER_SEC);
-
-		tmp = ktime_set((long)nsec, rem);
-	}
-
-	return ktime_sub(kt, tmp);
-}
-
-EXPORT_SYMBOL_GPL(ktime_sub_ns);
-# endif /* !CONFIG_KTIME_SCALAR */
-
 /*
  * Divide a ktime value by a nanosecond value
  */
@@ -337,6 +282,7 @@ u64 ktime_divns(const ktime_t kt, s64 div)
 
 	return dclc;
 }
+EXPORT_SYMBOL_GPL(ktime_divns);
 #endif /* BITS_PER_LONG >= 64 */
 
 /*
@@ -602,6 +548,11 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
+ * Note, that in case the state has HRTIMER_STATE_CALLBACK set, no reprogramming
+ * and no expiry check happens. The timer gets enqueued into the rbtree. The
+ * reprogramming and expiry check is done in the hrtimer_interrupt or in the
+ * softirq.
+ *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
 static int hrtimer_reprogram(struct hrtimer *timer,
@@ -662,25 +613,13 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
 	base->hres_active = 0;
 }
 
-/*
- * When High resolution timers are active, try to reprogram. Note, that in case
- * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
- * check happens. The timer gets enqueued into the rbtree. The reprogramming
- * and expiry check is done in the hrtimer_interrupt or in the softirq.
- */
-static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-					    struct hrtimer_clock_base *base)
-{
-	return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
-}
-
 static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
 {
 	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
 	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
 	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
 
-	return ktime_get_update_offsets(offs_real, offs_boot, offs_tai);
+	return ktime_get_update_offsets_now(offs_real, offs_boot, offs_tai);
 }
 
 /*
@@ -755,8 +694,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; }
 static inline int hrtimer_switch_to_hres(void) { return 0; }
 static inline void
 hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
-static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-					    struct hrtimer_clock_base *base)
+static inline int hrtimer_reprogram(struct hrtimer *timer,
+				    struct hrtimer_clock_base *base)
 {
 	return 0;
 }
@@ -1013,14 +952,25 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 
 	leftmost = enqueue_hrtimer(timer, new_base);
 
-	/*
-	 * Only allow reprogramming if the new base is on this CPU.
-	 * (it might still be on another CPU if the timer was pending)
-	 *
-	 * XXX send_remote_softirq() ?
-	 */
-	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
-		&& hrtimer_enqueue_reprogram(timer, new_base)) {
+	if (!leftmost) {
+		unlock_hrtimer_base(timer, &flags);
+		return ret;
+	}
+
+	if (!hrtimer_is_hres_active(timer)) {
+		/*
+		 * Kick to reschedule the next tick to handle the new timer
+		 * on dynticks target.
+		 */
+		wake_up_nohz_cpu(new_base->cpu_base->cpu);
+	} else if (new_base->cpu_base == &__get_cpu_var(hrtimer_bases) &&
+			hrtimer_reprogram(timer, new_base)) {
+		/*
+		 * Only allow reprogramming if the new base is on this CPU.
+		 * (it might still be on another CPU if the timer was pending)
+		 *
+		 * XXX send_remote_softirq() ?
+		 */
 		if (wakeup) {
 			/*
 			 * We need to drop cpu_base->lock to avoid a
@@ -1680,6 +1630,7 @@ static void init_hrtimers_cpu(int cpu)
 		timerqueue_init_head(&cpu_base->clock_base[i].active);
 	}
 
+	cpu_base->cpu = cpu;
 	hrtimer_init_hres(cpu_base);
 }
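The __hrtimer_start_range_ns() restructuring above is one of the NOHZ improvements called out in the merge message: when the newly enqueued timer becomes the leftmost (earliest) one and high resolution mode is not active, the target gets an explicit wake_up_nohz_cpu() kick, so a dynticks CPU re-evaluates its next tick instead of sleeping past the new timer.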
@@ -466,7 +466,8 @@ static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
 
 static void sync_cmos_clock(struct work_struct *work)
 {
-	struct timespec now, next;
+	struct timespec64 now;
+	struct timespec next;
 	int fail = 1;
 
 	/*
@@ -485,9 +486,9 @@ static void sync_cmos_clock(struct work_struct *work)
 		return;
 	}
 
-	getnstimeofday(&now);
+	getnstimeofday64(&now);
 	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
-		struct timespec adjust = now;
+		struct timespec adjust = timespec64_to_timespec(now);
 
 		fail = -ENODEV;
 		if (persistent_clock_is_local)
@@ -531,7 +532,7 @@ void ntp_notify_cmos_timer(void) { }
 /*
  * Propagate a new txc->status value into the NTP state:
  */
-static inline void process_adj_status(struct timex *txc, struct timespec *ts)
+static inline void process_adj_status(struct timex *txc, struct timespec64 *ts)
 {
 	if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
 		time_state = TIME_OK;
@@ -554,7 +555,7 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
 
 
 static inline void process_adjtimex_modes(struct timex *txc,
-					  struct timespec *ts,
+					  struct timespec64 *ts,
 					  s32 *time_tai)
 {
 	if (txc->modes & ADJ_STATUS)
@@ -640,7 +641,7 @@ int ntp_validate_timex(struct timex *txc)
 * adjtimex mainly allows reading (and writing, if superuser) of
 * kernel time-keeping variables. used by xntpd.
 */
-int __do_adjtimex(struct timex *txc, struct timespec *ts, s32 *time_tai)
+int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
 {
 	int result;
 
@@ -684,7 +685,7 @@ int __do_adjtimex(struct timex *txc, struct timespec *ts, s32 *time_tai)
 	/* fill PPS status fields */
 	pps_fill_timex(txc);
 
-	txc->time.tv_sec = ts->tv_sec;
+	txc->time.tv_sec = (time_t)ts->tv_sec;
 	txc->time.tv_usec = ts->tv_nsec;
 	if (!(time_status & STA_NANO))
 		txc->time.tv_usec /= NSEC_PER_USEC;
@@ -7,6 +7,6 @@ extern void ntp_clear(void);
 extern u64 ntp_tick_length(void);
 extern int second_overflow(unsigned long secs);
 extern int ntp_validate_timex(struct timex *);
-extern int __do_adjtimex(struct timex *, struct timespec *, s32 *);
+extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
 extern void __hardpps(const struct timespec *, const struct timespec *);
 #endif /* _LINUX_NTP_INTERNAL_H */
@@ -49,6 +49,8 @@
 #include <linux/export.h>
 #include <linux/hashtable.h>
 
+#include "timekeeping.h"
+
 /*
  * Management arrays for POSIX timers. Timers are now kept in static hash table
  * with 512 entries.
@@ -4,6 +4,8 @@
 #include <linux/hrtimer.h>
 #include <linux/tick.h>
 
+#include "timekeeping.h"
+
 extern seqlock_t jiffies_lock;
 
 #define CS_NAME_LEN	32
@@ -42,6 +42,7 @@
 #include <asm/unistd.h>
 
 #include "timeconst.h"
+#include "timekeeping.h"
 
 /*
  * The timezone where the local system is located. Used as a default by some
@@ -420,6 +421,68 @@ struct timeval ns_to_timeval(const s64 nsec)
 }
 EXPORT_SYMBOL(ns_to_timeval);
 
+#if BITS_PER_LONG == 32
+/**
+ * set_normalized_timespec - set timespec sec and nsec parts and normalize
+ *
+ * @ts:		pointer to timespec variable to be set
+ * @sec:	seconds to set
+ * @nsec:	nanoseconds to set
+ *
+ * Set seconds and nanoseconds field of a timespec variable and
+ * normalize to the timespec storage format
+ *
+ * Note: The tv_nsec part is always in the range of
+ *	0 <= tv_nsec < NSEC_PER_SEC
+ * For negative values only the tv_sec field is negative !
+ */
+void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec)
+{
+	while (nsec >= NSEC_PER_SEC) {
+		/*
+		 * The following asm() prevents the compiler from
+		 * optimising this loop into a modulo operation. See
+		 * also __iter_div_u64_rem() in include/linux/time.h
+		 */
+		asm("" : "+rm"(nsec));
+		nsec -= NSEC_PER_SEC;
+		++sec;
+	}
+	while (nsec < 0) {
+		asm("" : "+rm"(nsec));
+		nsec += NSEC_PER_SEC;
+		--sec;
+	}
+	ts->tv_sec = sec;
+	ts->tv_nsec = nsec;
+}
+EXPORT_SYMBOL(set_normalized_timespec64);
+
+/**
+ * ns_to_timespec64 - Convert nanoseconds to timespec64
+ * @nsec:	the nanoseconds value to be converted
+ *
+ * Returns the timespec64 representation of the nsec parameter.
+ */
+struct timespec64 ns_to_timespec64(const s64 nsec)
+{
+	struct timespec64 ts;
+	s32 rem;
+
+	if (!nsec)
+		return (struct timespec64) {0, 0};
+
+	ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
+	if (unlikely(rem < 0)) {
+		ts.tv_sec--;
+		rem += NSEC_PER_SEC;
+	}
+	ts.tv_nsec = rem;
+
+	return ts;
+}
+EXPORT_SYMBOL(ns_to_timespec64);
+#endif
 /*
  * When we convert to jiffies then we interpret incoming values
  * the following way:
@@ -694,6 +757,7 @@ unsigned long nsecs_to_jiffies(u64 n)
 {
 	return (unsigned long)nsecs_to_jiffies64(n);
 }
+EXPORT_SYMBOL_GPL(nsecs_to_jiffies);
 
 /*
  * Add two timespec values and do a safety check for overflow.
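A quick worked example of the normalization rule called out in the comment above ("for negative values only the tv_sec field is negative"), tracing ns_to_timespec64() by hand:

	/*
	 * ns_to_timespec64(-1):
	 *   div_s64_rem(-1, NSEC_PER_SEC, &rem) -> tv_sec = 0, rem = -1
	 *   rem < 0, so tv_sec-- and rem += NSEC_PER_SEC
	 *   result: { .tv_sec = -1, .tv_nsec = 999999999 }
	 *
	 * ns_to_timespec64(1500000000):
	 *   result: { .tv_sec = 1, .tv_nsec = 500000000 }
	 */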
[File diff suppressed because it is too large. Load Diff]

kernel/time/timekeeping.h (new file, 20 lines):
@@ -0,0 +1,20 @@
+#ifndef _KERNEL_TIME_TIMEKEEPING_H
+#define _KERNEL_TIME_TIMEKEEPING_H
+/*
+ * Internal interfaces for kernel/time/
+ */
+extern ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real,
+					     ktime_t *offs_boot,
+					     ktime_t *offs_tai);
+extern ktime_t ktime_get_update_offsets_now(ktime_t *offs_real,
+					    ktime_t *offs_boot,
+					    ktime_t *offs_tai);
+
+extern int timekeeping_valid_for_hres(void);
+extern u64 timekeeping_max_deferment(void);
+extern int timekeeping_inject_offset(struct timespec *ts);
+extern s32 timekeeping_get_tai_offset(void);
+extern void timekeeping_set_tai_offset(s32 tai_offset);
+extern void timekeeping_clocktai(struct timespec *ts);
+
+#endif
@@ -67,7 +67,7 @@ static int __init tk_debug_sleep_time_init(void)
 }
 late_initcall(tk_debug_sleep_time_init);
 
-void tk_debug_account_sleep_time(struct timespec *t)
+void tk_debug_account_sleep_time(struct timespec64 *t)
 {
 	sleep_time_bin[fls(t->tv_sec)]++;
 }
@@ -3,12 +3,27 @@
 /*
  * timekeeping debug functions
  */
+#include <linux/clocksource.h>
 #include <linux/time.h>
 
 #ifdef CONFIG_DEBUG_FS
-extern void tk_debug_account_sleep_time(struct timespec *t);
+extern void tk_debug_account_sleep_time(struct timespec64 *t);
 #else
 #define tk_debug_account_sleep_time(x)
 #endif
 
+#ifdef CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE
+static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
+{
+	cycle_t ret = (now - last) & mask;
+
+	return (s64) ret > 0 ? ret : 0;
+}
+#else
+static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
+{
+	return (now - last) & mask;
+}
+#endif
+
 #endif /* _TIMEKEEPING_INTERNAL_H */
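A small userspace analog illustrates why the validated variant of clocksource_delta() above clamps to zero: if the counter appears to step backwards (now < last, e.g. slightly unsynchronized TSCs across CPUs), the masked subtraction would otherwise wrap into an enormous bogus delta. This is a standalone sketch, not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t cycle_t;

	static cycle_t delta_raw(cycle_t now, cycle_t last, cycle_t mask)
	{
		return (now - last) & mask;
	}

	static cycle_t delta_validated(cycle_t now, cycle_t last, cycle_t mask)
	{
		cycle_t ret = (now - last) & mask;

		return (int64_t)ret > 0 ? ret : 0;
	}

	int main(void)
	{
		cycle_t mask = ~0ULL;	/* 64-bit clocksource, e.g. TSC */

		/* now (95) is 5 cycles *behind* last (100) */
		printf("raw:       %llu\n",	/* huge wrapped value */
		       (unsigned long long)delta_raw(95, 100, mask));
		printf("validated: %llu\n",	/* clamped to 0 */
		       (unsigned long long)delta_validated(95, 100, mask));
		return 0;
	}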
@@ -82,6 +82,7 @@ struct tvec_base {
 	unsigned long next_timer;
 	unsigned long active_timers;
 	unsigned long all_timers;
+	int cpu;
 	struct tvec_root tv1;
 	struct tvec tv2;
 	struct tvec tv3;
@@ -409,6 +410,22 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 		base->next_timer = timer->expires;
 	}
 	base->all_timers++;
+
+	/*
+	 * Check whether the other CPU is in dynticks mode and needs
+	 * to be triggered to reevaluate the timer wheel.
+	 * We are protected against the other CPU fiddling
+	 * with the timer by holding the timer base lock. This also
+	 * makes sure that a CPU on the way to stop its tick can not
+	 * evaluate the timer wheel.
+	 *
+	 * Spare the IPI for deferrable timers on idle targets though.
+	 * The next busy ticks will take care of it. Except full dynticks
+	 * require special care against races with idle_cpu(), lets deal
+	 * with that later.
+	 */
+	if (!tbase_get_deferrable(base) || tick_nohz_full_cpu(base->cpu))
+		wake_up_nohz_cpu(base->cpu);
 }
 
 #ifdef CONFIG_TIMER_STATS
@@ -948,22 +965,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
 	timer_set_base(timer, base);
 	debug_activate(timer, timer->expires);
 	internal_add_timer(base, timer);
-	/*
-	 * Check whether the other CPU is in dynticks mode and needs
-	 * to be triggered to reevaluate the timer wheel.
-	 * We are protected against the other CPU fiddling
-	 * with the timer by holding the timer base lock. This also
-	 * makes sure that a CPU on the way to stop its tick can not
-	 * evaluate the timer wheel.
-	 *
-	 * Spare the IPI for deferrable timers on idle targets though.
-	 * The next busy ticks will take care of it. Except full dynticks
-	 * require special care against races with idle_cpu(), lets deal
-	 * with that later.
-	 */
-	if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(cpu))
-		wake_up_nohz_cpu(cpu);
-
 	spin_unlock_irqrestore(&base->lock, flags);
 }
 EXPORT_SYMBOL_GPL(add_timer_on);
@@ -1568,6 +1569,7 @@ static int init_timers_cpu(int cpu)
 		}
 		spin_lock_init(&base->lock);
 		tvec_base_done[cpu] = 1;
+		base->cpu = cpu;
 	} else {
 		base = per_cpu(tvec_bases, cpu);
 	}
kernel/time/udelay_test.c (new file, 168 lines):
@@ -0,0 +1,168 @@
+/*
+ * udelay() test kernel module
+ *
+ * Test is executed by writing and reading to /sys/kernel/debug/udelay_test
+ * Tests are configured by writing: USECS ITERATIONS
+ * Tests are executed by reading from the same file.
+ * Specifying usecs of 0 or negative values will run multiples tests.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#define DEFAULT_ITERATIONS 100
+
+#define DEBUGFS_FILENAME "udelay_test"
+
+static DEFINE_MUTEX(udelay_test_lock);
+static struct dentry *udelay_test_debugfs_file;
+static int udelay_test_usecs;
+static int udelay_test_iterations = DEFAULT_ITERATIONS;
+
+static int udelay_test_single(struct seq_file *s, int usecs, uint32_t iters)
+{
+	int min = 0, max = 0, fail_count = 0;
+	uint64_t sum = 0;
+	uint64_t avg;
+	int i;
+	/* Allow udelay to be up to 0.5% fast */
+	int allowed_error_ns = usecs * 5;
+
+	for (i = 0; i < iters; ++i) {
+		struct timespec ts1, ts2;
+		int time_passed;
+
+		ktime_get_ts(&ts1);
+		udelay(usecs);
+		ktime_get_ts(&ts2);
+		time_passed = timespec_to_ns(&ts2) - timespec_to_ns(&ts1);
+
+		if (i == 0 || time_passed < min)
+			min = time_passed;
+		if (i == 0 || time_passed > max)
+			max = time_passed;
+		if ((time_passed + allowed_error_ns) / 1000 < usecs)
+			++fail_count;
+		WARN_ON(time_passed < 0);
+		sum += time_passed;
+	}
+
+	avg = sum;
+	do_div(avg, iters);
+	seq_printf(s, "%d usecs x %d: exp=%d allowed=%d min=%d avg=%lld max=%d",
+			usecs, iters, usecs * 1000,
+			(usecs * 1000) - allowed_error_ns, min, avg, max);
+	if (fail_count)
+		seq_printf(s, " FAIL=%d", fail_count);
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+static int udelay_test_show(struct seq_file *s, void *v)
+{
+	int usecs;
+	int iters;
+	int ret = 0;
+
+	mutex_lock(&udelay_test_lock);
+	usecs = udelay_test_usecs;
+	iters = udelay_test_iterations;
+	mutex_unlock(&udelay_test_lock);
+
+	if (usecs > 0 && iters > 0) {
+		return udelay_test_single(s, usecs, iters);
+	} else if (usecs == 0) {
+		struct timespec ts;
+
+		ktime_get_ts(&ts);
+		seq_printf(s, "udelay() test (lpj=%ld kt=%ld.%09ld)\n",
+				loops_per_jiffy, ts.tv_sec, ts.tv_nsec);
+		seq_puts(s, "usage:\n");
+		seq_puts(s, "echo USECS [ITERS] > " DEBUGFS_FILENAME "\n");
+		seq_puts(s, "cat " DEBUGFS_FILENAME "\n");
+	}
+
+	return ret;
+}
+
+static int udelay_test_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, udelay_test_show, inode->i_private);
+}
+
+static ssize_t udelay_test_write(struct file *file, const char __user *buf,
+		size_t count, loff_t *pos)
+{
+	char lbuf[32];
+	int ret;
+	int usecs;
+	int iters;
+
+	if (count >= sizeof(lbuf))
+		return -EINVAL;
+
+	if (copy_from_user(lbuf, buf, count))
+		return -EFAULT;
+	lbuf[count] = '\0';
+
+	ret = sscanf(lbuf, "%d %d", &usecs, &iters);
+	if (ret < 1)
+		return -EINVAL;
+	else if (ret < 2)
+		iters = DEFAULT_ITERATIONS;
+
+	mutex_lock(&udelay_test_lock);
+	udelay_test_usecs = usecs;
+	udelay_test_iterations = iters;
+	mutex_unlock(&udelay_test_lock);
+
+	return count;
+}
+
+static const struct file_operations udelay_test_debugfs_ops = {
+	.owner = THIS_MODULE,
+	.open = udelay_test_open,
+	.read = seq_read,
+	.write = udelay_test_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int __init udelay_test_init(void)
+{
+	mutex_lock(&udelay_test_lock);
+	udelay_test_debugfs_file = debugfs_create_file(DEBUGFS_FILENAME,
+			S_IRUSR, NULL, NULL, &udelay_test_debugfs_ops);
+	mutex_unlock(&udelay_test_lock);
+
+	return 0;
+}
+
+module_init(udelay_test_init);
+
+static void __exit udelay_test_exit(void)
+{
+	mutex_lock(&udelay_test_lock);
+	debugfs_remove(udelay_test_debugfs_file);
+	mutex_unlock(&udelay_test_lock);
+}
+
+module_exit(udelay_test_exit);
+
+MODULE_AUTHOR("David Riley <davidriley@chromium.org>");
+MODULE_LICENSE("GPL");
@@ -820,11 +820,12 @@ static struct {
 	const char *name;
 	int in_ns;		/* is this clock in nanoseconds? */
 } trace_clocks[] = {
-	{ trace_clock_local,	"local",	1 },
-	{ trace_clock_global,	"global",	1 },
-	{ trace_clock_counter,	"counter",	0 },
-	{ trace_clock_jiffies,	"uptime",	0 },
-	{ trace_clock,		"perf",		1 },
+	{ trace_clock_local,		"local",	1 },
+	{ trace_clock_global,		"global",	1 },
+	{ trace_clock_counter,		"counter",	0 },
+	{ trace_clock_jiffies,		"uptime",	0 },
+	{ trace_clock,			"perf",		1 },
+	{ ktime_get_mono_fast_ns,	"mono",		1 },
 	ARCH_TRACE_CLOCKS
 };
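With the "mono" entry in the trace_clocks table, the new NMI-safe clock can be selected like any other trace clock, e.g. by writing "mono" to the trace_clock file in the tracing debugfs directory (typically /sys/kernel/debug/tracing/trace_clock).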
@@ -31,20 +31,19 @@ void bacct_add_tsk(struct user_namespace *user_ns,
 		   struct taskstats *stats, struct task_struct *tsk)
 {
 	const struct cred *tcred;
-	struct timespec uptime, ts;
 	cputime_t utime, stime, utimescaled, stimescaled;
-	u64 ac_etime;
+	u64 delta;
 
 	BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN);
 
-	/* calculate task elapsed time in timespec */
-	do_posix_clock_monotonic_gettime(&uptime);
-	ts = timespec_sub(uptime, tsk->start_time);
-	/* rebase elapsed time to usec (should never be negative) */
-	ac_etime = timespec_to_ns(&ts);
-	do_div(ac_etime, NSEC_PER_USEC);
-	stats->ac_etime = ac_etime;
-	stats->ac_btime = get_seconds() - ts.tv_sec;
+	/* calculate task elapsed time in nsec */
+	delta = ktime_get_ns() - tsk->start_time;
+	/* Convert to micro seconds */
+	do_div(delta, NSEC_PER_USEC);
+	stats->ac_etime = delta;
+	/* Convert to seconds for btime */
+	do_div(delta, USEC_PER_SEC);
+	stats->ac_btime = get_seconds() - delta;
 	if (thread_group_leader(tsk)) {
 		stats->ac_exitcode = tsk->exit_code;
 		if (tsk->flags & PF_FORKNOEXEC)
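Note the two-stage do_div() in the new code: delta starts in nanoseconds, the first divide leaves microseconds (stored in ac_etime), and the second divide reduces those microseconds to whole seconds, which is the unit the btime calculation needs.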