Merge tag 'v5.4-rc7' into sched/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Ingo Molnar
Date:   2019-11-11 08:34:59 +01:00
1726 changed files with 20531 additions and 12573 deletions

kernel/time/hrtimer.c

@@ -164,7 +164,7 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
 	struct hrtimer_clock_base *base;
 
 	for (;;) {
-		base = timer->base;
+		base = READ_ONCE(timer->base);
 		if (likely(base != &migration_base)) {
 			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 			if (likely(base == timer->base))
@@ -244,7 +244,7 @@ again:
 			return base;
 
 		/* See the comment in lock_hrtimer_base() */
-		timer->base = &migration_base;
+		WRITE_ONCE(timer->base, &migration_base);
 		raw_spin_unlock(&base->cpu_base->lock);
 		raw_spin_lock(&new_base->cpu_base->lock);
 
@@ -253,10 +253,10 @@ again:
 			raw_spin_unlock(&new_base->cpu_base->lock);
 			raw_spin_lock(&base->cpu_base->lock);
 			new_cpu_base = this_cpu_base;
-			timer->base = base;
+			WRITE_ONCE(timer->base, base);
 			goto again;
 		}
-		timer->base = new_base;
+		WRITE_ONCE(timer->base, new_base);
 	} else {
 		if (new_cpu_base != this_cpu_base &&
 		    hrtimer_check_target(timer, new_base)) {
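
For context: READ_ONCE()/WRITE_ONCE() force the compiler to emit exactly one load or store and prevent access tearing, which is what makes the lockless timer->base check above safe under concurrent migration. A minimal standalone sketch of the pattern, using simplified stand-ins for the kernel macros (not the kernel definitions):

/* Standalone sketch, not kernel code: simplified READ_ONCE/WRITE_ONCE. */
#include <stdio.h>

#define READ_ONCE(x)     (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

struct clock_base { int cpu; };

static struct clock_base migration_base = { .cpu = -1 };
static struct clock_base cpu1_base = { .cpu = 1 };
static struct clock_base *timer_base = &migration_base;

int main(void)
{
	/* Writer side: publish the new base with a single store. */
	WRITE_ONCE(timer_base, &cpu1_base);

	/*
	 * Reader side: snapshot the pointer exactly once, then test the
	 * snapshot. Without READ_ONCE() the compiler may reload timer_base
	 * between the check and the use and observe two different pointers.
	 */
	struct clock_base *base = READ_ONCE(timer_base);

	if (base != &migration_base)
		printf("timer lives on cpu %d\n", base->cpu);
	return 0;
}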

kernel/time/posix-cpu-timers.c

@@ -266,7 +266,7 @@ static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
 /**
  * thread_group_sample_cputime - Sample cputime for a given task
  * @tsk: Task for which cputime needs to be started
- * @iimes: Storage for time samples
+ * @samples: Storage for time samples
  *
  * Called from sys_getitimer() to calculate the expiry time of an active
  * timer. That means group cputime accounting is already active. Called
@@ -1038,12 +1038,12 @@ unlock:
  * member of @pct->bases[CLK].nextevt. False otherwise
  */
 static inline bool
-task_cputimers_expired(const u64 *sample, struct posix_cputimers *pct)
+task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
 {
 	int i;
 
 	for (i = 0; i < CPUCLOCK_MAX; i++) {
-		if (sample[i] >= pct->bases[i].nextevt)
+		if (samples[i] >= pct->bases[i].nextevt)
 			return true;
 	}
 	return false;
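
The rename only makes the parameter agree with the kernel-doc; the check itself compares one consumed-time sample per clock against the cached earliest expiry. A self-contained sketch of that logic (CPUCLOCK_* values as in the kernel headers, the surrounding types simplified):

/* Standalone sketch of the expiry check, not the kernel implementation. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CPUCLOCK_PROF	0
#define CPUCLOCK_VIRT	1
#define CPUCLOCK_SCHED	2
#define CPUCLOCK_MAX	3

struct posix_cputimer_base { uint64_t nextevt; };

static bool task_cputimers_expired(const uint64_t *samples,
				   const struct posix_cputimer_base *bases)
{
	for (int i = 0; i < CPUCLOCK_MAX; i++) {
		/* A timer fired if the consumed time reached its expiry. */
		if (samples[i] >= bases[i].nextevt)
			return true;
	}
	return false;
}

int main(void)
{
	struct posix_cputimer_base bases[CPUCLOCK_MAX] = {
		{ 1000 }, { 2000 }, { 3000 }
	};
	/* The VIRT sample has reached its expiry, so this prints 1. */
	uint64_t samples[CPUCLOCK_MAX] = { 999, 2000, 0 };

	printf("expired: %d\n", task_cputimers_expired(samples, bases));
	return 0;
}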

kernel/time/sched_clock.c

@@ -17,6 +17,8 @@
 #include <linux/seqlock.h>
 #include <linux/bitops.h>
 
+#include "timekeeping.h"
+
 /**
  * struct clock_read_data - data required to read from sched_clock()
  *
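
The kernel-doc truncated above describes struct clock_read_data, the per-epoch snapshot that sched_clock() reads under a seqcount. The conversion it enables is epoch_ns plus the masked cycle delta scaled by mult and shift; a hedged standalone sketch of that arithmetic (field names follow the kernel, everything else is reduced to the math):

/* Sketch only: the cycles-to-nanoseconds math behind sched_clock(). */
#include <stdint.h>
#include <stdio.h>

struct clock_read_data_sketch {
	uint64_t epoch_ns;	   /* ns timestamp at the last epoch update */
	uint64_t epoch_cyc;	   /* raw counter value at that update */
	uint64_t sched_clock_mask; /* mask matching the counter width */
	uint32_t mult;		   /* cycle -> ns multiplier */
	uint32_t shift;		   /* cycle -> ns shift (power-of-two divisor) */
};

static uint64_t sched_clock_sketch(const struct clock_read_data_sketch *rd,
				   uint64_t cyc)
{
	uint64_t delta = (cyc - rd->epoch_cyc) & rd->sched_clock_mask;

	/* cyc_to_ns(): scale the delta and rebase onto the epoch. */
	return rd->epoch_ns + ((delta * rd->mult) >> rd->shift);
}

int main(void)
{
	struct clock_read_data_sketch rd = {
		.epoch_ns = 0, .epoch_cyc = 0,
		.sched_clock_mask = ~0ULL, .mult = 1000, .shift = 0,
	};

	/* 5 cycles at 1000 ns/cycle -> 5000 ns. */
	printf("%llu ns\n", (unsigned long long)sched_clock_sketch(&rd, 5));
	return 0;
}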

kernel/time/tick-broadcast-hrtimer.c

@@ -42,39 +42,39 @@ static int bc_shutdown(struct clock_event_device *evt)
  */
 static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 {
-	int bc_moved;
-
 	/*
-	 * We try to cancel the timer first. If the callback is on
-	 * flight on some other cpu then we let it handle it. If we
-	 * were able to cancel the timer nothing can rearm it as we
-	 * own broadcast_lock.
+	 * This is called either from enter/exit idle code or from the
+	 * broadcast handler. In all cases tick_broadcast_lock is held.
 	 *
-	 * However we can also be called from the event handler of
-	 * ce_broadcast_hrtimer itself when it expires. We cannot
-	 * restart the timer because we are in the callback, but we
-	 * can set the expiry time and let the callback return
-	 * HRTIMER_RESTART.
+	 * hrtimer_cancel() cannot be called here, neither from the
+	 * broadcast handler nor from the enter/exit idle code. The idle
+	 * code can run into the problem described in bc_shutdown() and the
+	 * broadcast handler cannot wait for itself to complete for obvious
+	 * reasons.
 	 *
-	 * Since we are in the idle loop at this point and because
-	 * hrtimer_{start/cancel} functions call into tracing,
-	 * calls to these functions must be bound within RCU_NONIDLE.
+	 * Each caller tries to arm the hrtimer on its own CPU, but if the
+	 * hrtimer callback function is currently running, then
+	 * hrtimer_start() cannot move it and the timer stays on the CPU on
+	 * which it is assigned at the moment.
+	 *
+	 * As this can be called from idle code, the hrtimer_start()
+	 * invocation has to be wrapped with RCU_NONIDLE() as
+	 * hrtimer_start() can call into tracing.
 	 */
-	RCU_NONIDLE(
-		{
-			bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
-			if (bc_moved) {
-				hrtimer_start(&bctimer, expires,
-					      HRTIMER_MODE_ABS_PINNED_HARD);
-			}
-		}
-	);
-
-	if (bc_moved) {
-		/* Bind the "device" to the cpu */
-		bc->bound_on = smp_processor_id();
-	} else if (bc->bound_on == smp_processor_id()) {
-		hrtimer_set_expires(&bctimer, expires);
-	}
+	RCU_NONIDLE( {
+		hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED_HARD);
+		/*
+		 * The core tick broadcast mode expects bc->bound_on to be set
+		 * correctly to prevent a CPU which has the broadcast hrtimer
+		 * armed from going deep idle.
+		 *
+		 * As tick_broadcast_lock is held, nothing can change the cpu
+		 * base which was just established in hrtimer_start() above. So
+		 * the below access is safe even without holding the hrtimer
+		 * base lock.
+		 */
+		bc->bound_on = bctimer.base->cpu_base->cpu;
+	} );
 	return 0;
 }
@@ -100,10 +100,6 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
 {
 	ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
 
-	if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
-		if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
-			return HRTIMER_RESTART;
-
 	return HRTIMER_NORESTART;
 }
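
The removed bc_moved logic leaned on hrtimer_try_to_cancel()'s three-way result: 0 (timer was not queued), 1 (timer was queued and got cancelled), or -1 (the callback is currently running and cannot be cancelled). Only the -1 case fell through to the else branch, which updated the expiry only when the current CPU already owned the timer; a request racing with an in-flight callback on another CPU could therefore be dropped, which is the race the rewrite closes by arming the timer unconditionally and deriving bound_on from the hrtimer's actual CPU base. A toy model of the old decision table, purely for illustration:

/* Toy model, not kernel code: the return-value semantics of
 * hrtimer_try_to_cancel() and the old bc_set_next() decision they drove. */
#include <stdio.h>

enum { TIMER_INACTIVE = 0, TIMER_CANCELLED = 1, CALLBACK_RUNNING = -1 };

static void old_bc_set_next(int cancel_result, int this_cpu, int bound_on)
{
	int bc_moved = cancel_result >= 0;	/* -1 is excluded */

	if (bc_moved)
		printf("cpu%d: timer rearmed and bound here\n", this_cpu);
	else if (bound_on == this_cpu)
		printf("cpu%d: expiry updated for the running callback\n",
		       this_cpu);
	else
		printf("cpu%d: request dropped - the race window\n", this_cpu);
}

int main(void)
{
	/* Callback in flight on cpu0 while cpu1 asks for a new expiry. */
	old_bc_set_next(CALLBACK_RUNNING, /*this_cpu=*/1, /*bound_on=*/0);
	return 0;
}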

kernel/time/vsyscall.c

@@ -110,8 +110,7 @@ void update_vsyscall(struct timekeeper *tk)
 	nsec = nsec + tk->wall_to_monotonic.tv_nsec;
 	vdso_ts->sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);
 
-	if (__arch_use_vsyscall(vdata))
-		update_vdso_data(vdata, tk);
+	update_vdso_data(vdata, tk);
 
 	__arch_update_vsyscall(vdata, tk);
@@ -124,10 +123,8 @@ void update_vsyscall_tz(void)
 {
 	struct vdso_data *vdata = __arch_get_k_vdso_data();
 
-	if (__arch_use_vsyscall(vdata)) {
-		vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
-		vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;
-	}
+	vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
+	vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;
 
 	__arch_sync_vdso_data(vdata);
 }
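
After this change the timekeeping and timezone fields are copied into the vDSO data pages unconditionally instead of only when __arch_use_vsyscall() says so. From userspace, the fields that update_vsyscall_tz() fills surface through the obsolete but still supported tz argument of gettimeofday(); a quick check:

/* Userspace check: reads the tz fields that update_vsyscall_tz() fills. */
#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct timeval tv;
	struct timezone tz;

	if (gettimeofday(&tv, &tz) != 0) {
		perror("gettimeofday");
		return 1;
	}
	printf("minuteswest=%d dsttime=%d\n",
	       tz.tz_minuteswest, tz.tz_dsttime);
	return 0;
}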