Merge branch 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull NOHZ updates from Ingo Molnar:
 "The main changes, mostly written by Frederic Weisbecker, include:

   - Fix some jiffies based cputime assumptions. (No real harm because
     the concerned code isn't used by full dynticks.)

   - Simplify jiffies <-> usecs conversions. Remove dead code.

   - Remove early hacks on nohz full code that avoided messing up idle
     nohz internals. Now nohz integrates full and idle well, and such
     hacks have become needless.

   - Restart nohz full tick from irq exit. (A simplification and a
     preparation for future optimization on scheduler kick to nohz full)

   - Code cleanups.

   - Tile driver isolation enhancement on top of nohz. (Chris Metcalf)"

* 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  nohz: Remove useless argument on tick_nohz_task_switch()
  nohz: Move tick_nohz_restart_sched_tick() above its users
  nohz: Restart nohz full tick from irq exit
  nohz: Remove idle task special case
  nohz: Prevent tilegx network driver interrupts
  alpha: Fix jiffies based cputime assumption
  apm32: Fix cputime == jiffies assumption
  jiffies: Remove HZ > USEC_PER_SEC special case
@@ -2543,7 +2543,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 		put_task_struct(prev);
 	}
 
-	tick_nohz_task_switch(current);
+	tick_nohz_task_switch();
 	return rq;
 }
 
@@ -197,27 +197,9 @@ static bool can_stop_full_tick(void)
 	return true;
 }
 
-static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
-
-/*
- * Re-evaluate the need for the tick on the current CPU
- * and restart it if necessary.
- */
-void __tick_nohz_full_check(void)
-{
-	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
-
-	if (tick_nohz_full_cpu(smp_processor_id())) {
-		if (ts->tick_stopped && !is_idle_task(current)) {
-			if (!can_stop_full_tick())
-				tick_nohz_restart_sched_tick(ts, ktime_get());
-		}
-	}
-}
-
 static void nohz_full_kick_work_func(struct irq_work *work)
 {
-	__tick_nohz_full_check();
+	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
 }
 
 static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
@@ -252,7 +234,7 @@ void tick_nohz_full_kick_cpu(int cpu)
 
 static void nohz_full_kick_ipi(void *info)
 {
-	__tick_nohz_full_check();
+	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
 }
 
 /*
@@ -276,7 +258,7 @@ void tick_nohz_full_kick_all(void)
  * It might need the tick due to per task/process properties:
  * perf events, posix cpu timers, ...
  */
-void __tick_nohz_task_switch(struct task_struct *tsk)
+void __tick_nohz_task_switch(void)
 {
 	unsigned long flags;
 
@@ -705,21 +687,38 @@ out:
 	return tick;
 }
 
-static void tick_nohz_full_stop_tick(struct tick_sched *ts)
+static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
+{
+	/* Update jiffies first */
+	tick_do_update_jiffies64(now);
+	update_cpu_load_nohz();
+
+	calc_load_exit_idle();
+	touch_softlockup_watchdog();
+	/*
+	 * Cancel the scheduled timer and restore the tick
+	 */
+	ts->tick_stopped = 0;
+	ts->idle_exittime = now;
+
+	tick_nohz_restart(ts, now);
+}
+
+static void tick_nohz_full_update_tick(struct tick_sched *ts)
 {
 #ifdef CONFIG_NO_HZ_FULL
 	int cpu = smp_processor_id();
 
-	if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
+	if (!tick_nohz_full_cpu(cpu))
 		return;
 
 	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
 		return;
 
-	if (!can_stop_full_tick())
-		return;
-
-	tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
+	if (can_stop_full_tick())
+		tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
+	else if (ts->tick_stopped)
+		tick_nohz_restart_sched_tick(ts, ktime_get());
 #endif
 }
 
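The net effect of the rewritten helper above is that interrupt exit becomes the single place where a nohz full CPU decides between keeping the tick stopped and restarting it. Below is a minimal, self-contained userspace sketch of that decision shape; a plain boolean stands in for can_stop_full_tick() and a small struct stands in for struct tick_sched, and none of the demo_* names are kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct tick_sched: only the field this decision needs. */
struct demo_tick_state {
	bool tick_stopped;
};

/* Stand-in for can_stop_full_tick(): the tick can stop when nothing depends on it. */
static bool demo_can_stop_tick(bool has_tick_dependency)
{
	return !has_tick_dependency;
}

/* Same decision shape as the new tick_nohz_full_update_tick(): run once on irq exit. */
static void demo_update_tick(struct demo_tick_state *ts, bool has_tick_dependency)
{
	if (demo_can_stop_tick(has_tick_dependency))
		ts->tick_stopped = true;	/* stop the tick, or keep it stopped */
	else if (ts->tick_stopped)
		ts->tick_stopped = false;	/* a dependency appeared: restart the tick */
}

int main(void)
{
	struct demo_tick_state ts = { .tick_stopped = false };

	demo_update_tick(&ts, false);	/* nothing needs the tick: it stops */
	printf("tick_stopped=%d\n", ts.tick_stopped);

	demo_update_tick(&ts, true);	/* perf/posix-timer style dependency: it restarts */
	printf("tick_stopped=%d\n", ts.tick_stopped);
	return 0;
}

This is also why nohz_full_kick_work_func() and nohz_full_kick_ipi() became empty above: taking the interrupt is enough, because the re-evaluation now always runs on the irq exit path.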
@@ -849,7 +848,7 @@ void tick_nohz_irq_exit(void)
 	if (ts->inidle)
 		__tick_nohz_idle_enter(ts);
 	else
-		tick_nohz_full_stop_tick(ts);
+		tick_nohz_full_update_tick(ts);
 }
 
 /**
@@ -864,23 +863,6 @@ ktime_t tick_nohz_get_sleep_length(void)
 	return ts->sleep_length;
 }
 
-static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
-{
-	/* Update jiffies first */
-	tick_do_update_jiffies64(now);
-	update_cpu_load_nohz();
-
-	calc_load_exit_idle();
-	touch_softlockup_watchdog();
-	/*
-	 * Cancel the scheduled timer and restore the tick
-	 */
-	ts->tick_stopped = 0;
-	ts->idle_exittime = now;
-
-	tick_nohz_restart(ts, now);
-}
-
 static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
 {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
@@ -268,10 +268,14 @@ EXPORT_SYMBOL(jiffies_to_msecs);
 
 unsigned int jiffies_to_usecs(const unsigned long j)
 {
-#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+	/*
+	 * Hz usually doesn't go much further MSEC_PER_SEC.
+	 * jiffies_to_usecs() and usecs_to_jiffies() depend on that.
+	 */
+	BUILD_BUG_ON(HZ > USEC_PER_SEC);
+
+#if !(USEC_PER_SEC % HZ)
 	return (USEC_PER_SEC / HZ) * j;
-#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
-	return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
 #else
 # if BITS_PER_LONG == 32
 	return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
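As a quick check of the simplified conversion: the fast path is plain multiplication whenever USEC_PER_SEC is a multiple of HZ, and the new BUILD_BUG_ON() turns the old HZ > USEC_PER_SEC branch into a compile-time error instead of dead runtime code. A small standalone sketch of that arithmetic follows; demo_jiffies_to_usecs and the local DEMO_* constants are illustrative, not the kernel's definitions.

#include <stdio.h>

#define DEMO_HZ			1000UL
#define DEMO_USEC_PER_SEC	1000000UL

/* Userspace analogue of the new BUILD_BUG_ON(HZ > USEC_PER_SEC) guard. */
_Static_assert(DEMO_HZ <= DEMO_USEC_PER_SEC,
	       "HZ above USEC_PER_SEC is no longer supported");

/* Fast path: exact when USEC_PER_SEC % HZ == 0 (true for HZ = 100, 250, 1000). */
static unsigned long demo_jiffies_to_usecs(unsigned long j)
{
	return (DEMO_USEC_PER_SEC / DEMO_HZ) * j;
}

int main(void)
{
	/* With HZ = 1000 a jiffy is 1000 us, so 5 jiffies -> 5000 us. */
	printf("%lu\n", demo_jiffies_to_usecs(5));
	return 0;
}

HZ values that do not divide USEC_PER_SEC fall through to the multiply-and-shift branch (HZ_TO_USEC_MUL32 / HZ_TO_USEC_SHR32) shown in the hunk above; that case is outside this sketch.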