Merge branch 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull NOHZ updates from Ingo Molnar:
 "NOHZ enhancements, by Frederic Weisbecker, which reorganizes/refactors
  the NOHZ 'can the tick be stopped?' infrastructure and related code to
  be data driven, and harmonizes the naming and handling of all the
  various properties"

[ This makes the ugly "fetch_or()" macro that the scheduler used
  internally a new generic helper, and does a bad job at it.

  I'm pulling it, but I've asked Ingo and Frederic to get this fixed up ]

* 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched-clock: Migrate to use new tick dependency mask model
  posix-cpu-timers: Migrate to use new tick dependency mask model
  sched: Migrate sched to use new tick dependency mask model
  sched: Account rr tasks
  perf: Migrate perf to use new tick dependency mask model
  nohz: Use enum code for tick stop failure tracing message
  nohz: New tick dependency mask
  nohz: Implement wide kick on top of irq work
  atomic: Export fetch_or()
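For context, fetch_or() atomically ORs a mask into a word and returns the
word's *previous* value, which is what lets the nohz code detect a
zero -> non-zero transition of a tick dependency mask and kick the affected
CPUs exactly once. A minimal sketch of the cmpxchg()-loop shape the helper
takes in this series (the "atomic: Export fetch_or()" commit is the
authoritative version; this is illustrative):

	/* Sketch: atomically OR @mask into *@ptr and return the old value. */
	#ifndef fetch_or
	#define fetch_or(ptr, mask)						\
	({	typeof(*(ptr)) __old, __val = *(ptr);				\
		for (;;) {							\
			__old = cmpxchg((ptr), __val, __val | (mask));		\
			if (__old == __val)					\
				break;						\
			__val = __old;						\
		}								\
		__old;								\
	})
	#endif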
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -450,6 +450,7 @@ static inline int rt_bandwidth_enabled(void)
 struct rt_rq {
 	struct rt_prio_array active;
 	unsigned int rt_nr_running;
+	unsigned int rr_nr_running;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 	struct {
 		int curr; /* highest queued rt task prio */
@@ -1313,6 +1314,35 @@ unsigned long to_ratio(u64 period, u64 runtime);
 
 extern void init_entity_runnable_average(struct sched_entity *se);
 
+#ifdef CONFIG_NO_HZ_FULL
+extern bool sched_can_stop_tick(struct rq *rq);
+
+/*
+ * Tick may be needed by tasks in the runqueue depending on their policy and
+ * requirements. If tick is needed, lets send the target an IPI to kick it out of
+ * nohz mode if necessary.
+ */
+static inline void sched_update_tick_dependency(struct rq *rq)
+{
+	int cpu;
+
+	if (!tick_nohz_full_enabled())
+		return;
+
+	cpu = cpu_of(rq);
+
+	if (!tick_nohz_full_cpu(cpu))
+		return;
+
+	if (sched_can_stop_tick(rq))
+		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
+	else
+		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
+}
+#else
+static inline void sched_update_tick_dependency(struct rq *rq) { }
+#endif
+
 static inline void add_nr_running(struct rq *rq, unsigned count)
 {
 	unsigned prev_nr = rq->nr_running;
@@ -1324,26 +1354,16 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 		if (!rq->rd->overload)
 			rq->rd->overload = true;
 #endif
-
-#ifdef CONFIG_NO_HZ_FULL
-		if (tick_nohz_full_cpu(rq->cpu)) {
-			/*
-			 * Tick is needed if more than one task runs on a CPU.
-			 * Send the target an IPI to kick it out of nohz mode.
-			 *
-			 * We assume that IPI implies full memory barrier and the
-			 * new value of rq->nr_running is visible on reception
-			 * from the target.
-			 */
-			tick_nohz_full_kick_cpu(rq->cpu);
-		}
-#endif
 	}
+
+	sched_update_tick_dependency(rq);
 }
 
 static inline void sub_nr_running(struct rq *rq, unsigned count)
 {
 	rq->nr_running -= count;
+	/* Check if we still need preemption */
+	sched_update_tick_dependency(rq);
 }
 
 static inline void rq_last_tick_reset(struct rq *rq)
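The hunks above only declare sched_can_stop_tick(); the policy behind it is
why the new rr_nr_running counter exists: a lone SCHED_FIFO task can run
tick-free, but SCHED_RR tasks time-slice against peers at the same priority,
so two or more of them need the tick. A sketch of that decision logic, per
the "sched: Migrate sched to use new tick dependency mask model" and
"sched: Account rr tasks" commits (close to, but not guaranteed verbatim,
the kernel/sched/core.c implementation):

	#ifdef CONFIG_NO_HZ_FULL
	bool sched_can_stop_tick(struct rq *rq)
	{
		int fifo_nr_running;

		/* Deadline tasks need the tick for runtime accounting. */
		if (rq->dl.dl_nr_running)
			return false;

		/*
		 * FIFO realtime tasks run until they block or yield; the
		 * highest-priority one runs tick-free.
		 */
		fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
		if (fifo_nr_running)
			return true;

		/*
		 * Round-robin tasks time-slice against peers at the same
		 * priority: a single one can run tick-free, two or more cannot.
		 */
		if (rq->rt.rr_nr_running)
			return rq->rt.rr_nr_running == 1;

		/* Normal (CFS) multitasking needs periodic preemption checks. */
		return rq->cfs.nr_running <= 1;
	}
	#endif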