Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
 "Main changes:

   - scheduler side full-dynticks (user-space execution is undisturbed
     and receives no timer IRQs) preparation changes that convert the
     cputime accounting code to be full-dynticks ready, from Frederic
     Weisbecker.

   - Initial sched.h split-up changes, by Clark Williams

   - select_idle_sibling() performance improvement by Mike Galbraith:

        " 1 tbench pair (worst case) in a 10 core + SMT package:

          pre   15.22 MB/sec 1 procs
          post 252.01 MB/sec 1 procs "

   - sched_rr_get_interval() ABI fix/change. We think this detail is
     not used by apps (so it's not an ABI in practice), but let's keep
     it under observation.

   - misc RT scheduling cleanups, optimizations"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  sched/rt: Add <linux/sched/rt.h> header to <linux/init_task.h>
  cputime: Remove irqsave from seqlock readers
  sched, powerpc: Fix sched.h split-up build failure
  cputime: Restore CPU_ACCOUNTING config defaults for PPC64
  sched/rt: Move rt specific bits into new header file
  sched/rt: Add a tuning knob to allow changing SCHED_RR timeslice
  sched: Move sched.h sysctl bits into separate header
  sched: Fix signedness bug in yield_to()
  sched: Fix select_idle_sibling() bouncing cow syndrome
  sched/rt: Further simplify pick_rt_task()
  sched/rt: Do not account zero delta_exec in update_curr_rt()
  cputime: Safely read cputime of full dynticks CPUs
  kvm: Prepare to add generic guest entry/exit callbacks
  cputime: Use accessors to read task cputime stats
  cputime: Allow dynamic switch between tick/virtual based cputime accounting
  cputime: Generic on-demand virtual cputime accounting
  cputime: Move default nsecs_to_cputime() to jiffies based cputime file
  cputime: Librarize per nsecs resolution cputime definitions
  cputime: Avoid multiplication overflow on utime scaling
  context_tracking: Export context state for generic vtime
  ...

Fix up conflict in kernel/context_tracking.c due to comment additions.
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4371,7 +4371,7 @@ bool __sched yield_to(struct task_struct *p, bool preempt)
 	struct task_struct *curr = current;
 	struct rq *rq, *p_rq;
 	unsigned long flags;
-	bool yielded = 0;
+	int yielded = 0;
 
 	local_irq_save(flags);
 	rq = this_rq();
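The type change above is the yield_to() signedness fix: a C99 bool collapses any nonzero value to 1, so a negative error code stored in one becomes indistinguishable from success. A minimal user-space sketch of the C behaviour involved (illustrative only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool b = -3;	/* converts to true, i.e. 1: the -3 is gone */
	int  i = -3;	/* keeps the value and its sign */

	printf("bool: %d, int: %d\n", b, i);		/* bool: 1, int: -3 */
	printf("b > 0: %d, i > 0: %d\n", b > 0, i > 0);	/* 1 vs 0 */
	return 0;
}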
@@ -4667,6 +4667,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 */
 	idle->sched_class = &idle_sched_class;
 	ftrace_graph_init_idle_task(idle, cpu);
+	vtime_init_idle(idle);
 #if defined(CONFIG_SMP)
 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
@@ -7508,6 +7509,25 @@ static int sched_rt_global_constraints(void)
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+int sched_rr_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret;
+	static DEFINE_MUTEX(mutex);
+
+	mutex_lock(&mutex);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+	/* make sure that internally we keep jiffies */
+	/* also, writing zero resets timeslice to default */
+	if (!ret && write) {
+		sched_rr_timeslice = sched_rr_timeslice <= 0 ?
+			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
+	}
+	mutex_unlock(&mutex);
+	return ret;
+}
+
 int sched_rt_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
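The new handler backs the SCHED_RR timeslice tuning knob: user space writes milliseconds, the kernel stores jiffies, and a zero or negative write resets the slice to the RR_TIMESLICE default. A hedged usage sketch, assuming the knob is exposed as kernel.sched_rr_timeslice_ms by the companion sysctl-table change (writing requires privilege):

#include <stdio.h>

int main(void)
{
	/* Path assumed from the sysctl wiring in this series. */
	FILE *f = fopen("/proc/sys/kernel/sched_rr_timeslice_ms", "r+");
	int ms;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &ms) == 1)
		printf("current SCHED_RR timeslice: %d ms\n", ms);
	rewind(f);
	fprintf(f, "50\n");	/* request a 50 ms round-robin slice */
	fclose(f);
	return 0;
}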
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -28,6 +28,8 @@
  */
 
 #include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
 #include "cpupri.h"
 
 /* Convert between a 140 based task->prio, and our 102 based cpupri */
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -3,6 +3,7 @@
 #include <linux/tsacct_kern.h>
 #include <linux/kernel_stat.h>
 #include <linux/static_key.h>
+#include <linux/context_tracking.h>
 #include "sched.h"
 
 
@@ -163,7 +164,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
 	task_group_account_field(p, index, (__force u64) cputime);
 
 	/* Account for user time used */
-	acct_update_integrals(p);
+	acct_account_cputime(p);
 }
 
 /*
@@ -213,7 +214,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime,
 	task_group_account_field(p, index, (__force u64) cputime);
 
 	/* Account for system time used */
-	acct_update_integrals(p);
+	acct_account_cputime(p);
 }
 
 /*
@@ -295,6 +296,7 @@ static __always_inline bool steal_account_process_tick(void)
 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 {
 	struct signal_struct *sig = tsk->signal;
+	cputime_t utime, stime;
 	struct task_struct *t;
 
 	times->utime = sig->utime;
@@ -308,16 +310,15 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 
 	t = tsk;
 	do {
-		times->utime += t->utime;
-		times->stime += t->stime;
+		task_cputime(tsk, &utime, &stime);
+		times->utime += utime;
+		times->stime += stime;
 		times->sum_exec_runtime += task_sched_runtime(t);
 	} while_each_thread(tsk, t);
 out:
 	rcu_read_unlock();
 }
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 /*
  * Account a tick to a process and cpustat
@@ -382,11 +383,12 @@ static void irqtime_account_idle_ticks(int ticks)
 		irqtime_account_process_tick(current, 0, rq);
 }
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
-static void irqtime_account_idle_ticks(int ticks) {}
-static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
+static inline void irqtime_account_idle_ticks(int ticks) {}
+static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 						struct rq *rq) {}
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /*
  * Account a single tick of cpu time.
  * @p: the process that the cpu time gets accounted to
@@ -397,6 +399,9 @@ void account_process_tick(struct task_struct *p, int user_tick)
 	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
 	struct rq *rq = this_rq();
 
+	if (vtime_accounting_enabled())
+		return;
+
 	if (sched_clock_irqtime) {
 		irqtime_account_process_tick(p, user_tick, rq);
 		return;
@@ -438,8 +443,7 @@ void account_idle_ticks(unsigned long ticks)
 
 	account_idle_time(jiffies_to_cputime(ticks));
 }
-
-#endif
+#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 /*
  * Use precise platform statistics if available:
@@ -461,25 +465,20 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
 	*st = cputime.stime;
 }
 
-void vtime_account_system_irqsafe(struct task_struct *tsk)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	vtime_account_system(tsk);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(vtime_account_system_irqsafe);
-
 #ifndef __ARCH_HAS_VTIME_TASK_SWITCH
 void vtime_task_switch(struct task_struct *prev)
 {
+	if (!vtime_accounting_enabled())
+		return;
+
 	if (is_idle_task(prev))
 		vtime_account_idle(prev);
 	else
 		vtime_account_system(prev);
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	vtime_account_user(prev);
+#endif
 	arch_vtime_task_switch(prev);
 }
 #endif
@@ -493,27 +492,40 @@ void vtime_task_switch(struct task_struct *prev)
  * vtime_account().
  */
 #ifndef __ARCH_HAS_VTIME_ACCOUNT
-void vtime_account(struct task_struct *tsk)
+void vtime_account_irq_enter(struct task_struct *tsk)
 {
-	if (in_interrupt() || !is_idle_task(tsk))
-		vtime_account_system(tsk);
-	else
-		vtime_account_idle(tsk);
+	if (!vtime_accounting_enabled())
+		return;
+
+	if (!in_interrupt()) {
+		/*
+		 * If we interrupted user, context_tracking_in_user()
+		 * is 1 because the context tracking don't hook
+		 * on irq entry/exit. This way we know if
+		 * we need to flush user time on kernel entry.
+		 */
+		if (context_tracking_in_user()) {
+			vtime_account_user(tsk);
+			return;
+		}
+
+		if (is_idle_task(tsk)) {
+			vtime_account_idle(tsk);
+			return;
+		}
+	}
+	vtime_account_system(tsk);
 }
-EXPORT_SYMBOL_GPL(vtime_account);
+EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
 #endif /* __ARCH_HAS_VTIME_ACCOUNT */
 
-#else
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
 #ifndef nsecs_to_cputime
 # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
 #endif
 
-static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
+static cputime_t scale_stime(cputime_t stime, cputime_t rtime, cputime_t total)
 {
 	u64 temp = (__force u64) rtime;
 
-	temp *= (__force u64) utime;
+	temp *= (__force u64) stime;
 
 	if (sizeof(cputime_t) == 4)
 		temp = div_u64(temp, (__force u32) total);
@@ -531,10 +543,10 @@ static void cputime_adjust(struct task_cputime *curr,
 			   struct cputime *prev,
 			   cputime_t *ut, cputime_t *st)
 {
-	cputime_t rtime, utime, total;
+	cputime_t rtime, stime, total;
 
-	utime = curr->utime;
-	total = utime + curr->stime;
+	stime = curr->stime;
+	total = stime + curr->utime;
 
 	/*
 	 * Tick based cputime accounting depend on random scheduling
@@ -549,17 +561,17 @@ static void cputime_adjust(struct task_cputime *curr,
 	rtime = nsecs_to_cputime(curr->sum_exec_runtime);
 
 	if (total)
-		utime = scale_utime(utime, rtime, total);
+		stime = scale_stime(stime, rtime, total);
 	else
-		utime = rtime;
+		stime = rtime;
 
 	/*
 	 * If the tick based count grows faster than the scheduler one,
 	 * the result of the scaling may go backward.
 	 * Let's enforce monotonicity.
 	 */
-	prev->utime = max(prev->utime, utime);
-	prev->stime = max(prev->stime, rtime - prev->utime);
+	prev->stime = max(prev->stime, stime);
+	prev->utime = max(prev->utime, rtime - prev->stime);
 
 	*ut = prev->utime;
 	*st = prev->stime;
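The swap from scale_utime() to scale_stime() is the multiplication-overflow avoidance: the scaling computes field * rtime / total in 64 bits, and since stime is usually far smaller than utime on the tasks that accumulate huge runtimes, multiplying rtime by stime keeps the intermediate product in range; utime then falls out as rtime - stime through the monotonicity clamps. A user-space sketch of the arithmetic (names illustrative):

#include <stdint.h>
#include <stdio.h>

/* scale = field * rtime / total, mirroring the kernel's scale_stime() */
static uint64_t scale(uint64_t field, uint64_t rtime, uint64_t total)
{
	return field * rtime / total;
}

int main(void)
{
	uint64_t utime = 1ULL << 33, stime = 1ULL << 10;
	uint64_t rtime = 1ULL << 33, total = utime + stime;

	/* utime * rtime would need 66 bits and silently wrap in a u64;
	 * stime * rtime fits comfortably in 43 bits. */
	uint64_t s = scale(stime, rtime, total);

	printf("stime=%llu utime=%llu\n",
	       (unsigned long long)s, (unsigned long long)(rtime - s));
	return 0;
}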
@@ -568,11 +580,10 @@ static void cputime_adjust(struct task_cputime *curr,
 void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
 	struct task_cputime cputime = {
-		.utime = p->utime,
-		.stime = p->stime,
 		.sum_exec_runtime = p->se.sum_exec_runtime,
 	};
 
+	task_cputime(p, &cputime.utime, &cputime.stime);
 	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
 }
 
@@ -586,4 +597,221 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
 	thread_group_cputime(p, &cputime);
 	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
 }
-#endif
+#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+static unsigned long long vtime_delta(struct task_struct *tsk)
+{
+	unsigned long long clock;
+
+	clock = sched_clock();
+	if (clock < tsk->vtime_snap)
+		return 0;
+
+	return clock - tsk->vtime_snap;
+}
+
+static cputime_t get_vtime_delta(struct task_struct *tsk)
+{
+	unsigned long long delta = vtime_delta(tsk);
+
+	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
+	tsk->vtime_snap += delta;
+
+	/* CHECKME: always safe to convert nsecs to cputime? */
+	return nsecs_to_cputime(delta);
+}
+
+static void __vtime_account_system(struct task_struct *tsk)
+{
+	cputime_t delta_cpu = get_vtime_delta(tsk);
+
+	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
+}
+
+void vtime_account_system(struct task_struct *tsk)
+{
+	if (!vtime_accounting_enabled())
+		return;
+
+	write_seqlock(&tsk->vtime_seqlock);
+	__vtime_account_system(tsk);
+	write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_account_irq_exit(struct task_struct *tsk)
+{
+	if (!vtime_accounting_enabled())
+		return;
+
+	write_seqlock(&tsk->vtime_seqlock);
+	if (context_tracking_in_user())
+		tsk->vtime_snap_whence = VTIME_USER;
+	__vtime_account_system(tsk);
+	write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_account_user(struct task_struct *tsk)
+{
+	cputime_t delta_cpu;
+
+	if (!vtime_accounting_enabled())
+		return;
+
+	delta_cpu = get_vtime_delta(tsk);
+
+	write_seqlock(&tsk->vtime_seqlock);
+	tsk->vtime_snap_whence = VTIME_SYS;
+	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
+	write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_user_enter(struct task_struct *tsk)
+{
+	if (!vtime_accounting_enabled())
+		return;
+
+	write_seqlock(&tsk->vtime_seqlock);
+	tsk->vtime_snap_whence = VTIME_USER;
+	__vtime_account_system(tsk);
+	write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_guest_enter(struct task_struct *tsk)
+{
+	write_seqlock(&tsk->vtime_seqlock);
+	__vtime_account_system(tsk);
+	current->flags |= PF_VCPU;
+	write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_guest_exit(struct task_struct *tsk)
+{
+	write_seqlock(&tsk->vtime_seqlock);
+	__vtime_account_system(tsk);
+	current->flags &= ~PF_VCPU;
+	write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_account_idle(struct task_struct *tsk)
+{
+	cputime_t delta_cpu = get_vtime_delta(tsk);
+
+	account_idle_time(delta_cpu);
+}
+
+bool vtime_accounting_enabled(void)
+{
+	return context_tracking_active();
+}
+
+void arch_vtime_task_switch(struct task_struct *prev)
+{
+	write_seqlock(&prev->vtime_seqlock);
+	prev->vtime_snap_whence = VTIME_SLEEPING;
+	write_sequnlock(&prev->vtime_seqlock);
+
+	write_seqlock(&current->vtime_seqlock);
+	current->vtime_snap_whence = VTIME_SYS;
+	current->vtime_snap = sched_clock();
+	write_sequnlock(&current->vtime_seqlock);
+}
+
+void vtime_init_idle(struct task_struct *t)
+{
+	unsigned long flags;
+
+	write_seqlock_irqsave(&t->vtime_seqlock, flags);
+	t->vtime_snap_whence = VTIME_SYS;
+	t->vtime_snap = sched_clock();
+	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
+}
+
+cputime_t task_gtime(struct task_struct *t)
+{
+	unsigned int seq;
+	cputime_t gtime;
+
+	do {
+		seq = read_seqbegin(&t->vtime_seqlock);
+
+		gtime = t->gtime;
+		if (t->flags & PF_VCPU)
+			gtime += vtime_delta(t);
+
+	} while (read_seqretry(&t->vtime_seqlock, seq));
+
+	return gtime;
+}
+
+/*
+ * Fetch cputime raw values from fields of task_struct and
+ * add up the pending nohz execution time since the last
+ * cputime snapshot.
+ */
+static void
+fetch_task_cputime(struct task_struct *t,
+		   cputime_t *u_dst, cputime_t *s_dst,
+		   cputime_t *u_src, cputime_t *s_src,
+		   cputime_t *udelta, cputime_t *sdelta)
+{
+	unsigned int seq;
+	unsigned long long delta;
+
+	do {
+		*udelta = 0;
+		*sdelta = 0;
+
+		seq = read_seqbegin(&t->vtime_seqlock);
+
+		if (u_dst)
+			*u_dst = *u_src;
+		if (s_dst)
+			*s_dst = *s_src;
+
+		/* Task is sleeping, nothing to add */
+		if (t->vtime_snap_whence == VTIME_SLEEPING ||
+		    is_idle_task(t))
+			continue;
+
+		delta = vtime_delta(t);
+
+		/*
+		 * Task runs either in user or kernel space, add pending nohz time to
+		 * the right place.
+		 */
+		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
+			*udelta = delta;
+		} else {
+			if (t->vtime_snap_whence == VTIME_SYS)
+				*sdelta = delta;
+		}
+	} while (read_seqretry(&t->vtime_seqlock, seq));
+}
+
+
+void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
+{
+	cputime_t udelta, sdelta;
+
+	fetch_task_cputime(t, utime, stime, &t->utime,
+			   &t->stime, &udelta, &sdelta);
+	if (utime)
+		*utime += udelta;
+	if (stime)
+		*stime += sdelta;
+}
+
+void task_cputime_scaled(struct task_struct *t,
+			 cputime_t *utimescaled, cputime_t *stimescaled)
+{
+	cputime_t udelta, sdelta;
+
+	fetch_task_cputime(t, utimescaled, stimescaled,
+			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
+	if (utimescaled)
+		*utimescaled += cputime_to_scaled(udelta);
+	if (stimescaled)
+		*stimescaled += cputime_to_scaled(sdelta);
+}
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
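All of the new CONFIG_VIRT_CPU_ACCOUNTING_GEN state is guarded by vtime_seqlock: writers bump a sequence count around updates of vtime_snap/vtime_snap_whence, and lock-free readers such as task_gtime() and fetch_task_cputime() retry until they observe a stable snapshot. A simplified user-space sketch of that retry pattern (single writer assumed, memory-ordering details omitted; illustrative only):

#include <stdatomic.h>

struct vtime_snap {
	atomic_uint seq;		/* even: stable, odd: write in progress */
	unsigned long long snap;
};

/* The kernel's write_seqlock() also serializes concurrent writers,
 * which this sketch leaves out. */
static void write_snap(struct vtime_snap *s, unsigned long long v)
{
	atomic_fetch_add(&s->seq, 1);	/* now odd: readers must wait */
	s->snap = v;
	atomic_fetch_add(&s->seq, 1);	/* even again: snapshot stable */
}

static unsigned long long read_snap(struct vtime_snap *s)
{
	unsigned int seq;
	unsigned long long v;

	do {
		while ((seq = atomic_load(&s->seq)) & 1)
			;		/* writer active, spin */
		v = s->snap;
	} while (atomic_load(&s->seq) != seq);	/* changed underneath? retry */

	return v;
}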
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1680,9 +1680,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	}
 
 	/* ensure we never gain time by being placed backwards. */
-	vruntime = max_vruntime(se->vruntime, vruntime);
-
-	se->vruntime = vruntime;
+	se->vruntime = max_vruntime(se->vruntime, vruntime);
 }
 
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
@@ -3254,25 +3252,18 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  */
 static int select_idle_sibling(struct task_struct *p, int target)
 {
-	int cpu = smp_processor_id();
-	int prev_cpu = task_cpu(p);
 	struct sched_domain *sd;
 	struct sched_group *sg;
-	int i;
+	int i = task_cpu(p);
+
+	if (idle_cpu(target))
+		return target;
 
 	/*
-	 * If the task is going to be woken-up on this cpu and if it is
-	 * already idle, then it is the right target.
+	 * If the prevous cpu is cache affine and idle, don't be stupid.
 	 */
-	if (target == cpu && idle_cpu(cpu))
-		return cpu;
-
-	/*
-	 * If the task is going to be woken-up on the cpu where it previously
-	 * ran and if it is currently idle, then it the right target.
-	 */
-	if (target == prev_cpu && idle_cpu(prev_cpu))
-		return prev_cpu;
+	if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
+		return i;
 
 	/*
 	 * Otherwise, iterate the domains and find an elegible idle cpu.
@@ -3286,7 +3277,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
 			goto next;
 
 		for_each_cpu(i, sched_group_cpus(sg)) {
-			if (!idle_cpu(i))
+			if (i == target || !idle_cpu(i))
 				goto next;
 		}
 
@@ -6101,7 +6092,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
 	 * idle runqueue:
 	 */
 	if (rq->cfs.load.weight)
-		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
+		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
 
 	return rr_interval;
 }
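This is the sched_rr_get_interval() detail flagged in the pull message: for CFS tasks the reported quantum is now derived from the task's own cfs_rq rather than the root, and for SCHED_RR tasks (below) it follows the new sched_rr_timeslice knob. A small user-space probe of the syscall (standard POSIX API, illustrative):

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == 0)	/* pid 0 = calling thread */
		printf("quantum: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	else
		perror("sched_rr_get_interval");
	return 0;
}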
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -7,6 +7,8 @@
 
 #include <linux/slab.h>
 
+int sched_rr_timeslice = RR_TIMESLICE;
+
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
 
 struct rt_bandwidth def_rt_bandwidth;
@@ -925,8 +927,8 @@ static void update_curr_rt(struct rq *rq)
 		return;
 
 	delta_exec = rq->clock_task - curr->se.exec_start;
-	if (unlikely((s64)delta_exec < 0))
-		delta_exec = 0;
+	if (unlikely((s64)delta_exec <= 0))
+		return;
 
 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
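Returning early on delta_exec <= 0 skips pointless zero-delta accounting while keeping the clock-went-backwards guard. The (s64) cast matters because rq->clock_task can lag curr->se.exec_start, and an unsigned subtraction then wraps to a huge positive value; reinterpreting the bits as signed recovers the negative delta. A user-space sketch of that C subtlety:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clock = 100, exec_start = 150;
	uint64_t delta = clock - exec_start;	/* wraps to 2^64 - 50 */

	printf("unsigned: %llu\n", (unsigned long long)delta);
	printf("signed:   %lld\n", (long long)(int64_t)delta);	/* -50 */
	return 0;
}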
@@ -1427,8 +1429,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
-	    (p->nr_cpus_allowed > 1))
+	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 		return 1;
 	return 0;
 }
@@ -1889,8 +1890,11 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	 * we may need to handle the pulling of RT tasks
 	 * now.
 	 */
-	if (p->on_rq && !rq->rt.rt_nr_running)
-		pull_rt_task(rq);
+	if (!p->on_rq || rq->rt.rt_nr_running)
+		return;
+
+	if (pull_rt_task(rq))
+		resched_task(rq->curr);
 }
 
 void init_sched_rt_class(void)
@@ -1985,7 +1989,11 @@ static void watchdog(struct rq *rq, struct task_struct *p)
 	if (soft != RLIM_INFINITY) {
 		unsigned long next;
 
-		p->rt.timeout++;
+		if (p->rt.watchdog_stamp != jiffies) {
+			p->rt.timeout++;
+			p->rt.watchdog_stamp = jiffies;
+		}
+
 		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
 		if (p->rt.timeout > next)
 			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
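The watchdog enforces RLIMIT_RTTIME, the cap on CPU time a realtime task may consume without blocking; with the watchdog_stamp check, p->rt.timeout now advances at most once per jiffy even if the function runs more often. A hedged user-space sketch of arming that limit:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = {
		.rlim_cur = 500000,	/* soft: 500 ms of rt CPU time, in us */
		.rlim_max = 1000000,	/* hard: 1 s */
	};

	if (setrlimit(RLIMIT_RTTIME, &rl) != 0) {
		perror("setrlimit(RLIMIT_RTTIME)");
		return 1;
	}
	/* Exceeding the soft limit without blocking delivers SIGXCPU. */
	return 0;
}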
@@ -2010,7 +2018,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	if (--p->rt.time_slice)
 		return;
 
-	p->rt.time_slice = RR_TIMESLICE;
+	p->rt.time_slice = sched_rr_timeslice;
 
 	/*
 	 * Requeue to the end of queue if we (and all of our ancestors) are the
@@ -2041,7 +2049,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 	 * Time slice is 0 for SCHED_FIFO tasks
 	 */
 	if (task->policy == SCHED_RR)
-		return RR_TIMESLICE;
+		return sched_rr_timeslice;
 	else
 		return 0;
 }
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1,5 +1,7 @@
 
 #include <linux/sched.h>
+#include <linux/sched/sysctl.h>
+#include <linux/sched/rt.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/stop_machine.h>