Merge branch 'sched-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (46 commits)
  sched: Add comments to find_busiest_group() function
  sched: Refactor the power savings balance code
  sched: Optimize the !power_savings_balance during fbg()
  sched: Create a helper function to calculate imbalance
  sched: Create helper to calculate small_imbalance in fbg()
  sched: Create a helper function to calculate sched_domain stats for fbg()
  sched: Define structure to store the sched_domain statistics for fbg()
  sched: Create a helper function to calculate sched_group stats for fbg()
  sched: Define structure to store the sched_group statistics for fbg()
  sched: Fix indentations in find_busiest_group() using gotos
  sched: Simple helper functions for find_busiest_group()
  sched: remove unused fields from struct rq
  sched: jiffies not printed per CPU
  sched: small optimisation of can_migrate_task()
  sched: fix typos in documentation
  sched: add avg_overlap decay
  x86, sched_clock(): mark variables read-mostly
  sched: optimize ttwu vs group scheduling
  sched: TIF_NEED_RESCHED -> need_resched() cleanup
  sched: don't rebalance if attached on NULL domain
  ...
kernel/latencytop.c:

@@ -9,6 +9,44 @@
  * as published by the Free Software Foundation; version 2
  * of the License.
  */

+/*
+ * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
+ * used by the "latencytop" userspace tool. The latency that is tracked is not
+ * the 'traditional' interrupt latency (which is primarily caused by something
+ * else consuming CPU), but instead, it is the latency an application encounters
+ * because the kernel sleeps on its behalf for various reasons.
+ *
+ * This code tracks 2 levels of statistics:
+ * 1) System level latency
+ * 2) Per process latency
+ *
+ * The latency is stored in fixed sized data structures in an accumulated form;
+ * if the "same" latency cause is hit twice, this will be tracked as one entry
+ * in the data structure. Both the count, total accumulated latency and maximum
+ * latency are tracked in this data structure. When the fixed size structure is
+ * full, no new causes are tracked until the buffer is flushed by writing to
+ * the /proc file; the userspace tool does this on a regular basis.
+ *
+ * A latency cause is identified by a stringified backtrace at the point that
+ * the scheduler gets invoked. The userland tool will use this string to
+ * identify the cause of the latency in human readable form.
+ *
+ * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
+ * These files look like this:
+ *
+ * Latency Top version : v0.1
+ * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
+ * |    |    |    |
+ * |    |    |    +----> the stringified backtrace
+ * |    |    +---------> The maximum latency for this entry in microseconds
+ * |    +--------------> The accumulated latency for this entry (microseconds)
+ * +-------------------> The number of times this entry is hit
+ *
+ * (note: the average latency is the accumulated latency divided by the number
+ * of times)
+ */
+
 #include <linux/latencytop.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
@@ -72,7 +110,7 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
 			firstnonnull = i;
 			continue;
 		}
-		for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
+		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
 			unsigned long record = lat->backtrace[q];

 			if (latency_record[i].backtrace[q] != record) {
@@ -101,31 +139,52 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
 	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
 }

-static inline void store_stacktrace(struct task_struct *tsk, struct latency_record *lat)
+/*
+ * Iterator to store a backtrace into a latency record entry
+ */
+static inline void store_stacktrace(struct task_struct *tsk,
+					struct latency_record *lat)
 {
 	struct stack_trace trace;

 	memset(&trace, 0, sizeof(trace));
 	trace.max_entries = LT_BACKTRACEDEPTH;
 	trace.entries = &lat->backtrace[0];
 	trace.skip = 0;
 	save_stack_trace_tsk(tsk, &trace);
 }

+/**
+ * __account_scheduler_latency - record an occured latency
+ * @tsk - the task struct of the task hitting the latency
+ * @usecs - the duration of the latency in microseconds
+ * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
+ *
+ * This function is the main entry point for recording latency entries
+ * as called by the scheduler.
+ *
+ * This function has a few special cases to deal with normal 'non-latency'
+ * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
+ * since this usually is caused by waiting for events via select() and co.
+ *
+ * Negative latencies (caused by time going backwards) are also explicitly
+ * skipped.
+ */
 void __sched
-account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
+__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 {
 	unsigned long flags;
 	int i, q;
 	struct latency_record lat;

 	if (!latencytop_enabled)
 		return;

 	/* Long interruptible waits are generally user requested... */
 	if (inter && usecs > 5000)
 		return;

 	/* Negative sleeps are time going backwards */
 	/* Zero-time sleeps are non-interesting */
 	if (usecs <= 0)
 		return;

 	memset(&lat, 0, sizeof(lat));
 	lat.count = 1;
 	lat.time = usecs;
@@ -143,12 +202,12 @@ account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 	if (tsk->latency_record_count >= LT_SAVECOUNT)
 		goto out_unlock;

-	for (i = 0; i < LT_SAVECOUNT ; i++) {
+	for (i = 0; i < LT_SAVECOUNT; i++) {
 		struct latency_record *mylat;
 		int same = 1;

 		mylat = &tsk->latency_record[i];
-		for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) {
+		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
 			unsigned long record = lat.backtrace[q];

 			if (mylat->backtrace[q] != record) {
@@ -186,7 +245,7 @@ static int lstats_show(struct seq_file *m, void *v)
 	for (i = 0; i < MAXLR; i++) {
 		if (latency_record[i].backtrace[0]) {
 			int q;
-			seq_printf(m, "%i %li %li ",
+			seq_printf(m, "%i %lu %lu ",
 				   latency_record[i].count,
 				   latency_record[i].time,
 				   latency_record[i].max);
@@ -223,7 +282,7 @@ static int lstats_open(struct inode *inode, struct file *filp)
 	return single_open(filp, lstats_show, NULL);
 }

-static struct file_operations lstats_fops = {
+static const struct file_operations lstats_fops = {
 	.open		= lstats_open,
 	.read		= seq_read,
 	.write		= lstats_write,
@@ -236,4 +295,4 @@ static int __init init_lstats_procfs(void)
 	proc_create("latency_stats", 0644, NULL, &lstats_fops);
 	return 0;
 }
-__initcall(init_lstats_procfs);
+device_initcall(init_lstats_procfs);
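Aside (annotation, not part of the commit): the new header comment describes a fixed-size accumulation table, one entry per latency cause, each carrying a hit count, total accumulated latency and worst-case latency. A minimal userspace sketch of that scheme; the names (lat_entry, lat_account) are made up and a string stands in for the stringified backtrace:

#include <stdio.h>
#include <string.h>

#define MAXLR 128

struct lat_entry {
	char cause[64];         /* stands in for the stringified backtrace */
	unsigned int count;     /* how often this cause was hit */
	unsigned long time;     /* accumulated latency, microseconds */
	unsigned long max;      /* worst single latency, microseconds */
};

static struct lat_entry table[MAXLR];

static void lat_account(const char *cause, unsigned long usecs)
{
	int i;

	for (i = 0; i < MAXLR; i++) {
		if (table[i].count && strcmp(table[i].cause, cause) == 0)
			break;          /* same cause hit again: accumulate */
		if (!table[i].count) {
			strncpy(table[i].cause, cause, sizeof(table[i].cause) - 1);
			break;          /* first free slot: new cause */
		}
	}
	if (i == MAXLR)
		return;                 /* table full: drop, as the kernel does */

	table[i].count++;
	table[i].time += usecs;
	if (usecs > table[i].max)
		table[i].max = usecs;
}

int main(void)
{
	lat_account("i915_irq_wait", 4897);
	lat_account("i915_irq_wait", 1200);
	/* prints "2 6097 4897 i915_irq_wait": count, accumulated, max, cause,
	 * i.e. the same field order as a /proc/latency_stats line */
	printf("%u %lu %lu %s\n", table[0].count, table[0].time,
	       table[0].max, table[0].cause);
	return 0;
}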
kernel/sched.c: diff not shown (1060 changed lines; the web view suppressed it because it is too large).
kernel/sched_clock.c:

@@ -24,11 +24,11 @@
 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than 2 jiffies difference).
 */
-#include <linux/sched.h>
-#include <linux/percpu.h>
 #include <linux/spinlock.h>
-#include <linux/ktime.h>
 #include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>

 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -43,6 +43,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 static __read_mostly int sched_clock_running;

 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+__read_mostly int sched_clock_stable;

 struct sched_clock_data {
 	/*
@@ -87,7 +88,7 @@ void sched_clock_init(void)
 }

 /*
- * min,max except they take wrapping into account
+ * min, max except they take wrapping into account
 */

 static inline u64 wrap_min(u64 x, u64 y)
@@ -111,15 +112,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	s64 delta = now - scd->tick_raw;
 	u64 clock, min_clock, max_clock;

-	WARN_ON_ONCE(!irqs_disabled());
-
 	if (unlikely(delta < 0))
 		delta = 0;

 	/*
 	 * scd->clock = clamp(scd->tick_gtod + delta,
-	 *		      max(scd->tick_gtod, scd->clock),
-	 *		      scd->tick_gtod + TICK_NSEC);
+	 *	      max(scd->tick_gtod, scd->clock),
+	 *	      scd->tick_gtod + TICK_NSEC);
 	 */

 	clock = scd->tick_gtod + delta;
@@ -148,12 +147,13 @@ static void lock_double_clock(struct sched_clock_data *data1,

 u64 sched_clock_cpu(int cpu)
 {
-	struct sched_clock_data *scd = cpu_sdc(cpu);
 	u64 now, clock, this_clock, remote_clock;
+	struct sched_clock_data *scd;

 	if (unlikely(!sched_clock_running))
 		return 0ull;

+	if (sched_clock_stable)
+		return sched_clock();
+
+	scd = cpu_sdc(cpu);
 	WARN_ON_ONCE(!irqs_disabled());
 	now = sched_clock();

@@ -195,14 +195,18 @@ u64 sched_clock_cpu(int cpu)

 void sched_clock_tick(void)
 {
-	struct sched_clock_data *scd = this_scd();
+	struct sched_clock_data *scd;
 	u64 now, now_gtod;

+	if (sched_clock_stable)
+		return;
+
 	if (unlikely(!sched_clock_running))
 		return;

 	WARN_ON_ONCE(!irqs_disabled());

+	scd = this_scd();
 	now_gtod = ktime_to_ns(ktime_get());
 	now = sched_clock();

@@ -250,7 +254,7 @@ u64 sched_clock_cpu(int cpu)
 	return sched_clock();
 }

-#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

 unsigned long long cpu_clock(int cpu)
 {
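Aside (annotation, not part of the commit): the clamp quoted in the comment above is what keeps a per-cpu sched_clock value monotonic and within one tick of the GTOD base. A standalone sketch of wrap_min()/wrap_max() and the clamp; comparing via a signed difference keeps the result correct across u64 wraparound, and the tick length below is an assumption for the example:

#include <stdio.h>
#include <stdint.h>

#define TICK_NSEC 1000000ULL	/* assumption: 1 kHz tick for the example */

static uint64_t wrap_min(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) < 0 ? x : y;
}

static uint64_t wrap_max(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) > 0 ? x : y;
}

int main(void)
{
	uint64_t tick_gtod = 5000000, prev_clock = 5300000, delta = 2500000;

	/* clock = clamp(tick_gtod + delta,
	 *               max(tick_gtod, prev_clock),   <- never go backwards
	 *               tick_gtod + TICK_NSEC);       <- never run a tick ahead
	 */
	uint64_t clock     = tick_gtod + delta;
	uint64_t min_clock = wrap_max(tick_gtod, prev_clock);
	uint64_t max_clock = tick_gtod + TICK_NSEC;

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	/* prints 6000000: the raw 7500000 was capped at one tick past gtod */
	printf("%llu\n", (unsigned long long)clock);
	return 0;
}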
kernel/sched_debug.c:

@@ -272,7 +272,6 @@ static void print_cpu(struct seq_file *m, int cpu)
 	P(nr_switches);
 	P(nr_load_updates);
 	P(nr_uninterruptible);
-	SEQ_printf(m, "  .%-30s: %lu\n", "jiffies", jiffies);
 	PN(next_balance);
 	P(curr->pid);
 	PN(clock);
@@ -287,9 +286,6 @@ static void print_cpu(struct seq_file *m, int cpu)
 #ifdef CONFIG_SCHEDSTATS
 #define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);

-	P(yld_exp_empty);
-	P(yld_act_empty);
-	P(yld_both_empty);
 	P(yld_count);

 	P(sched_switch);
@@ -314,7 +310,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;

-	SEQ_printf(m, "Sched Debug Version: v0.08, %s %.*s\n",
+	SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
 		init_utsname()->version);
@@ -325,6 +321,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
 #define PN(x) \
 	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
+	P(jiffies);
 	PN(sysctl_sched_latency);
 	PN(sysctl_sched_min_granularity);
 	PN(sysctl_sched_wakeup_granularity);
@@ -397,6 +394,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	PN(se.vruntime);
 	PN(se.sum_exec_runtime);
 	PN(se.avg_overlap);
+	PN(se.avg_wakeup);

 	nr_switches = p->nvcsw + p->nivcsw;
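Aside (annotation, not part of the commit): the P()/PN() macros above rely on preprocessor stringification -- #n prints the expression itself as the row label, so each field only has to be named once. A compilable miniature of the pattern, with a made-up struct standing in for the runqueue:

#include <stdio.h>

struct rq_demo { int nr_switches; long clock; };

#define P(n)  printf("  .%-30s: %d\n",  #n, rq->n)
#define PN(n) printf("  .%-30s: %ld\n", #n, rq->n)

int main(void)
{
	struct rq_demo demo = { .nr_switches = 42, .clock = 123456 }, *rq = &demo;

	P(nr_switches);		/* prints "  .nr_switches [padded]: 42" */
	PN(clock);		/* prints "  .clock       [padded]: 123456" */
	return 0;
}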
kernel/sched_fair.c:

@@ -1314,16 +1314,63 @@ out:
 }
 #endif /* CONFIG_SMP */

-static unsigned long wakeup_gran(struct sched_entity *se)
+/*
+ * Adaptive granularity
+ *
+ * se->avg_wakeup gives the average time a task runs until it does a wakeup,
+ * with the limit of wakeup_gran -- when it never does a wakeup.
+ *
+ * So the smaller avg_wakeup is the faster we want this task to preempt,
+ * but we don't want to treat the preemptee unfairly and therefore allow it
+ * to run for at least the amount of time we'd like to run.
+ *
+ * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
+ *
+ * NOTE: we use *nr_running to scale with load, this nicely matches the
+ *       degrading latency on load.
+ */
+static unsigned long
+adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
+{
+	u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+	u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
+	u64 gran = 0;
+
+	if (this_run < expected_wakeup)
+		gran = expected_wakeup - this_run;
+
+	return min_t(s64, gran, sysctl_sched_wakeup_granularity);
+}
+
+static unsigned long
+wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
 {
 	unsigned long gran = sysctl_sched_wakeup_granularity;

+	if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
+		gran = adaptive_gran(curr, se);
+
 	/*
-	 * More easily preempt - nice tasks, while not making it harder for
-	 * + nice tasks.
+	 * Since its curr running now, convert the gran from real-time
+	 * to virtual-time in his units.
 	 */
-	if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD)
-		gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
+	if (sched_feat(ASYM_GRAN)) {
+		/*
+		 * By using 'se' instead of 'curr' we penalize light tasks, so
+		 * they get preempted easier. That is, if 'se' < 'curr' then
+		 * the resulting gran will be larger, therefore penalizing the
+		 * lighter, if otoh 'se' > 'curr' then the resulting gran will
+		 * be smaller, again penalizing the lighter task.
+		 *
+		 * This is especially important for buddies when the leftmost
+		 * task is higher priority than the buddy.
+		 */
+		if (unlikely(se->load.weight != NICE_0_LOAD))
+			gran = calc_delta_fair(gran, se);
+	} else {
+		if (unlikely(curr->load.weight != NICE_0_LOAD))
+			gran = calc_delta_fair(gran, curr);
+	}

 	return gran;
 }
@@ -1350,7 +1397,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 	if (vdiff <= 0)
 		return -1;

-	gran = wakeup_gran(curr);
+	gran = wakeup_gran(curr, se);
 	if (vdiff > gran)
 		return 1;
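Aside (annotation, not part of the commit): a worked example of the adaptive granularity rule added above. expected_wakeup is 2*avg_wakeup scaled by nr_running; whatever is left of that budget after the current task's run so far becomes the preemption granularity, capped by sysctl_sched_wakeup_granularity. All input values below are made up:

#include <stdio.h>
#include <stdint.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

static uint64_t adaptive_gran(uint64_t this_run,	/* ns curr ran so far */
			      uint64_t avg_wakeup,	/* ns between wakeups */
			      unsigned int nr_running,
			      uint64_t wakeup_gran)	/* sysctl cap, ns */
{
	uint64_t expected_wakeup = 2 * avg_wakeup * nr_running;
	uint64_t gran = 0;

	if (this_run < expected_wakeup)
		gran = expected_wakeup - this_run;	/* let the burst finish */

	return min_u64(gran, wakeup_gran);
}

int main(void)
{
	/* 1ms into an expected 2*1.5ms*2 = 6ms burst: the remaining budget
	 * exceeds the 5ms sysctl cap, so preemption is no easier than default */
	printf("%llu\n", (unsigned long long)
	       adaptive_gran(1000000, 1500000, 2, 5000000));	/* 5000000 */

	/* frequent waker (avg_wakeup = 100us): 2*100us*2 is already spent
	 * after 1ms of runtime, so gran drops to 0 -> immediate preemption */
	printf("%llu\n", (unsigned long long)
	       adaptive_gran(1000000, 100000, 2, 5000000));	/* 0 */
	return 0;
}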
kernel/sched_features.h:

@@ -1,5 +1,6 @@
 SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
-SCHED_FEAT(NORMALIZED_SLEEPER, 1)
+SCHED_FEAT(NORMALIZED_SLEEPER, 0)
+SCHED_FEAT(ADAPTIVE_GRAN, 1)
 SCHED_FEAT(WAKEUP_PREEMPT, 1)
 SCHED_FEAT(START_DEBIT, 1)
 SCHED_FEAT(AFFINE_WAKEUPS, 1)
kernel/sched_rt.c:

@@ -3,6 +3,40 @@
  * policies)
  */

+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+	return container_of(rt_se, struct task_struct, rt);
+}
+
+#ifdef CONFIG_RT_GROUP_SCHED
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+	return rt_rq->rq;
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+	return rt_se->rt_rq;
+}
+
+#else /* CONFIG_RT_GROUP_SCHED */
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+	return container_of(rt_rq, struct rq, rt);
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+	struct task_struct *p = rt_task_of(rt_se);
+	struct rq *rq = task_rq(p);
+
+	return &rq->rt;
+}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
 #ifdef CONFIG_SMP

 static inline int rt_overloaded(struct rq *rq)
@@ -37,25 +71,69 @@ static inline void rt_clear_overload(struct rq *rq)
 	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 }

-static void update_rt_migration(struct rq *rq)
+static void update_rt_migration(struct rt_rq *rt_rq)
 {
-	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
-		if (!rq->rt.overloaded) {
-			rt_set_overload(rq);
-			rq->rt.overloaded = 1;
+	if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
+		if (!rt_rq->overloaded) {
+			rt_set_overload(rq_of_rt_rq(rt_rq));
+			rt_rq->overloaded = 1;
 		}
-	} else if (rq->rt.overloaded) {
-		rt_clear_overload(rq);
-		rq->rt.overloaded = 0;
+	} else if (rt_rq->overloaded) {
+		rt_clear_overload(rq_of_rt_rq(rt_rq));
+		rt_rq->overloaded = 0;
 	}
 }
-#endif /* CONFIG_SMP */

-static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
-	return container_of(rt_se, struct task_struct, rt);
+	if (rt_se->nr_cpus_allowed > 1)
+		rt_rq->rt_nr_migratory++;
+
+	update_rt_migration(rt_rq);
 }
+
+static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	if (rt_se->nr_cpus_allowed > 1)
+		rt_rq->rt_nr_migratory--;
+
+	update_rt_migration(rt_rq);
+}
+
+static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+	plist_node_init(&p->pushable_tasks, p->prio);
+	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
+}
+
+static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
+}
+
+#else
+
+static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline
+void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+}
+
+static inline
+void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+}
+
+#endif /* CONFIG_SMP */
+
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
 	return !list_empty(&rt_se->run_list);
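Aside (annotation, not part of the commit): rq->rt.pushable_tasks above is a kernel plist -- a list kept sorted by priority, so its first entry is always the highest-priority (lowest-numbered) pushable task. A minimal userspace analog using a sorted singly-linked list; real plists are doubly linked with per-priority sublists, this sketch only mirrors the ordering invariant:

#include <stdio.h>

struct node {
	int prio;		/* lower number = higher priority, like RT prio */
	const char *name;
	struct node *next;
};

/* insert keeping the list sorted by ascending prio (FIFO within a prio) */
static void plist_add_demo(struct node **head, struct node *n)
{
	while (*head && (*head)->prio <= n->prio)
		head = &(*head)->next;
	n->next = *head;
	*head = n;
}

int main(void)
{
	struct node a = { 10, "rt10", 0 }, b = { 5, "rt5", 0 }, c = { 20, "rt20", 0 };
	struct node *head = 0;

	plist_add_demo(&head, &a);
	plist_add_demo(&head, &b);
	plist_add_demo(&head, &c);

	/* the "first entry" is the best candidate to push: prints rt5 */
	printf("%s\n", head->name);
	return 0;
}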
@@ -79,16 +157,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 #define for_each_leaf_rt_rq(rt_rq, rq) \
 	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
-{
-	return rt_rq->rq;
-}
-
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
-{
-	return rt_se->rt_rq;
-}
-
 #define for_each_sched_rt_entity(rt_se) \
 	for (; rt_se; rt_se = rt_se->parent)

@@ -108,7 +176,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
 			enqueue_rt_entity(rt_se);
-		if (rt_rq->highest_prio < curr->prio)
+		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_task(curr);
 	}
 }
@@ -176,19 +244,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 #define for_each_leaf_rt_rq(rt_rq, rq) \
 	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
-{
-	return container_of(rt_rq, struct rq, rt);
-}
-
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
-{
-	struct task_struct *p = rt_task_of(rt_se);
-	struct rq *rq = task_rq(p);
-
-	return &rq->rt;
-}
-
 #define for_each_sched_rt_entity(rt_se) \
 	for (; rt_se; rt_se = NULL)

@@ -473,7 +528,7 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 	struct rt_rq *rt_rq = group_rt_rq(rt_se);

 	if (rt_rq)
-		return rt_rq->highest_prio;
+		return rt_rq->highest_prio.curr;
 #endif

 	return rt_task_of(rt_se)->prio;
@@ -547,91 +602,174 @@ static void update_curr_rt(struct rq *rq)
 	}
 }

-static inline
-void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+#if defined CONFIG_SMP
+
+static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
+
+static inline int next_prio(struct rq *rq)
 {
-	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
-	rt_rq->rt_nr_running++;
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-	if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
-#ifdef CONFIG_SMP
-		struct rq *rq = rq_of_rt_rq(rt_rq);
-#endif
+	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
+
+	if (next && rt_prio(next->prio))
+		return next->prio;
+	else
+		return MAX_RT_PRIO;
+}
+
+static void
+inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);

-		rt_rq->highest_prio = rt_se_prio(rt_se);
-#ifdef CONFIG_SMP
-		if (rq->online)
-			cpupri_set(&rq->rd->cpupri, rq->cpu,
-				   rt_se_prio(rt_se));
-#endif
-	}
-#endif
-#ifdef CONFIG_SMP
-	if (rt_se->nr_cpus_allowed > 1) {
-		struct rq *rq = rq_of_rt_rq(rt_rq);
+	if (prio < prev_prio) {

-		rq->rt.rt_nr_migratory++;
-	}
+		/*
+		 * If the new task is higher in priority than anything on the
+		 * run-queue, we know that the previous high becomes our
+		 * next-highest.
+		 */
+		rt_rq->highest_prio.next = prev_prio;

-	update_rt_migration(rq_of_rt_rq(rt_rq));
-#endif
+		if (rq->online)
+			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
+
+	} else if (prio == rt_rq->highest_prio.curr)
+		/*
+		 * If the next task is equal in priority to the highest on
+		 * the run-queue, then we implicitly know that the next highest
+		 * task cannot be any lower than current
+		 */
+		rt_rq->highest_prio.next = prio;
+	else if (prio < rt_rq->highest_prio.next)
+		/*
+		 * Otherwise, we need to recompute next-highest
+		 */
+		rt_rq->highest_prio.next = next_prio(rq);
+}
+
+static void
+dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
+		rt_rq->highest_prio.next = next_prio(rq);
+
+	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
+		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
+}
+
+#else /* CONFIG_SMP */
+
+static inline
+void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+static inline
+void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
+
+#endif /* CONFIG_SMP */
+
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+static void
+inc_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+	int prev_prio = rt_rq->highest_prio.curr;
+
+	if (prio < prev_prio)
+		rt_rq->highest_prio.curr = prio;
+
+	inc_rt_prio_smp(rt_rq, prio, prev_prio);
+}
+
+static void
+dec_rt_prio(struct rt_rq *rt_rq, int prio)
+{
+	int prev_prio = rt_rq->highest_prio.curr;
+
+	if (rt_rq->rt_nr_running) {
+
+		WARN_ON(prio < prev_prio);
+
+		/*
+		 * This may have been our highest task, and therefore
+		 * we may have some recomputation to do
+		 */
+		if (prio == prev_prio) {
+			struct rt_prio_array *array = &rt_rq->active;
+
+			rt_rq->highest_prio.curr =
+				sched_find_first_bit(array->bitmap);
+		}
+
+	} else
+		rt_rq->highest_prio.curr = MAX_RT_PRIO;
+
+	dec_rt_prio_smp(rt_rq, prio, prev_prio);
+}
+
+#else
+
+static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
+static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
+
+#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
+
 #ifdef CONFIG_RT_GROUP_SCHED
+
+static void
+inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
 	if (rt_se_boosted(rt_se))
 		rt_rq->rt_nr_boosted++;

 	if (rt_rq->tg)
 		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
-#else
+}
+
+static void
+dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	if (rt_se_boosted(rt_se))
+		rt_rq->rt_nr_boosted--;
+
+	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
+}
+
+#else /* CONFIG_RT_GROUP_SCHED */
+
+static void
+inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
 	start_rt_bandwidth(&def_rt_bandwidth);
-#endif
 }

+static inline
+void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
+
+#endif /* CONFIG_RT_GROUP_SCHED */
+
+static inline
+void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
+{
+	int prio = rt_se_prio(rt_se);
+
+	WARN_ON(!rt_prio(prio));
+	rt_rq->rt_nr_running++;
+
+	inc_rt_prio(rt_rq, prio);
+	inc_rt_migration(rt_se, rt_rq);
+	inc_rt_group(rt_se, rt_rq);
+}
+
 static inline
 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
-#ifdef CONFIG_SMP
-	int highest_prio = rt_rq->highest_prio;
-#endif
-
 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
 	WARN_ON(!rt_rq->rt_nr_running);
 	rt_rq->rt_nr_running--;
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-	if (rt_rq->rt_nr_running) {
-		struct rt_prio_array *array;
-
-		WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
-		if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
-			/* recalculate */
-			array = &rt_rq->active;
-			rt_rq->highest_prio =
-				sched_find_first_bit(array->bitmap);
-		} /* otherwise leave rq->highest prio alone */
-	} else
-		rt_rq->highest_prio = MAX_RT_PRIO;
-#endif
-#ifdef CONFIG_SMP
-	if (rt_se->nr_cpus_allowed > 1) {
-		struct rq *rq = rq_of_rt_rq(rt_rq);
-		rq->rt.rt_nr_migratory--;
-	}
-
-	if (rt_rq->highest_prio != highest_prio) {
-		struct rq *rq = rq_of_rt_rq(rt_rq);
-
-		if (rq->online)
-			cpupri_set(&rq->rd->cpupri, rq->cpu,
-				   rt_rq->highest_prio);
-	}

-	update_rt_migration(rq_of_rt_rq(rt_rq));
-#endif /* CONFIG_SMP */
-#ifdef CONFIG_RT_GROUP_SCHED
-	if (rt_se_boosted(rt_se))
-		rt_rq->rt_nr_boosted--;
-
-	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
-#endif
+	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
+	dec_rt_migration(rt_se, rt_rq);
+	dec_rt_group(rt_se, rt_rq);
 }

 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
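Aside (annotation, not part of the commit): the hunk above replaces the single cached highest_prio with a two-level cache, highest_prio.curr and highest_prio.next, so a remote CPU can compare against the next-highest queued priority without taking the lock and scanning. A condensed sketch of the inc-side bookkeeping (the dec side must rescan, as dec_rt_prio() above shows):

#include <stdio.h>

#define MAX_RT_PRIO 100

struct prio_cache {
	int curr;	/* highest queued prio (lowest number) */
	int next;	/* next-highest queued prio */
};

static void inc_prio(struct prio_cache *hp, int prio)
{
	int prev = hp->curr;

	if (prio < prev) {
		/* new best: the old best becomes the next-highest */
		hp->curr = prio;
		hp->next = prev;
	} else if (prio == prev || prio < hp->next) {
		/* ties the best, or beats the runner-up */
		hp->next = prio;
	}
}

int main(void)
{
	struct prio_cache hp = { MAX_RT_PRIO, MAX_RT_PRIO };

	inc_prio(&hp, 50);	/* curr=50 next=100 */
	inc_prio(&hp, 30);	/* curr=30 next=50 */
	inc_prio(&hp, 40);	/* curr=30 next=40 */
	printf("curr=%d next=%d\n", hp.curr, hp.next);
	return 0;
}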
@@ -718,6 +856,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)

 	enqueue_rt_entity(rt_se);

+	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
+		enqueue_pushable_task(rq, p);
+
 	inc_cpu_load(rq, p->se.load.weight);
 }

@@ -728,6 +869,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 	update_curr_rt(rq);
 	dequeue_rt_entity(rt_se);

+	dequeue_pushable_task(rq, p);
+
 	dec_cpu_load(rq, p->se.load.weight);
 }

@@ -878,7 +1021,7 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
 	return next;
 }

-static struct task_struct *pick_next_task_rt(struct rq *rq)
+static struct task_struct *_pick_next_task_rt(struct rq *rq)
 {
 	struct sched_rt_entity *rt_se;
 	struct task_struct *p;
@@ -900,6 +1043,18 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)

 	p = rt_task_of(rt_se);
 	p->se.exec_start = rq->clock;

 	return p;
 }

+static struct task_struct *pick_next_task_rt(struct rq *rq)
+{
+	struct task_struct *p = _pick_next_task_rt(rq);
+
+	/* The running task is never eligible for pushing */
+	if (p)
+		dequeue_pushable_task(rq, p);
+
+	return p;
+}
+
@@ -907,6 +1062,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
 	update_curr_rt(rq);
 	p->se.exec_start = 0;
+
+	/*
+	 * The previous task needs to be made eligible for pushing
+	 * if it is still active
+	 */
+	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
+		enqueue_pushable_task(rq, p);
 }

 #ifdef CONFIG_SMP
@@ -1072,7 +1234,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 	}

 	/* If this rq is still suitable use it. */
-	if (lowest_rq->rt.highest_prio > task->prio)
+	if (lowest_rq->rt.highest_prio.curr > task->prio)
 		break;

 	/* try again */
@@ -1083,6 +1245,31 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 	return lowest_rq;
 }

+static inline int has_pushable_tasks(struct rq *rq)
+{
+	return !plist_head_empty(&rq->rt.pushable_tasks);
+}
+
+static struct task_struct *pick_next_pushable_task(struct rq *rq)
+{
+	struct task_struct *p;
+
+	if (!has_pushable_tasks(rq))
+		return NULL;
+
+	p = plist_first_entry(&rq->rt.pushable_tasks,
+			      struct task_struct, pushable_tasks);
+
+	BUG_ON(rq->cpu != task_cpu(p));
+	BUG_ON(task_current(rq, p));
+	BUG_ON(p->rt.nr_cpus_allowed <= 1);
+
+	BUG_ON(!p->se.on_rq);
+	BUG_ON(!rt_task(p));
+
+	return p;
+}
+
 /*
  * If the current CPU has more than one RT task, see if the non
  * running task can migrate over to a CPU that is running a task
@@ -1092,13 +1279,11 @@ static int push_rt_task(struct rq *rq)
 {
 	struct task_struct *next_task;
 	struct rq *lowest_rq;
-	int ret = 0;
-	int paranoid = RT_MAX_TRIES;

 	if (!rq->rt.overloaded)
 		return 0;

-	next_task = pick_next_highest_task_rt(rq, -1);
+	next_task = pick_next_pushable_task(rq);
 	if (!next_task)
 		return 0;

@@ -1127,16 +1312,34 @@ static int push_rt_task(struct rq *rq)
 		struct task_struct *task;
 		/*
 		 * find lock_lowest_rq releases rq->lock
-		 * so it is possible that next_task has changed.
-		 * If it has, then try again.
+		 * so it is possible that next_task has migrated.
+		 *
+		 * We need to make sure that the task is still on the same
+		 * run-queue and is also still the next task eligible for
+		 * pushing.
 		 */
-		task = pick_next_highest_task_rt(rq, -1);
-		if (unlikely(task != next_task) && task && paranoid--) {
-			put_task_struct(next_task);
-			next_task = task;
-			goto retry;
+		task = pick_next_pushable_task(rq);
+		if (task_cpu(next_task) == rq->cpu && task == next_task) {
+			/*
+			 * If we get here, the task hasnt moved at all, but
+			 * it has failed to push.  We will not try again,
+			 * since the other cpus will pull from us when they
+			 * are ready.
+			 */
+			dequeue_pushable_task(rq, next_task);
+			goto out;
 		}
-		goto out;
+
+		if (!task)
+			/* No more tasks, just exit */
+			goto out;
+
+		/*
+		 * Something has shifted, try again.
+		 */
+		put_task_struct(next_task);
+		next_task = task;
+		goto retry;
 	}

 	deactivate_task(rq, next_task, 0);
@@ -1147,23 +1350,12 @@ static int push_rt_task(struct rq *rq)

 	double_unlock_balance(rq, lowest_rq);

-	ret = 1;
 out:
 	put_task_struct(next_task);

-	return ret;
+	return 1;
 }

-/*
- * TODO: Currently we just use the second highest prio task on
- *       the queue, and stop when it can't migrate (or there's
- *       no more RT tasks).  There may be a case where a lower
- *       priority RT task has a different affinity than the
- *       higher RT task. In this case the lower RT task could
- *       possibly be able to migrate where as the higher priority
- *       RT task could not.  We currently ignore this issue.
- *       Enhancements are welcome!
- */
 static void push_rt_tasks(struct rq *rq)
 {
 	/* push_rt_task will return true if it moved an RT */
@@ -1174,33 +1366,35 @@ static void push_rt_tasks(struct rq *rq)
 static int pull_rt_task(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu, ret = 0, cpu;
-	struct task_struct *p, *next;
+	struct task_struct *p;
 	struct rq *src_rq;

 	if (likely(!rt_overloaded(this_rq)))
 		return 0;

-	next = pick_next_task_rt(this_rq);
-
 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;

 		src_rq = cpu_rq(cpu);
+
+		/*
+		 * Don't bother taking the src_rq->lock if the next highest
+		 * task is known to be lower-priority than our current task.
+		 * This may look racy, but if this value is about to go
+		 * logically higher, the src_rq will push this task away.
+		 * And if its going logically lower, we do not care
+		 */
+		if (src_rq->rt.highest_prio.next >=
+		    this_rq->rt.highest_prio.curr)
+			continue;
+
 		/*
 		 * We can potentially drop this_rq's lock in
 		 * double_lock_balance, and another CPU could
-		 * steal our next task - hence we must cause
-		 * the caller to recalculate the next task
-		 * in that case:
+		 * alter this_rq
 		 */
-		if (double_lock_balance(this_rq, src_rq)) {
-			struct task_struct *old_next = next;
-
-			next = pick_next_task_rt(this_rq);
-			if (next != old_next)
-				ret = 1;
-		}
+		double_lock_balance(this_rq, src_rq);

 		/*
 		 * Are there still pullable RT tasks?
@@ -1214,7 +1408,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 * Do we have an RT task that preempts
 		 * the to-be-scheduled task?
 		 */
-		if (p && (!next || (p->prio < next->prio))) {
+		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
 			WARN_ON(p == src_rq->curr);
 			WARN_ON(!p->se.on_rq);

@@ -1224,12 +1418,9 @@ static int pull_rt_task(struct rq *this_rq)
 			 * This is just that p is wakeing up and hasn't
 			 * had a chance to schedule. We only pull
 			 * p if it is lower in priority than the
-			 * current task on the run queue or
-			 * this_rq next task is lower in prio than
-			 * the current task on that rq.
+			 * current task on the run queue
 			 */
-			if (p->prio < src_rq->curr->prio ||
-			    (next && next->prio < src_rq->curr->prio))
+			if (p->prio < src_rq->curr->prio)
 				goto skip;

 			ret = 1;
@@ -1242,13 +1433,7 @@ static int pull_rt_task(struct rq *this_rq)
 			 * case there's an even higher prio task
 			 * in another runqueue. (low likelyhood
 			 * but possible)
-			 *
-			 * Update next so that we won't pick a task
-			 * on another cpu with a priority lower (or equal)
-			 * than the one we just picked.
 			 */
-			next = p;
-
 		}
 skip:
 		double_unlock_balance(this_rq, src_rq);
@@ -1260,24 +1445,27 @@ static int pull_rt_task(struct rq *this_rq)
 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 {
 	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
+	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
 		pull_rt_task(rq);
 }

+/*
+ * assumes rq->lock is held
+ */
+static int needs_post_schedule_rt(struct rq *rq)
+{
+	return has_pushable_tasks(rq);
+}
+
 static void post_schedule_rt(struct rq *rq)
 {
 	/*
-	 * If we have more than one rt_task queued, then
-	 * see if we can push the other rt_tasks off to other CPUS.
-	 * Note we may release the rq lock, and since
-	 * the lock was owned by prev, we need to release it
-	 * first via finish_lock_switch and then reaquire it here.
+	 * This is only called if needs_post_schedule_rt() indicates that
+	 * we need to push tasks away
 	 */
-	if (unlikely(rq->rt.overloaded)) {
-		spin_lock_irq(&rq->lock);
-		push_rt_tasks(rq);
-		spin_unlock_irq(&rq->lock);
-	}
+	spin_lock_irq(&rq->lock);
+	push_rt_tasks(rq);
+	spin_unlock_irq(&rq->lock);
 }

 /*
@@ -1288,7 +1476,8 @@ static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    rq->rt.overloaded)
+	    has_pushable_tasks(rq) &&
+	    p->rt.nr_cpus_allowed > 1)
 		push_rt_tasks(rq);
 }

@@ -1324,6 +1513,24 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
 		struct rq *rq = task_rq(p);

+		if (!task_current(rq, p)) {
+			/*
+			 * Make sure we dequeue this task from the pushable list
+			 * before going further.  It will either remain off of
+			 * the list because we are no longer pushable, or it
+			 * will be requeued.
+			 */
+			if (p->rt.nr_cpus_allowed > 1)
+				dequeue_pushable_task(rq, p);
+
+			/*
+			 * Requeue if our weight is changing and still > 1
+			 */
+			if (weight > 1)
+				enqueue_pushable_task(rq, p);
+
+		}
+
 		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
 			rq->rt.rt_nr_migratory++;
 		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
@@ -1331,7 +1538,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 			rq->rt.rt_nr_migratory--;
 		}

-		update_rt_migration(rq);
+		update_rt_migration(&rq->rt);
 	}

 	cpumask_copy(&p->cpus_allowed, new_mask);
@@ -1346,7 +1553,7 @@ static void rq_online_rt(struct rq *rq)

 	__enable_runtime(rq);

-	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
+	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
 }

 /* Assumes rq->lock is held */
@@ -1438,7 +1645,7 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
 		 * can release the rq lock and p could migrate.
 		 * Only reschedule if p is still on the same runqueue.
 		 */
-		if (p->prio > rq->rt.highest_prio && rq->curr == p)
+		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
 			resched_task(p);
 #else
 		/* For UP simply resched on drop of prio */
@@ -1509,6 +1716,9 @@ static void set_curr_task_rt(struct rq *rq)
 	struct task_struct *p = rq->curr;

 	p->se.exec_start = rq->clock;
+
+	/* The running task is never eligible for pushing */
+	dequeue_pushable_task(rq, p);
 }

 static const struct sched_class rt_sched_class = {
@@ -1531,6 +1741,7 @@ static const struct sched_class rt_sched_class = {
 	.rq_online              = rq_online_rt,
 	.rq_offline             = rq_offline_rt,
 	.pre_schedule		= pre_schedule_rt,
+	.needs_post_schedule	= needs_post_schedule_rt,
 	.post_schedule		= post_schedule_rt,
 	.task_wake_up		= task_wake_up_rt,
 	.switched_from		= switched_from_rt,
kernel/sched_stats.h:

@@ -4,7 +4,7 @@
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
-#define SCHEDSTAT_VERSION 14
+#define SCHEDSTAT_VERSION 15

 static int show_schedstat(struct seq_file *seq, void *v)
 {
@@ -26,9 +26,8 @@ static int show_schedstat(struct seq_file *seq, void *v)

 		/* runqueue-specific stats */
 		seq_printf(seq,
-		    "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
-		    cpu, rq->yld_both_empty,
-		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
+		    "cpu%d %u %u %u %u %u %u %llu %llu %lu",
+		    cpu, rq->yld_count,
 		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
 		    rq->ttwu_count, rq->ttwu_local,
 		    rq->rq_cpu_time,
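Aside (annotation, not part of the commit): with SCHEDSTAT_VERSION 15 the three yld_*_empty counters are gone, so a cpu line in /proc/schedstat carries nine fields after the cpu name. A minimal parser for the new line format; the names of the two trailing fields (run_delay and pcount, from rq_sched_info) are not visible in the truncated hunk above and are taken from the kernel source of that era, so treat them as an assumption:

#include <stdio.h>

int main(void)
{
	const char *line = "cpu0 120 3500 4200 700 800 150 123456789 987654 42";
	unsigned int cpu, yld_count, sched_switch, sched_count, sched_goidle;
	unsigned int ttwu_count, ttwu_local;
	unsigned long long rq_cpu_time, run_delay;	/* assumed field names */
	unsigned long pcount;

	if (sscanf(line, "cpu%u %u %u %u %u %u %u %llu %llu %lu",
		   &cpu, &yld_count, &sched_switch, &sched_count,
		   &sched_goidle, &ttwu_count, &ttwu_local,
		   &rq_cpu_time, &run_delay, &pcount) != 10) {
		fprintf(stderr, "not a version-15 cpu line\n");
		return 1;
	}
	printf("cpu%u: %u wakeups (%u local)\n", cpu, ttwu_count, ttwu_local);
	return 0;
}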