sched: use a 2-d bitmap for searching lowest-pri CPU
The current code uses a linear algorithm which causes scaling issues on larger SMP machines. This patch replaces that algorithm with a 2-dimensional bitmap to reduce latencies in the wake-up path.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Acked-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 6e0534f278
parent f333fdc909
committed by Ingo Molnar
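
The hunks below show only the kernel/sched_rt.c side of the change; the 2-d bitmap itself lives in a new cpupri structure that this commit adds under kernel/sched_cpupri.{h,c}, which is not rendered on this page. Roughly, as a sketch reconstructed from the merged code (exact constants and field types may differ):

	/* One vector per priority level: how many CPUs currently sit at
	 * that level, and which ones.  pri_active is the other dimension
	 * of the bitmap: one bit per level that has any CPUs at all, so
	 * a search can skip empty levels with a couple of word scans. */

	#define CPUPRI_INVALID		-1
	#define CPUPRI_IDLE		 0
	#define CPUPRI_NORMAL		 1
	/* levels 2..101 map the 100 RT priorities, lowest priority first */
	#define CPUPRI_NR_PRIORITIES	(MAX_RT_PRIO + 2)
	#define CPUPRI_NR_PRI_WORDS	BITS_TO_LONGS(CPUPRI_NR_PRIORITIES)

	struct cpupri_vec {
		spinlock_t	lock;
		int		count;	/* nr of CPUs at this level */
		cpumask_t	mask;	/* which CPUs they are */
	};

	struct cpupri {
		struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
		unsigned long	  pri_active[CPUPRI_NR_PRI_WORDS];
		int		  cpu_to_pri[NR_CPUS];	/* current level of each CPU */
	};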
@@ -391,8 +391,11 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
 	rt_rq->rt_nr_running++;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-	if (rt_se_prio(rt_se) < rt_rq->highest_prio)
+	if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
+		struct rq *rq = rq_of_rt_rq(rt_rq);
 		rt_rq->highest_prio = rt_se_prio(rt_se);
+		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_se_prio(rt_se));
+	}
 #endif
 #ifdef CONFIG_SMP
 	if (rt_se->nr_cpus_allowed > 1) {
@@ -416,6 +419,10 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 static inline
 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+#ifdef CONFIG_SMP
+	int highest_prio = rt_rq->highest_prio;
+#endif
+
 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
 	WARN_ON(!rt_rq->rt_nr_running);
 	rt_rq->rt_nr_running--;
@@ -439,6 +446,11 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 		rq->rt.rt_nr_migratory--;
 	}
 
+	if (rt_rq->highest_prio != highest_prio) {
+		struct rq *rq = rq_of_rt_rq(rt_rq);
+		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio);
+	}
+
 	update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif /* CONFIG_SMP */
 #ifdef CONFIG_RT_GROUP_SCHED
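
The two hunks above are the write side of the scheme: whenever enqueue or dequeue moves rq->rt.highest_prio, the runqueue reports its new top priority to the root domain's cpupri map. A sketch of what cpupri_set() does with that report, assuming the structure sketched earlier (convert_prio() maps a task priority onto a cpupri level; this is a reconstruction, not the verbatim source):

	void cpupri_set(struct cpupri *cp, int cpu, int pri)
	{
		int *currpri = &cp->cpu_to_pri[cpu];
		int oldpri = *currpri;
		int newpri = convert_prio(pri);
		unsigned long flags;

		if (newpri == oldpri)
			return;

		/* Add the CPU to its new priority vector first... */
		if (newpri != CPUPRI_INVALID) {
			struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

			spin_lock_irqsave(&vec->lock, flags);
			cpu_set(cpu, vec->mask);
			vec->count++;
			if (vec->count == 1)	/* level just became populated */
				set_bit(newpri, cp->pri_active);
			spin_unlock_irqrestore(&vec->lock, flags);
		}

		/* ...then drop it from the old one. */
		if (oldpri != CPUPRI_INVALID) {
			struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];

			spin_lock_irqsave(&vec->lock, flags);
			vec->count--;
			if (!vec->count)	/* level just emptied */
				clear_bit(oldpri, cp->pri_active);
			cpu_clear(cpu, vec->mask);
			spin_unlock_irqrestore(&vec->lock, flags);
		}

		*currpri = newpri;
	}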
@@ -763,73 +775,6 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
 
-static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
-{
-	int       lowest_prio = -1;
-	int       lowest_cpu  = -1;
-	int       count       = 0;
-	int       cpu;
-
-	cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);
-
-	/*
-	 * Scan each rq for the lowest prio.
-	 */
-	for_each_cpu_mask(cpu, *lowest_mask) {
-		struct rq *rq = cpu_rq(cpu);
-
-		/* We look for lowest RT prio or non-rt CPU */
-		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
-			/*
-			 * if we already found a low RT queue
-			 * and now we found this non-rt queue
-			 * clear the mask and set our bit.
-			 * Otherwise just return the queue as is
-			 * and the count==1 will cause the algorithm
-			 * to use the first bit found.
-			 */
-			if (lowest_cpu != -1) {
-				cpus_clear(*lowest_mask);
-				cpu_set(rq->cpu, *lowest_mask);
-			}
-			return 1;
-		}
-
-		/* no locking for now */
-		if ((rq->rt.highest_prio > task->prio)
-		    && (rq->rt.highest_prio >= lowest_prio)) {
-			if (rq->rt.highest_prio > lowest_prio) {
-				/* new low - clear old data */
-				lowest_prio = rq->rt.highest_prio;
-				lowest_cpu = cpu;
-				count = 0;
-			}
-			count++;
-		} else
-			cpu_clear(cpu, *lowest_mask);
-	}
-
-	/*
-	 * Clear out all the set bits that represent
-	 * runqueues that were of higher prio than
-	 * the lowest_prio.
-	 */
-	if (lowest_cpu > 0) {
-		/*
-		 * Perhaps we could add another cpumask op to
-		 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
-		 * Then that could be optimized to use memset and such.
-		 */
-		for_each_cpu_mask(cpu, *lowest_mask) {
-			if (cpu >= lowest_cpu)
-				break;
-			cpu_clear(cpu, *lowest_mask);
-		}
-	}
-
-	return count;
-}
-
 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 {
 	int first;
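
The function removed above is the linear algorithm the commit message refers to: every push had to visit each CPU in the task's affinity mask and read that runqueue's highest_prio, so the cost grew with the machine size. Its replacement reads only the shared per-domain bitmap, as the next hunk and the sketch after it show.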
@@ -851,18 +796,13 @@ static int find_lowest_rq(struct task_struct *task)
 	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu      = task_cpu(task);
-	int count    = find_lowest_cpus(task, lowest_mask);
 
-	if (!count)
+	if (task->rt.nr_cpus_allowed == 1)
+		return -1; /* No other targets possible */
+
+	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
 		return -1; /* No targets found */
 
 	/*
-	 * There is no sense in performing an optimal search if only one
-	 * target is found.
-	 */
-	if (count == 1)
-		return first_cpu(*lowest_mask);
-
-	/*
 	 * At this point we have built a mask of cpus representing the
 	 * lowest priority tasks in the system.  Now we want to elect
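
find_lowest_rq() now delegates the scan to cpupri_find(): walk the populated priority levels from the bottom up and stop at the first level below the task's own priority that intersects its affinity. A sketch of the lookup, under the same assumptions as the structure above:

	/* Iterate only over levels whose bit is set in the bitmap. */
	#define for_each_cpupri_active(array, idx)			       \
		for (idx = find_first_bit(array, CPUPRI_NR_PRIORITIES);        \
		     idx < CPUPRI_NR_PRIORITIES;			       \
		     idx = find_next_bit(array, CPUPRI_NR_PRIORITIES, idx + 1))

	int cpupri_find(struct cpupri *cp, struct task_struct *p,
			cpumask_t *lowest_mask)
	{
		int task_pri = convert_prio(p->prio);
		int idx;

		for_each_cpupri_active(cp->pri_active, idx) {
			struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
			cpumask_t mask;

			if (idx >= task_pri)	/* nothing below the task's own prio */
				break;

			cpus_and(mask, p->cpus_allowed, vec->mask);
			if (cpus_empty(mask))
				continue;

			*lowest_mask = mask;	/* caller picks the best CPU from these */
			return 1;
		}

		return 0;
	}

With 100 RT priorities plus the idle and normal levels, the walk is bounded at 102 iterations no matter how many CPUs the machine has.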
@@ -1218,6 +1158,8 @@ static void join_domain_rt(struct rq *rq)
 {
 	if (rq->rt.overloaded)
 		rt_set_overload(rq);
+
+	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
 }
 
 /* Assumes rq->lock is held */
@@ -1225,6 +1167,8 @@ static void leave_domain_rt(struct rq *rq)
 {
 	if (rq->rt.overloaded)
 		rt_clear_overload(rq);
+
+	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
 }
 
 /*