Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (35 commits)
sched, cputime: Introduce thread_group_times()
sched, cputime: Cleanups related to task_times()
Revert "sched, x86: Optimize branch hint in __switch_to()"
sched: Fix isolcpus boot option
sched: Revert 498657a478
sched, time: Define nsecs_to_jiffies()
sched: Remove task_{u,s,g}time()
sched: Introduce task_times() to replace task_{u,s}time() pair
sched: Limit the number of scheduler debug messages
sched.c: Call debug_show_all_locks() when dumping all tasks
sched, x86: Optimize branch hint in __switch_to()
sched: Optimize branch hint in context_switch()
sched: Optimize branch hint in pick_next_task_fair()
sched_feat_write(): Update ppos instead of file->f_pos
sched: Sched_rt_periodic_timer vs cpu hotplug
sched, kvm: Fix race condition involving sched_in_preempt_notifers
sched: More generic WAKE_AFFINE vs select_idle_sibling()
sched: Cleanup select_task_rq_fair()
sched: Fix granularity of task_u/stime()
sched: Fix/add missing update_rq_clock() calls
...
 kernel/sched.c | 276 lines changed
@@ -535,14 +535,12 @@ struct rq {
 #define CPU_LOAD_IDX_MAX 5
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 #ifdef CONFIG_NO_HZ
-	unsigned long last_tick_seen;
 	unsigned char in_nohz_recently;
 #endif
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
 	u64 nr_switches;
-	u64 nr_migrations_in;
 
 	struct cfs_rq cfs;
 	struct rt_rq rt;
@@ -591,6 +589,8 @@ struct rq {
 
 	u64 rt_avg;
 	u64 age_stamp;
+	u64 idle_stamp;
+	u64 avg_idle;
 #endif
 
 	/* calc_load related fields */
@@ -772,7 +772,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 	if (!sched_feat_names[i])
 		return -EINVAL;
 
-	filp->f_pos += cnt;
+	*ppos += cnt;
 
 	return cnt;
 }
@@ -2017,6 +2017,7 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
 	}
 
 	spin_lock_irqsave(&rq->lock, flags);
+	update_rq_clock(rq);
 	set_task_cpu(p, cpu);
 	p->cpus_allowed = cpumask_of_cpu(cpu);
 	p->rt.nr_cpus_allowed = 1;
@@ -2078,7 +2079,6 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 #endif
 	if (old_cpu != new_cpu) {
 		p->se.nr_migrations++;
-		new_rq->nr_migrations_in++;
 #ifdef CONFIG_SCHEDSTATS
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
@@ -2115,6 +2115,7 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 	 * it is sufficient to simply update the task's cpu field.
 	 */
 	if (!p->se.on_rq && !task_running(rq, p)) {
+		update_rq_clock(rq);
 		set_task_cpu(p, dest_cpu);
 		return 0;
 	}
@@ -2376,13 +2377,14 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	task_rq_unlock(rq, &flags);
 
 	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-	if (cpu != orig_cpu)
-		set_task_cpu(p, cpu);
-
-	rq = task_rq_lock(p, &flags);
-
-	if (rq != orig_rq)
+	if (cpu != orig_cpu) {
+		local_irq_save(flags);
+		rq = cpu_rq(cpu);
 		update_rq_clock(rq);
+		set_task_cpu(p, cpu);
+		local_irq_restore(flags);
+	}
+	rq = task_rq_lock(p, &flags);
 
 	WARN_ON(p->state != TASK_WAKING);
 	cpu = task_cpu(p);
@@ -2440,6 +2442,17 @@ out_running:
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_wake_up)
 		p->sched_class->task_wake_up(rq, p);
+
+	if (unlikely(rq->idle_stamp)) {
+		u64 delta = rq->clock - rq->idle_stamp;
+		u64 max = 2*sysctl_sched_migration_cost;
+
+		if (delta > max)
+			rq->avg_idle = max;
+		else
+			update_avg(&rq->avg_idle, delta);
+		rq->idle_stamp = 0;
+	}
 #endif
 out:
 	task_rq_unlock(rq, &flags);
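The idle_stamp/avg_idle pair added in the hunk above gives each runqueue a cheap estimate of how long it typically stays idle: a timestamp is taken when the CPU goes idle, and at the next wakeup the observed idle period is either capped at twice sysctl_sched_migration_cost or folded into a running average; idle_balance() (further down in this diff) then skips newidle balancing when that average is below the migration cost, and clears idle_stamp when a task was pulled. The following standalone sketch is not part of the diff; it illustrates the bookkeeping with plain integers and assumes a simple 1/8-weight running average for update_avg():

#include <stdint.h>

#define MIGRATION_COST 500000ULL	/* stand-in for sysctl_sched_migration_cost, in ns */

struct rq_stats {
	uint64_t idle_stamp;	/* clock value when the CPU went idle, 0 otherwise */
	uint64_t avg_idle;	/* running estimate of how long this CPU stays idle */
};

/* Assumed 1/8-weight running average; the kernel's update_avg() may differ. */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)(sample - *avg);

	*avg += diff >> 3;
}

static void note_idle(struct rq_stats *rq, uint64_t now)
{
	rq->idle_stamp = now;
}

static void note_wakeup(struct rq_stats *rq, uint64_t now)
{
	if (rq->idle_stamp) {
		uint64_t delta = now - rq->idle_stamp;
		uint64_t max = 2 * MIGRATION_COST;

		if (delta > max)
			rq->avg_idle = max;
		else
			update_avg(&rq->avg_idle, delta);
		rq->idle_stamp = 0;
	}
}

/* Newidle balancing only pays off if the CPU usually stays idle long enough. */
static int worth_newidle_balance(const struct rq_stats *rq)
{
	return rq->avg_idle >= MIGRATION_COST;
}

int main(void)
{
	struct rq_stats rq = { 0, 2 * MIGRATION_COST };

	note_idle(&rq, 1000);
	note_wakeup(&rq, 1000 + 100000);
	return worth_newidle_balance(&rq) ? 0 : 1;
}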
@@ -2545,6 +2558,7 @@ static void __sched_fork(struct task_struct *p)
 void sched_fork(struct task_struct *p, int clone_flags)
 {
 	int cpu = get_cpu();
+	unsigned long flags;
 
 	__sched_fork(p);
 
@@ -2581,7 +2595,10 @@ void sched_fork(struct task_struct *p, int clone_flags)
 #ifdef CONFIG_SMP
 	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
 #endif
+	local_irq_save(flags);
+	update_rq_clock(cpu_rq(cpu));
 	set_task_cpu(p, cpu);
+	local_irq_restore(flags);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
@@ -2848,14 +2865,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	 */
 	arch_start_context_switch(prev);
 
-	if (unlikely(!mm)) {
+	if (likely(!mm)) {
 		next->active_mm = oldmm;
 		atomic_inc(&oldmm->mm_count);
 		enter_lazy_tlb(oldmm, next);
 	} else
 		switch_mm(oldmm, mm, next);
 
-	if (unlikely(!prev->mm)) {
+	if (likely(!prev->mm)) {
 		prev->active_mm = NULL;
 		rq->prev_mm = oldmm;
 	}
@@ -3017,15 +3034,6 @@ static void calc_load_account_active(struct rq *this_rq)
 	}
 }
 
-/*
- * Externally visible per-cpu scheduler statistics:
- * cpu_nr_migrations(cpu) - number of migrations into that cpu
- */
-u64 cpu_nr_migrations(int cpu)
-{
-	return cpu_rq(cpu)->nr_migrations_in;
-}
-
 /*
  * Update rq->cpu_load[] statistics. This function is usually called every
  * scheduler tick (TICK_NSEC).
@@ -4126,7 +4134,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	unsigned long flags;
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-	cpumask_setall(cpus);
+	cpumask_copy(cpus, cpu_online_mask);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -4289,7 +4297,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	int all_pinned = 0;
 	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
-	cpumask_setall(cpus);
+	cpumask_copy(cpus, cpu_online_mask);
 
 	/*
 	 * When power savings policy is enabled for the parent domain, idle
@@ -4429,6 +4437,11 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 	int pulled_task = 0;
 	unsigned long next_balance = jiffies + HZ;
 
+	this_rq->idle_stamp = this_rq->clock;
+
+	if (this_rq->avg_idle < sysctl_sched_migration_cost)
+		return;
+
 	for_each_domain(this_cpu, sd) {
 		unsigned long interval;
 
@@ -4443,8 +4456,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
 			next_balance = sd->last_balance + interval;
-		if (pulled_task)
+		if (pulled_task) {
+			this_rq->idle_stamp = 0;
 			break;
+		}
 	}
 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
 		/*
@@ -5046,8 +5061,13 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
 	p->gtime = cputime_add(p->gtime, cputime);
 
 	/* Add guest time to cpustat. */
-	cpustat->user = cputime64_add(cpustat->user, tmp);
-	cpustat->guest = cputime64_add(cpustat->guest, tmp);
+	if (TASK_NICE(p) > 0) {
+		cpustat->nice = cputime64_add(cpustat->nice, tmp);
+		cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
+	} else {
+		cpustat->user = cputime64_add(cpustat->user, tmp);
+		cpustat->guest = cputime64_add(cpustat->guest, tmp);
+	}
 }
 
 /*
@@ -5162,61 +5182,87 @@ void account_idle_ticks(unsigned long ticks)
  * Use precise platform statistics if available:
  */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-cputime_t task_utime(struct task_struct *p)
+void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-	return p->utime;
+	*ut = p->utime;
+	*st = p->stime;
 }
 
-cputime_t task_stime(struct task_struct *p)
+void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-	return p->stime;
+	struct task_cputime cputime;
+
+	thread_group_cputime(p, &cputime);
+
+	*ut = cputime.utime;
+	*st = cputime.stime;
 }
 #else
-cputime_t task_utime(struct task_struct *p)
+
+#ifndef nsecs_to_cputime
+# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
+#endif
+
+void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-	clock_t utime = cputime_to_clock_t(p->utime),
-		total = utime + cputime_to_clock_t(p->stime);
-	u64 temp;
+	cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
 
 	/*
 	 * Use CFS's precise accounting:
 	 */
-	temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
+	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
 	if (total) {
-		temp *= utime;
+		u64 temp;
+
+		temp = (u64)(rtime * utime);
 		do_div(temp, total);
-	}
-	utime = (clock_t)temp;
-
-	p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
-	return p->prev_utime;
-}
-
-cputime_t task_stime(struct task_struct *p)
-{
-	clock_t stime;
+		utime = (cputime_t)temp;
+	} else
+		utime = rtime;
 
 	/*
-	 * Use CFS's precise accounting. (we subtract utime from
-	 * the total, to make sure the total observed by userspace
-	 * grows monotonically - apps rely on that):
+	 * Compare with previous values, to keep monotonicity:
 	 */
-	stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
-			cputime_to_clock_t(task_utime(p));
+	p->prev_utime = max(p->prev_utime, utime);
+	p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
 
-	if (stime >= 0)
-		p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
+	*ut = p->prev_utime;
+	*st = p->prev_stime;
+}
 
-	return p->prev_stime;
+/*
+ * Must be called with siglock held.
+ */
+void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+	struct signal_struct *sig = p->signal;
+	struct task_cputime cputime;
+	cputime_t rtime, utime, total;
+
+	thread_group_cputime(p, &cputime);
+
+	total = cputime_add(cputime.utime, cputime.stime);
+	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
+
+	if (total) {
+		u64 temp;
+
+		temp = (u64)(rtime * cputime.utime);
+		do_div(temp, total);
+		utime = (cputime_t)temp;
+	} else
+		utime = rtime;
+
+	sig->prev_utime = max(sig->prev_utime, utime);
+	sig->prev_stime = max(sig->prev_stime,
+			      cputime_sub(rtime, sig->prev_utime));
+
+	*ut = sig->prev_utime;
+	*st = sig->prev_stime;
 }
 #endif
 
 inline cputime_t task_gtime(struct task_struct *p)
 {
 	return p->gtime;
 }
 
 /*
  * This function gets called by the timer code, with HZ frequency.
  * We call it with interrupts disabled.
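The rewritten task_times()/thread_group_times() above share one idea: the precisely accounted sum_exec_runtime is split between user and system time in the same ratio as the tick-sampled utime/stime, and the cached prev_utime/prev_stime values are only ever raised, so the times reported to userspace stay monotonic. The following self-contained sketch is not part of the diff; it shows the arithmetic with plain 64-bit integers in place of cputime_t:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the prev_utime/prev_stime fields kept per task. */
struct cached_times {
	uint64_t prev_utime;
	uint64_t prev_stime;
};

static uint64_t max_u64(uint64_t a, uint64_t b)
{
	return a > b ? a : b;
}

/*
 * Split rtime (precise runtime) in the utime:stime ratio of the sampled
 * values, then clamp against the previously reported numbers so that
 * neither output ever decreases.
 */
static void split_times(struct cached_times *c, uint64_t rtime,
			uint64_t utime, uint64_t stime,
			uint64_t *ut, uint64_t *st)
{
	uint64_t total = utime + stime;

	if (total)
		utime = rtime * utime / total;	/* user share of the precise runtime */
	else
		utime = rtime;			/* no samples yet: call it all user time */

	c->prev_utime = max_u64(c->prev_utime, utime);
	c->prev_stime = max_u64(c->prev_stime, rtime - c->prev_utime);

	*ut = c->prev_utime;
	*st = c->prev_stime;
}

int main(void)
{
	struct cached_times c = { 0, 0 };
	uint64_t ut, st;

	split_times(&c, 1000, 30, 10, &ut, &st);	/* 3:1 user:system samples */
	printf("ut=%llu st=%llu\n", (unsigned long long)ut, (unsigned long long)st);
	return 0;
}

Deriving the system share as the remainder of rtime after the clamped user share, rather than scaling stime separately, mirrors the kernel version above.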
@@ -6175,22 +6221,14 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 	BUG_ON(p->se.on_rq);
 
 	p->policy = policy;
-	switch (p->policy) {
-	case SCHED_NORMAL:
-	case SCHED_BATCH:
-	case SCHED_IDLE:
-		p->sched_class = &fair_sched_class;
-		break;
-	case SCHED_FIFO:
-	case SCHED_RR:
-		p->sched_class = &rt_sched_class;
-		break;
-	}
-
 	p->rt_priority = prio;
 	p->normal_prio = normal_prio(p);
 	/* we are holding p->pi_lock already */
 	p->prio = rt_mutex_getprio(p);
+	if (rt_prio(p->prio))
+		p->sched_class = &rt_sched_class;
+	else
+		p->sched_class = &fair_sched_class;
 	set_load_weight(p);
 }
 
@@ -6935,7 +6973,7 @@ void show_state_filter(unsigned long state_filter)
 	/*
 	 * Only show locks if all tasks are dumped:
 	 */
-	if (state_filter == -1)
+	if (!state_filter)
 		debug_show_all_locks();
 }
 
@@ -7740,6 +7778,16 @@ early_initcall(migration_init);
 
 #ifdef CONFIG_SCHED_DEBUG
 
+static __read_mostly int sched_domain_debug_enabled;
+
+static int __init sched_domain_debug_setup(char *str)
+{
+	sched_domain_debug_enabled = 1;
+
+	return 0;
+}
+early_param("sched_debug", sched_domain_debug_setup);
+
 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 				  struct cpumask *groupmask)
 {
@@ -7826,6 +7874,9 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 	cpumask_var_t groupmask;
 	int level = 0;
 
+	if (!sched_domain_debug_enabled)
+		return;
+
 	if (!sd) {
 		printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
 		return;
@@ -7905,6 +7956,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 
 static void free_rootdomain(struct root_domain *rd)
 {
+	synchronize_sched();
+
 	cpupri_cleanup(&rd->cpupri);
 
 	free_cpumask_var(rd->rto_mask);
@@ -8045,6 +8098,7 @@ static cpumask_var_t cpu_isolated_map;
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
 {
+	alloc_bootmem_cpumask_var(&cpu_isolated_map);
 	cpulist_parse(str, cpu_isolated_map);
 	return 1;
 }
@@ -8881,7 +8935,7 @@ static int build_sched_domains(const struct cpumask *cpu_map)
 	return __build_sched_domains(cpu_map, NULL);
 }
 
-static struct cpumask *doms_cur;	/* current sched domains */
+static cpumask_var_t *doms_cur;	/* current sched domains */
 static int ndoms_cur;		/* number of sched domains in 'doms_cur' */
 static struct sched_domain_attr *dattr_cur;
 				/* attribues of custom domains in 'doms_cur' */
@@ -8903,6 +8957,31 @@ int __attribute__((weak)) arch_update_cpu_topology(void)
 	return 0;
 }
 
+cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
+{
+	int i;
+	cpumask_var_t *doms;
+
+	doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
+	if (!doms)
+		return NULL;
+	for (i = 0; i < ndoms; i++) {
+		if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
+			free_sched_domains(doms, i);
+			return NULL;
+		}
+	}
+	return doms;
+}
+
+void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
+{
+	unsigned int i;
+	for (i = 0; i < ndoms; i++)
+		free_cpumask_var(doms[i]);
+	kfree(doms);
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
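The new alloc_sched_domains()/free_sched_domains() helpers above replace the old single kmalloc'd cpumask with an array of individually allocated masks, using the usual C pattern of unwinding a partially completed allocation on failure. A standalone userspace sketch of that ownership pattern, not part of the diff (malloc'd buffers and illustrative names standing in for cpumask_var_t and the kernel API):

#include <stdlib.h>

typedef unsigned long mask_t;	/* stand-in for one dynamically allocated cpumask */

static void free_masks(mask_t **masks, unsigned int ndoms)
{
	unsigned int i;

	for (i = 0; i < ndoms; i++)
		free(masks[i]);
	free(masks);
}

static mask_t **alloc_masks(unsigned int ndoms)
{
	mask_t **masks;
	unsigned int i;

	masks = malloc(sizeof(*masks) * ndoms);
	if (!masks)
		return NULL;
	for (i = 0; i < ndoms; i++) {
		masks[i] = calloc(1, sizeof(mask_t));
		if (!masks[i]) {
			/* free only the entries allocated so far, then the array */
			free_masks(masks, i);
			return NULL;
		}
	}
	return masks;
}

int main(void)
{
	mask_t **doms = alloc_masks(4);

	if (doms)
		free_masks(doms, 4);
	return doms ? 0 : 1;
}

partition_sched_domains() takes ownership of such an array, which is why the later hunks switch its cleanup from kfree() to free_sched_domains().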
@@ -8914,12 +8993,12 @@ static int arch_init_sched_domains(const struct cpumask *cpu_map)
 
 	arch_update_cpu_topology();
 	ndoms_cur = 1;
-	doms_cur = kmalloc(cpumask_size(), GFP_KERNEL);
+	doms_cur = alloc_sched_domains(ndoms_cur);
 	if (!doms_cur)
-		doms_cur = fallback_doms;
-	cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
+		doms_cur = &fallback_doms;
+	cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
 	dattr_cur = NULL;
-	err = build_sched_domains(doms_cur);
+	err = build_sched_domains(doms_cur[0]);
 	register_sched_domain_sysctl();
 
 	return err;
@@ -8969,19 +9048,19 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  * doms_new[] to the current sched domain partitioning, doms_cur[].
  * It destroys each deleted domain and builds each new domain.
  *
- * 'doms_new' is an array of cpumask's of length 'ndoms_new'.
+ * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
  * The masks don't intersect (don't overlap.) We should setup one
  * sched domain for each mask. CPUs not in any of the cpumasks will
  * not be load balanced. If the same cpumask appears both in the
  * current 'doms_cur' domains and in the new 'doms_new', we can leave
  * it as it is.
  *
- * The passed in 'doms_new' should be kmalloc'd. This routine takes
- * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL &&
- * ndoms_new == 1, and partition_sched_domains() will fallback to
- * the single partition 'fallback_doms', it also forces the domains
- * to be rebuilt.
+ * The passed in 'doms_new' should be allocated using
+ * alloc_sched_domains. This routine takes ownership of it and will
+ * free_sched_domains it when done with it. If the caller failed the
+ * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
+ * and partition_sched_domains() will fallback to the single partition
+ * 'fallback_doms', it also forces the domains to be rebuilt.
 *
 * If doms_new == NULL it will be replaced with cpu_online_mask.
 * ndoms_new == 0 is a special case for destroying existing domains,
@@ -8989,8 +9068,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  *
  * Call with hotplug lock held
  */
-/* FIXME: Change to struct cpumask *doms_new[] */
-void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			     struct sched_domain_attr *dattr_new)
 {
 	int i, j, n;
@@ -9009,40 +9087,40 @@ void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 	/* Destroy deleted domains */
 	for (i = 0; i < ndoms_cur; i++) {
 		for (j = 0; j < n && !new_topology; j++) {
-			if (cpumask_equal(&doms_cur[i], &doms_new[j])
+			if (cpumask_equal(doms_cur[i], doms_new[j])
 			    && dattrs_equal(dattr_cur, i, dattr_new, j))
 				goto match1;
 		}
 		/* no match - a current sched domain not in new doms_new[] */
-		detach_destroy_domains(doms_cur + i);
+		detach_destroy_domains(doms_cur[i]);
match1:
 		;
 	}
 
 	if (doms_new == NULL) {
 		ndoms_cur = 0;
-		doms_new = fallback_doms;
-		cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
+		doms_new = &fallback_doms;
+		cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map);
 		WARN_ON_ONCE(dattr_new);
 	}
 
 	/* Build new domains */
 	for (i = 0; i < ndoms_new; i++) {
 		for (j = 0; j < ndoms_cur && !new_topology; j++) {
-			if (cpumask_equal(&doms_new[i], &doms_cur[j])
+			if (cpumask_equal(doms_new[i], doms_cur[j])
 			    && dattrs_equal(dattr_new, i, dattr_cur, j))
 				goto match2;
 		}
 		/* no match - add a new doms_new */
-		__build_sched_domains(doms_new + i,
+		__build_sched_domains(doms_new[i],
 					dattr_new ? dattr_new + i : NULL);
match2:
 		;
 	}
 
 	/* Remember the new sched domains */
-	if (doms_cur != fallback_doms)
-		kfree(doms_cur);
+	if (doms_cur != &fallback_doms)
+		free_sched_domains(doms_cur, ndoms_cur);
 	kfree(dattr_cur);	/* kfree(NULL) is safe */
 	doms_cur = doms_new;
 	dattr_cur = dattr_new;
@@ -9364,10 +9442,6 @@ void __init sched_init(void)
 #ifdef CONFIG_CPUMASK_OFFSTACK
 	alloc_size += num_possible_cpus() * cpumask_size();
 #endif
-	/*
-	 * As sched_init() is called before page_alloc is setup,
-	 * we use alloc_bootmem().
-	 */
 	if (alloc_size) {
 		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
@@ -9522,6 +9596,8 @@ void __init sched_init(void)
 		rq->cpu = i;
 		rq->online = 0;
 		rq->migration_thread = NULL;
+		rq->idle_stamp = 0;
+		rq->avg_idle = 2*sysctl_sched_migration_cost;
 		INIT_LIST_HEAD(&rq->migration_queue);
 		rq_attach_root(rq, &def_root_domain);
 #endif
@@ -9571,7 +9647,9 @@ void __init sched_init(void)
 	zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
 	alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-	zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+	/* May be allocated at isolcpus cmdline parse time */
+	if (cpu_isolated_map == NULL)
+		zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
 	perf_event_init();
||||
|