Merge remote-tracking branch 'tip/core/rcu' into next.2012.09.25b
Resolved conflict in kernel/sched/core.c using Peter Zijlstra's approach from https://lkml.org/lkml/2012/9/5/585.
@@ -5342,9 +5342,6 @@ static void migrate_tasks(unsigned int dead_cpu)
 	 */
 	rq->stop = NULL;
 
-	/* Ensure any throttled groups are reachable by pick_next_task */
-	unthrottle_offline_cfs_rqs(rq);
-
 	for ( ; ; ) {
 		/*
 		 * There's this thread running, bail when that's the only
@@ -5610,15 +5607,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 
 	case CPU_DEAD:
-		{
-			struct rq *dest_rq;
-
-			local_irq_save(flags);
-			dest_rq = cpu_rq(smp_processor_id());
-			raw_spin_lock(&dest_rq->lock);
-			calc_load_migrate(rq);
-			raw_spin_unlock_irqrestore(&dest_rq->lock, flags);
-		}
+		calc_load_migrate(rq);
 		break;
 #endif
 	}
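
The resolution above drops the open-coded dest_rq locking and simply calls calc_load_migrate(rq) for the dead CPU. calc_load_migrate() itself is not shown in this diff; a minimal sketch of the idea, assuming the calc_load_fold_active() helper and the calc_load_tasks accumulator from kernel/sched/core.c of this period, is roughly:

	/*
	 * Sketch only, not taken from this commit: fold the dead CPU's
	 * remaining active-task count into the global load-average
	 * accumulator so it is not lost when the runqueue goes offline.
	 */
	void calc_load_migrate(struct rq *rq)
	{
		long delta = calc_load_fold_active(rq);

		if (delta)
			atomic_long_add(delta, &calc_load_tasks);
	}

Under that sketch the fold is a single atomic add, which is consistent with the resolved code calling it without taking dest_rq->lock.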
@@ -6027,11 +6016,6 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
  * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
  * allows us to avoid some pointer chasing select_idle_sibling().
  *
- * Iterate domains and sched_groups downward, assigning CPUs to be
- * select_idle_sibling() hw buddy. Cross-wiring hw makes bouncing
- * due to random perturbation self canceling, ie sw buddies pull
- * their counterpart to their CPU's hw counterpart.
- *
  * Also keep a unique ID per domain (we use the first cpu number in
  * the cpumask of the domain), this allows us to quickly tell if
  * two cpus are in the same cache domain, see cpus_share_cache().
@@ -6045,40 +6029,8 @@ static void update_top_cache_domain(int cpu)
 	int id = cpu;
 
 	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
-	if (sd) {
-		struct sched_domain *tmp = sd;
-		struct sched_group *sg, *prev;
-		bool right;
-
-		/*
-		 * Traverse to first CPU in group, and count hops
-		 * to cpu from there, switching direction on each
-		 * hop, never ever pointing the last CPU rightward.
-		 */
-		do {
-			id = cpumask_first(sched_domain_span(tmp));
-			prev = sg = tmp->groups;
-			right = 1;
-
-			while (cpumask_first(sched_group_cpus(sg)) != id)
-				sg = sg->next;
-
-			while (!cpumask_test_cpu(cpu, sched_group_cpus(sg))) {
-				prev = sg;
-				sg = sg->next;
-				right = !right;
-			}
-
-			/* A CPU went down, never point back to domain start. */
-			if (right && cpumask_first(sched_group_cpus(sg->next)) == id)
-				right = false;
-
-			sg = right ? sg->next : prev;
-			tmp->idle_buddy = cpumask_first(sched_group_cpus(sg));
-		} while ((tmp = tmp->child));
-
+	if (sd)
 		id = cpumask_first(sched_domain_span(sd));
-	}
 
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 	per_cpu(sd_llc_id, cpu) = id;
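
For reference, the unique per-domain ID stored into sd_llc_id above is what the comment's cpus_share_cache() relies on. That function is not part of this diff; a sketch, assuming the kernel/sched/core.c implementation of this era:

	/*
	 * Sketch only, not taken from this commit: two CPUs share a
	 * last-level cache iff update_top_cache_domain() recorded the
	 * same first-CPU id for both of them.
	 */
	bool cpus_share_cache(int this_cpu, int that_cpu)
	{
		return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
	}

Because every CPU in the same LLC domain records the same id, the test is one integer compare, with no domain-tree walk at wakeup time.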