Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus: (39 commits)
  cpumask: Move deprecated functions to end of header.
  cpumask: remove unused deprecated functions, avoid accusations of insanity
  cpumask: use new-style cpumask ops in mm/quicklist.
  cpumask: use mm_cpumask() wrapper: x86
  cpumask: use mm_cpumask() wrapper: um
  cpumask: use mm_cpumask() wrapper: mips
  cpumask: use mm_cpumask() wrapper: mn10300
  cpumask: use mm_cpumask() wrapper: m32r
  cpumask: use mm_cpumask() wrapper: arm
  cpumask: Use accessors for cpu_*_mask: um
  cpumask: Use accessors for cpu_*_mask: powerpc
  cpumask: Use accessors for cpu_*_mask: mips
  cpumask: Use accessors for cpu_*_mask: m32r
  cpumask: remove arch_send_call_function_ipi
  cpumask: arch_send_call_function_ipi_mask: s390
  cpumask: arch_send_call_function_ipi_mask: powerpc
  cpumask: arch_send_call_function_ipi_mask: mips
  cpumask: arch_send_call_function_ipi_mask: m32r
  cpumask: arch_send_call_function_ipi_mask: alpha
  cpumask: remove obsolete topology_core_siblings and topology_thread_siblings: ia64
  ...
@@ -227,17 +227,14 @@ static struct irq_cfg *get_one_free_irq_cfg(int node)
 
 	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
 	if (cfg) {
-		if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
+		if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
 			kfree(cfg);
 			cfg = NULL;
-		} else if (!alloc_cpumask_var_node(&cfg->old_domain,
+		} else if (!zalloc_cpumask_var_node(&cfg->old_domain,
 						   GFP_ATOMIC, node)) {
 			free_cpumask_var(cfg->domain);
 			kfree(cfg);
 			cfg = NULL;
-		} else {
-			cpumask_clear(cfg->domain);
-			cpumask_clear(cfg->old_domain);
 		}
 	}
 
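This hunk shows the conversion that recurs throughout the merge: the two-step "allocate, then cpumask_clear()" idiom becomes a single call to the zeroing allocators zalloc_cpumask_var()/zalloc_cpumask_var_node(), which also makes the explicit else branch that cleared the masks unnecessary. A minimal before/after sketch (illustrative only, not part of the patch):

	cpumask_var_t mask;

	/* Before: allocate, then clear by hand. */
	if (!alloc_cpumask_var_node(&mask, GFP_ATOMIC, node))
		return NULL;
	cpumask_clear(mask);

	/* After: one call hands back an already-zeroed mask; it returns
	 * false on allocation failure, which can only happen when
	 * CONFIG_CPUMASK_OFFSTACK=y. */
	if (!zalloc_cpumask_var_node(&mask, GFP_ATOMIC, node))
		return NULL;

Either variant is paired with free_cpumask_var() on the teardown path, as the error handling above does for cfg->domain.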
@@ -67,8 +67,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 #ifdef CONFIG_SMP
 	preempt_disable();
 	load_LDT(pc);
-	if (!cpus_equal(current->mm->cpu_vm_mask,
-			cpumask_of_cpu(smp_processor_id())))
+	if (!cpumask_equal(mm_cpumask(current->mm),
+			   cpumask_of(smp_processor_id())))
 		smp_call_function(flush_ldt, current->mm, 1);
 	preempt_enable();
 #else
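Here the old struct-based operations (cpus_equal() on the embedded cpu_vm_mask, cpumask_of_cpu()) give way to pointer-based ones: mm_cpumask() returns a struct cpumask * for the mm's CPU mask, which is compared with cpumask_equal() against cpumask_of(). A sketch of the new-style usage (illustrative; the helper name is hypothetical):

	/* True if mm is live only on the calling CPU. The caller must have
	 * preemption disabled so smp_processor_id() is stable, as the
	 * preempt_disable()/preempt_enable() pair in the hunk above ensures. */
	static bool mm_only_on_this_cpu(struct mm_struct *mm)
	{
		return cpumask_equal(mm_cpumask(mm),
				     cpumask_of(smp_processor_id()));
	}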
@@ -555,10 +555,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 void __init init_c1e_mask(void)
 {
 	/* If we're using c1e_idle, we need to allocate c1e_mask. */
-	if (pm_idle == c1e_idle) {
-		alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
-		cpumask_clear(c1e_mask);
-	}
+	if (pm_idle == c1e_idle)
+		zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
 }
 
 static int __init idle_setup(char *str)
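The same zalloc conversion as in the first hunk; collapsing the body to a single statement also lets the braces go, per kernel style. For reference, the full cpumask_var_t lifecycle looks like this (illustrative sketch, not from the patch): with CONFIG_CPUMASK_OFFSTACK=n the variable is a fixed-size array, allocation always succeeds, and freeing is a no-op; with =y it is heap-allocated and must be released.

	cpumask_var_t tmp;

	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))	/* zeroed on success */
		return -ENOMEM;
	cpumask_set_cpu(0, tmp);			/* ordinary cpumask ops apply */
	free_cpumask_var(tmp);				/* no-op when !OFFSTACK */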
@@ -1059,12 +1059,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 #endif
 	current_thread_info()->cpu = 0;  /* needed? */
 	for_each_possible_cpu(i) {
-		alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
-		alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-		alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
-		cpumask_clear(per_cpu(cpu_core_map, i));
-		cpumask_clear(per_cpu(cpu_sibling_map, i));
-		cpumask_clear(cpu_data(i).llc_shared_map);
+		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
 	}
 	set_cpu_sibling_map(0);
 
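The same pattern applied to the per-cpu topology masks: six calls become three, and the masks come back already cleared. Once allocated, they are consumed with the same pointer-based ops; a hypothetical usage sketch (not from the patch):

	/* Walk the CPUs recorded as core siblings of cpu. */
	unsigned int cpu = 0, sibling;

	for_each_cpu(sibling, per_cpu(cpu_sibling_map, cpu))
		pr_info("CPU%u shares a core with CPU%u\n", cpu, sibling);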
@@ -93,7 +93,6 @@ static struct irqaction irq0 = {
 
 void __init setup_default_timer_irq(void)
 {
-	irq0.mask = cpumask_of_cpu(0);
 	setup_irq(0, &irq0);
 }
 