mips: fix up obsolete cpu function usage.
Thanks to spatch, plus manual removal of "&*".  Then a sweep for
for_each_cpu_mask => for_each_cpu.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Kevin Cernekee <cernekee@gmail.com>
Cc: Florian Fainelli <f.fainelli@gmail.com>
Cc: linux-mips@linux-mips.org
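
The conversions below are mechanical: the obsolete operators took a cpumask_t by value, while the modern helpers take a struct cpumask pointer, so each call site gains a "&" (and any "*" dereference of a mask pointer drops out, which is the manual "&*" cleanup the message refers to). As a rough illustration of the mapping (a kernel-style sketch; the function is invented for demonstration and is not part of this commit):

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Hypothetical helper, for illustration only. */
static void cpumask_api_example(int cpu, struct cpumask *mask)
{
	/* Obsolete:  cpu_set(cpu, *mask);        */
	cpumask_set_cpu(cpu, mask);

	/* Obsolete:  if (cpu_isset(cpu, *mask))  */
	if (cpumask_test_cpu(cpu, mask))
		pr_info("CPU %d set, weight now %u\n",
			cpu, cpumask_weight(mask));	/* was cpus_weight() */

	/* Obsolete:  cpu_clear(cpu, *mask);      */
	cpumask_clear_cpu(cpu, mask);
}
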
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -25,9 +25,9 @@ static void crash_shutdown_secondary(void *ignore)
 		return;
 
 	local_irq_disable();
-	if (!cpu_isset(cpu, cpus_in_crash))
+	if (!cpumask_test_cpu(cpu, &cpus_in_crash))
 		crash_save_cpu(regs, cpu);
-	cpu_set(cpu, cpus_in_crash);
+	cpumask_set_cpu(cpu, &cpus_in_crash);
 
 	while (!atomic_read(&kexec_ready_to_reboot))
 		cpu_relax();
@@ -50,7 +50,7 @@ static void crash_kexec_prepare_cpus(void)
 	 */
 	pr_emerg("Sending IPI to other cpus...\n");
 	msecs = 10000;
-	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+	while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
 		cpu_relax();
 		mdelay(1);
 	}
@@ -66,5 +66,5 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	crashing_cpu = smp_processor_id();
 	crash_save_cpu(regs, crashing_cpu);
 	crash_kexec_prepare_cpus();
-	cpu_set(crashing_cpu, cpus_in_crash);
+	cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
 }
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -114,8 +114,8 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
 	/* Compute new global allowed CPU set if necessary */
 	ti = task_thread_info(p);
 	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
-	    cpus_intersects(*new_mask, mt_fpu_cpumask)) {
-		cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);
+	    cpumask_intersects(new_mask, &mt_fpu_cpumask)) {
+		cpumask_and(effective_mask, new_mask, &mt_fpu_cpumask);
 		retval = set_cpus_allowed_ptr(p, effective_mask);
 	} else {
 		cpumask_copy(effective_mask, new_mask);
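
Note how the mips-mt-fpaff.c hunk drops the *new_mask and *effective_mask dereferences entirely: those variables are already pointers, and the replacement helpers want exactly that. The relevant <linux/cpumask.h> declarations look roughly like this (paraphrased for illustration; the real ones are static inlines):

void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp);
int  cpumask_test_cpu(int cpu, const struct cpumask *cpumask);
bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p);
int  cpumask_and(struct cpumask *dstp, const struct cpumask *src1p,
		 const struct cpumask *src2p);
void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp);
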
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -49,7 +49,7 @@
 void arch_cpu_idle_dead(void)
 {
 	/* What the heck is this check doing ? */
-	if (!cpu_isset(smp_processor_id(), cpu_callin_map))
+	if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map))
 		play_dead();
 }
 #endif
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -362,7 +362,7 @@ static int bmips_cpu_disable(void)
 	pr_info("SMP: CPU%d is offline\n", cpu);
 
 	set_cpu_online(cpu, false);
-	cpu_clear(cpu, cpu_callin_map);
+	cpumask_clear_cpu(cpu, &cpu_callin_map);
 	clear_c0_status(IE_IRQ5);
 
 	local_flush_tlb_all();
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -66,7 +66,7 @@ static void cmp_smp_finish(void)
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
 	if (cpu_has_fpu)
-		cpu_set(smp_processor_id(), mt_fpu_cpumask);
+		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
 	local_irq_enable();
@@ -110,7 +110,7 @@ void __init cmp_smp_setup(void)
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
 	if (cpu_has_fpu)
-		cpu_set(0, mt_fpu_cpumask);
+		cpumask_set_cpu(0, &mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
 	for (i = 1; i < NR_CPUS; i++) {
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -284,7 +284,7 @@ static void cps_smp_finish(void)
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
 	if (cpu_has_fpu)
-		cpu_set(smp_processor_id(), mt_fpu_cpumask);
+		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
 	local_irq_enable();
@@ -307,7 +307,7 @@ static int cps_cpu_disable(void)
 	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
 	smp_mb__after_atomic();
 	set_cpu_online(cpu, false);
-	cpu_clear(cpu, cpu_callin_map);
+	cpumask_clear_cpu(cpu, &cpu_callin_map);
 
 	return 0;
 }
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -178,7 +178,7 @@ static void vsmp_smp_finish(void)
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
 	if (cpu_has_fpu)
-		cpu_set(smp_processor_id(), mt_fpu_cpumask);
+		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
 	local_irq_enable();
@@ -239,7 +239,7 @@ static void __init vsmp_smp_setup(void)
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
 	if (cpu_has_fpu)
-		cpu_set(0, mt_fpu_cpumask);
+		cpumask_set_cpu(0, &mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 	if (!cpu_has_mipsmt)
 		return;
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -75,30 +75,30 @@ static inline void set_cpu_sibling_map(int cpu)
 {
 	int i;
 
-	cpu_set(cpu, cpu_sibling_setup_map);
+	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
 
 	if (smp_num_siblings > 1) {
-		for_each_cpu_mask(i, cpu_sibling_setup_map) {
+		for_each_cpu(i, &cpu_sibling_setup_map) {
 			if (cpu_data[cpu].package == cpu_data[i].package &&
 			    cpu_data[cpu].core == cpu_data[i].core) {
-				cpu_set(i, cpu_sibling_map[cpu]);
-				cpu_set(cpu, cpu_sibling_map[i]);
+				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
 			}
 		}
 	} else
-		cpu_set(cpu, cpu_sibling_map[cpu]);
+		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
 }
 
 static inline void set_cpu_core_map(int cpu)
 {
 	int i;
 
-	cpu_set(cpu, cpu_core_setup_map);
+	cpumask_set_cpu(cpu, &cpu_core_setup_map);
 
-	for_each_cpu_mask(i, cpu_core_setup_map) {
+	for_each_cpu(i, &cpu_core_setup_map) {
 		if (cpu_data[cpu].package == cpu_data[i].package) {
-			cpu_set(i, cpu_core_map[cpu]);
-			cpu_set(cpu, cpu_core_map[i]);
+			cpumask_set_cpu(i, &cpu_core_map[cpu]);
+			cpumask_set_cpu(cpu, &cpu_core_map[i]);
 		}
 	}
 }
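
The iterator swap in the hunk above follows the same pattern: for_each_cpu_mask(i, m) took the mask by value, while for_each_cpu(i, &m) takes a pointer. Both expand to a loop over only the set bits. A small usage sketch (illustrative, not taken from this commit):

{
	cpumask_t m;
	int i;

	cpumask_clear(&m);
	cpumask_set_cpu(0, &m);
	cpumask_set_cpu(2, &m);

	/* Visits only set bits: prints for CPU 0 and CPU 2. */
	for_each_cpu(i, &m)
		pr_info("CPU %d is in the mask\n", i);
}
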
@@ -138,7 +138,7 @@ asmlinkage void start_secondary(void)
 	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
 
-	cpu_set(cpu, cpu_coherent_mask);
+	cpumask_set_cpu(cpu, &cpu_coherent_mask);
 	notify_cpu_starting(cpu);
 
 	set_cpu_online(cpu, true);
@@ -146,7 +146,7 @@ asmlinkage void start_secondary(void)
 	set_cpu_sibling_map(cpu);
 	set_cpu_core_map(cpu);
 
-	cpu_set(cpu, cpu_callin_map);
+	cpumask_set_cpu(cpu, &cpu_callin_map);
 
 	synchronise_count_slave(cpu);
 
@@ -210,7 +210,7 @@ void smp_prepare_boot_cpu(void)
 {
 	set_cpu_possible(0, true);
 	set_cpu_online(0, true);
-	cpu_set(0, cpu_callin_map);
+	cpumask_set_cpu(0, &cpu_callin_map);
 }
 
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
@@ -220,7 +220,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	/*
 	 * Trust is futile.  We should really have timeouts ...
 	 */
-	while (!cpu_isset(cpu, cpu_callin_map))
+	while (!cpumask_test_cpu(cpu, &cpu_callin_map))
 		udelay(100);
 
 	synchronise_count_master(cpu);
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1121,13 +1121,13 @@ static void mt_ase_fp_affinity(void)
 	 * restricted the allowed set to exclude any CPUs with FPUs,
 	 * we'll skip the procedure.
 	 */
-	if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
+	if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
 		cpumask_t tmask;
 
 		current->thread.user_cpus_allowed
 			= current->cpus_allowed;
-		cpus_and(tmask, current->cpus_allowed,
-			mt_fpu_cpumask);
+		cpumask_and(&tmask, &current->cpus_allowed,
+			    &mt_fpu_cpumask);
 		set_cpus_allowed_ptr(current, &tmask);
 		set_thread_flag(TIF_FPUBOUND);
 	}