Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-cpumask
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-cpumask:
  oprofile: Thou shalt not call __exit functions from __init functions
  cpumask: remove the now-obsoleted pcibus_to_cpumask(): generic
  cpumask: remove cpumask_t from core
  cpumask: convert rcutorture.c
  cpumask: use new cpumask_ functions in core code.
  cpumask: remove references to struct irqaction's mask field.
  cpumask: use mm_cpumask() wrapper: kernel/fork.c
  cpumask: use set_cpu_active in init/main.c
  cpumask: remove node_to_first_cpu
  cpumask: fix seq_bitmap_*() functions.
  cpumask: remove dangerous CPU_MASK_ALL_PTR, &CPU_MASK_ALL
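The recurring theme in this series: code stops embedding a full cpumask_t by value (NR_CPUS bits, on the stack or in a global) and instead works through struct cpumask pointers and the cpumask_var_t allocation API, so large-NR_CPUS configurations do not waste memory or blow the kernel stack. A minimal before/after sketch of the pattern, in kernel-style C (the function names are illustrative placeholders, not code from this merge):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Old style: a full mask lands wherever it is declared. */
static void old_style(void)
{
	cpumask_t mask;			/* 512 bytes with NR_CPUS=4096 */

	cpus_setall(mask);
	cpu_clear(0, mask);
	/* ... use &mask ... */
}

/* New style: storage goes off-stack when CONFIG_CPUMASK_OFFSTACK=y. */
static int new_style(void)
{
	cpumask_var_t mask;		/* pointer-sized on offstack configs */

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_setall(mask);
	cpumask_clear_cpu(0, mask);
	/* ... pass mask around as a const struct cpumask * ... */

	free_cpumask_var(mask);
	return 0;
}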
@@ -281,7 +281,7 @@ int __ref cpu_down(unsigned int cpu)
 		goto out;
 	}
 
-	cpu_clear(cpu, cpu_active_map);
+	set_cpu_active(cpu, false);
 
 	/*
 	 * Make sure the all cpus did the reschedule and are not
@@ -296,7 +296,7 @@ int __ref cpu_down(unsigned int cpu)
 	err = _cpu_down(cpu, 0);
 
 	if (cpu_online(cpu))
-		cpu_set(cpu, cpu_active_map);
+		set_cpu_active(cpu, true);
 
 out:
 	cpu_maps_update_done();
@@ -333,7 +333,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
 
-	cpu_set(cpu, cpu_active_map);
+	set_cpu_active(cpu, true);
 
 	/* Now call notifier in preparation. */
 	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
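In these three hunks, direct manipulation of the global cpu_active_map gives way to the set_cpu_active() accessor, so the mask's storage stays private to the CPU-hotplug core. A rough sketch of the accessor idiom; this mirrors the shape of the helper, not its exact source, and the bitmap declaration is an assumption:

#include <linux/cpumask.h>

/* Sketch only: callers state intent, and the bitmap backing the
 * active mask stays private to one file. */
static DECLARE_BITMAP(cpu_active_bits, NR_CPUS);

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}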
@@ -284,7 +284,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	mm->free_area_cache = oldmm->mmap_base;
 	mm->cached_hole_size = ~0UL;
 	mm->map_count = 0;
-	cpus_clear(mm->cpu_vm_mask);
+	cpumask_clear(mm_cpumask(mm));
 	mm->mm_rb = RB_ROOT;
 	rb_link = &mm->mm_rb.rb_node;
 	rb_parent = NULL;
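The dup_mmap() hunk is the mm_cpumask() conversion called out in the shortlog: rather than touching mm->cpu_vm_mask directly with the by-value cpus_* macros, callers go through a wrapper and the pointer-based cpumask_* API. At this point the wrapper is just an accessor macro, which is what makes the conversion mechanical; a sketch of the idea, not necessarily the exact kernel definition:

/* Future-proof accessor: callers never learn how the mask is
 * stored, so the field can change representation in one place. */
#define mm_cpumask(mm) (&(mm)->cpu_vm_mask)

/* Usage, as in the hunk above: */
cpumask_clear(mm_cpumask(mm));	/* was cpus_clear(mm->cpu_vm_mask) */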
@@ -167,7 +167,7 @@ static int ____call_usermodehelper(void *data)
 	}
 
 	/* We can run anywhere, unlike our parent keventd(). */
-	set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR);
+	set_cpus_allowed_ptr(current, cpu_all_mask);
 
 	/*
 	 * Our parent is keventd, which runs with elevated scheduling priority.
@@ -110,7 +110,7 @@ static void create_kthread(struct kthread_create_info *create)
 		 */
 		sched_setscheduler(create->result, SCHED_NORMAL, &param);
 		set_user_nice(create->result, KTHREAD_NICE_LEVEL);
-		set_cpus_allowed_ptr(create->result, CPU_MASK_ALL_PTR);
+		set_cpus_allowed_ptr(create->result, cpu_all_mask);
 	}
 	complete(&create->done);
 }
@@ -240,7 +240,7 @@ int kthreadd(void *unused)
 	set_task_comm(tsk, "kthreadd");
 	ignore_signals(tsk);
 	set_user_nice(tsk, KTHREAD_NICE_LEVEL);
-	set_cpus_allowed_ptr(tsk, CPU_MASK_ALL_PTR);
+	set_cpus_allowed_ptr(tsk, cpu_all_mask);
 
 	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
 
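The kmod.c hunk and both kthread.c hunks make the same substitution: CPU_MASK_ALL_PTR, i.e. &CPU_MASK_ALL, becomes cpu_all_mask. The old macro took the address of a compound-literal mask, so each use site could materialize NR_CPUS bits, which is why the shortlog calls it dangerous; cpu_all_mask is a single shared const struct cpumask * from <linux/cpumask.h>. The call shape is unchanged (the wrapper below is a placeholder, not kernel code):

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Unbind a task from any particular CPU via the shared mask. */
static int let_task_run_anywhere(struct task_struct *tsk)
{
	return set_cpus_allowed_ptr(tsk, cpu_all_mask);
}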
@@ -126,6 +126,7 @@ static atomic_t n_rcu_torture_mberror;
 static atomic_t n_rcu_torture_error;
 static long n_rcu_torture_timers = 0;
 static struct list_head rcu_torture_removed;
+static cpumask_var_t shuffle_tmp_mask;
 
 static int stutter_pause_test = 0;
 
@@ -889,10 +890,9 @@ static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */
  */
 static void rcu_torture_shuffle_tasks(void)
 {
-	cpumask_t tmp_mask;
 	int i;
 
-	cpus_setall(tmp_mask);
+	cpumask_setall(shuffle_tmp_mask);
 	get_online_cpus();
 
 	/* No point in shuffling if there is only one online CPU (ex: UP) */
@@ -902,29 +902,29 @@ static void rcu_torture_shuffle_tasks(void)
 	}
 
 	if (rcu_idle_cpu != -1)
-		cpu_clear(rcu_idle_cpu, tmp_mask);
+		cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);
 
-	set_cpus_allowed_ptr(current, &tmp_mask);
+	set_cpus_allowed_ptr(current, shuffle_tmp_mask);
 
 	if (reader_tasks) {
 		for (i = 0; i < nrealreaders; i++)
 			if (reader_tasks[i])
 				set_cpus_allowed_ptr(reader_tasks[i],
-						     &tmp_mask);
+						     shuffle_tmp_mask);
 	}
 
 	if (fakewriter_tasks) {
 		for (i = 0; i < nfakewriters; i++)
 			if (fakewriter_tasks[i])
 				set_cpus_allowed_ptr(fakewriter_tasks[i],
-						     &tmp_mask);
+						     shuffle_tmp_mask);
 	}
 
 	if (writer_task)
-		set_cpus_allowed_ptr(writer_task, &tmp_mask);
+		set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
 
 	if (stats_task)
-		set_cpus_allowed_ptr(stats_task, &tmp_mask);
+		set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
 
 	if (rcu_idle_cpu == -1)
 		rcu_idle_cpu = num_online_cpus() - 1;
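Note why the "&" disappears from every call site above: cpumask_var_t is struct cpumask[1] when CONFIG_CPUMASK_OFFSTACK is off and struct cpumask * when it is on, so the variable decays to a pointer either way. Condensed, the shuffle logic now reads as follows (placeholder function, same calls as the hunk):

#include <linux/cpumask.h>
#include <linux/sched.h>

static cpumask_var_t shuffle_tmp_mask;	/* allocated once at init */

/* Keep the current task off idle_cpu (-1 means no restriction). */
static void pin_current_elsewhere(int idle_cpu)
{
	cpumask_setall(shuffle_tmp_mask);
	if (idle_cpu != -1)
		cpumask_clear_cpu(idle_cpu, shuffle_tmp_mask);
	set_cpus_allowed_ptr(current, shuffle_tmp_mask);
}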
@@ -1012,6 +1012,7 @@ rcu_torture_cleanup(void)
 	if (shuffler_task) {
 		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
 		kthread_stop(shuffler_task);
+		free_cpumask_var(shuffle_tmp_mask);
 	}
 	shuffler_task = NULL;
 
@@ -1190,10 +1191,18 @@ rcu_torture_init(void)
 	}
 	if (test_no_idle_hz) {
 		rcu_idle_cpu = num_online_cpus() - 1;
+
+		if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
+			firsterr = -ENOMEM;
+			VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
+			goto unwind;
+		}
+
 		/* Create the shuffler thread */
 		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
 					    "rcu_torture_shuffle");
 		if (IS_ERR(shuffler_task)) {
+			free_cpumask_var(shuffle_tmp_mask);
 			firsterr = PTR_ERR(shuffler_task);
 			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
 			shuffler_task = NULL;
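With this hunk the lifecycle of shuffle_tmp_mask is complete: allocated in rcu_torture_init() before the shuffler thread is created, freed on the kthread_run() failure path, and freed again in rcu_torture_cleanup() when the thread is stopped, so every exit path releases the mask exactly once. The generic shape of that ownership rule (all names below are placeholders, not rcutorture's):

#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kthread.h>

static cpumask_var_t mask;
static struct task_struct *worker;

/* Allocate the mask before the thread that will use it exists. */
static int start_worker(int (*fn)(void *data))
{
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	worker = kthread_run(fn, NULL, "worker");
	if (IS_ERR(worker)) {
		int err = PTR_ERR(worker);

		free_cpumask_var(mask);	/* error path owns the mask */
		worker = NULL;
		return err;
	}
	return 0;
}

/* Free the mask only once the thread can no longer touch it. */
static void stop_worker(void)
{
	if (worker) {
		kthread_stop(worker);
		free_cpumask_var(mask);	/* normal teardown */
		worker = NULL;
	}
}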
@@ -25,7 +25,7 @@ struct cpupri {
 
 #ifdef CONFIG_SMP
 int cpupri_find(struct cpupri *cp,
-		 struct task_struct *p, cpumask_t *lowest_mask);
+		 struct task_struct *p, struct cpumask *lowest_mask);
 void cpupri_set(struct cpupri *cp, int cpu, int pri);
 int cpupri_init(struct cpupri *cp, bool bootmem);
 void cpupri_cleanup(struct cpupri *cp);
@@ -44,7 +44,7 @@ static DEFINE_MUTEX(setup_lock);
 static int refcount;
 static struct workqueue_struct *stop_machine_wq;
 static struct stop_machine_data active, idle;
-static const cpumask_t *active_cpus;
+static const struct cpumask *active_cpus;
 static void *stop_machine_work;
 
 static void set_state(enum stopmachine_state newstate)
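Both the cpupri prototype and the stop_machine declaration swap cpumask_t for struct cpumask. Since cpumask_t is a typedef for struct cpumask, these two hunks change nothing in generated code; the point is to wean declarations off the typedef so it can eventually be retired and by-value full-mask declarations forbidden. For illustration (placeholder function, using only the pointer-based API):

#include <linux/cpumask.h>

/* With pointer-only declarations, the mask's size never appears in
 * the caller: everything goes through const struct cpumask *. */
static bool have_candidate(const struct cpumask *lowest_mask)
{
	return !cpumask_empty(lowest_mask);
}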
@@ -416,7 +416,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 	lock_map_acquire(&wq->lockdep_map);
 	lock_map_release(&wq->lockdep_map);
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -547,7 +547,7 @@ static void wait_on_work(struct work_struct *work)
 	wq = cwq->wq;
 	cpu_map = wq_cpu_map(wq);
 
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
@@ -911,7 +911,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
 	cpu_maps_update_done();
 
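The three workqueue.c hunks repeat one idiom: for_each_cpu_mask_nr(cpu, *cpu_map) took the mask by value and forced a dereference at each call site, while for_each_cpu(cpu, cpu_map) takes the pointer directly (both iterators stop at nr_cpu_ids, so the change is the calling convention, not the bound). A sketch of the new iteration shape, with visit() as a placeholder:

#include <linux/cpumask.h>

static void visit_marked_cpus(const struct cpumask *cpu_map,
			      void (*visit)(int cpu))
{
	int cpu;

	/* Walks only the set bits, bounded by nr_cpu_ids. */
	for_each_cpu(cpu, cpu_map)
		visit(cpu);
}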