diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 9fb0f0916374..cc5f51fa2b28 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -812,7 +812,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  */
 void synchronize_rcu_expedited(void)
 {
-	bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
+	bool no_wq;
 	struct rcu_exp_work rew;
 	struct rcu_node *rnp;
 	unsigned long s;
@@ -837,9 +837,15 @@ void synchronize_rcu_expedited(void)
 	if (exp_funnel_lock(s))
 		return;  /* Someone else did our work for us. */
 
+	/* Don't use workqueue during boot or from an incoming CPU. */
+	preempt_disable();
+	no_wq = rcu_scheduler_active == RCU_SCHEDULER_INIT ||
+		!cpumask_test_cpu(smp_processor_id(), cpu_active_mask);
+	preempt_enable();
+
 	/* Ensure that load happens before action based on it. */
-	if (unlikely(boottime)) {
-		/* Direct call during scheduler init and early_initcalls(). */
+	if (unlikely(no_wq)) {
+		/* Direct call for scheduler init, early_initcall()s, and incoming CPUs. */
 		rcu_exp_sel_wait_wake(s);
 	} else {
 		/* Marshall arguments & schedule the expedited grace period. */
@@ -857,7 +863,7 @@ void synchronize_rcu_expedited(void)
 	/* Let the next expedited grace period start. */
 	mutex_unlock(&rcu_state.exp_mutex);
 
-	if (likely(!boottime))
+	if (likely(!no_wq))
 		destroy_work_on_stack(&rew.rew_work);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
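
Not part of the patch, but for context on the second hunk: the check works because both loads (rcu_scheduler_active and the current CPU's membership in cpu_active_mask) are made with preemption disabled, so the task cannot migrate between reading smp_processor_id() and testing the mask. Below is a minimal illustrative sketch of that pattern, not the patch itself; the helper name exp_gp_must_avoid_wq() is hypothetical, and the snippet assumes an RCU-internal kernel context where rcu_scheduler_active and RCU_SCHEDULER_INIT are already visible.

#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

/*
 * Hypothetical helper illustrating the pattern used in the hunk above.
 * Assumes rcu_scheduler_active and RCU_SCHEDULER_INIT are in scope
 * (they are RCU internals, visible where tree_exp.h is built).
 */
static bool exp_gp_must_avoid_wq(void)
{
	bool no_wq;

	/*
	 * Disable preemption so smp_processor_id() and the cpu_active_mask
	 * test refer to the same, stable CPU.
	 */
	preempt_disable();
	no_wq = rcu_scheduler_active == RCU_SCHEDULER_INIT ||
		!cpumask_test_cpu(smp_processor_id(), cpu_active_mask);
	preempt_enable();

	return no_wq;
}

A caller would use the result exactly as the patch does: direct call to rcu_exp_sel_wait_wake() when workqueues must be avoided, queue_work() otherwise.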