FROMGIT: rcu: Allow expedited RCU grace periods on incoming CPUs
Although it is usually safe to invoke synchronize_rcu_expedited() from a preemption-enabled CPU-hotplug notifier, if it is invoked from a notifier between CPUHP_AP_RCUTREE_ONLINE and CPUHP_AP_ACTIVE, its attempts to invoke a workqueue handler will hang due to RCU waiting on a CPU that the scheduler is not paying attention to. This commit therefore expands use of the existing workqueue-independent synchronize_rcu_expedited() from early boot to also include CPUs that are being hotplugged.

Bug: 216238044
Link: https://lore.kernel.org/lkml/7359f994-8aaf-3cea-f5cf-c0d3929689d6@quicinc.com/
Reported-by: Mukesh Ojha <quic_mojha@quicinc.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
(cherry picked from commit 710f460c395af6b81df1c81043308aaa60d5e25c
 https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git rcu/next)
Change-Id: I3f81dee6deaf6a4504aec31e058785dc8cee6a3f
Signed-off-by: Mukesh Ojha <quic_mojha@quicinc.com>
This commit is contained in:

committed by
Todd Kjos

parent
2f61ec09b0
commit
6aa9e78d6e
@@ -812,7 +812,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
|
|||||||
*/
|
*/
|
||||||
void synchronize_rcu_expedited(void)
|
void synchronize_rcu_expedited(void)
|
||||||
{
|
{
|
||||||
bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
|
bool no_wq;
|
||||||
struct rcu_exp_work rew;
|
struct rcu_exp_work rew;
|
||||||
struct rcu_node *rnp;
|
struct rcu_node *rnp;
|
||||||
unsigned long s;
|
unsigned long s;
|
||||||
@@ -837,9 +837,15 @@ void synchronize_rcu_expedited(void)
|
|||||||
if (exp_funnel_lock(s))
|
if (exp_funnel_lock(s))
|
||||||
return; /* Someone else did our work for us. */
|
return; /* Someone else did our work for us. */
|
||||||
|
|
||||||
|
/* Don't use workqueue during boot or from an incoming CPU. */
|
||||||
|
preempt_disable();
|
||||||
|
no_wq = rcu_scheduler_active == RCU_SCHEDULER_INIT ||
|
||||||
|
!cpumask_test_cpu(smp_processor_id(), cpu_active_mask);
|
||||||
|
preempt_enable();
|
||||||
|
|
||||||
/* Ensure that load happens before action based on it. */
|
/* Ensure that load happens before action based on it. */
|
||||||
if (unlikely(boottime)) {
|
if (unlikely(no_wq)) {
|
||||||
/* Direct call during scheduler init and early_initcalls(). */
|
/* Direct call for scheduler init, early_initcall()s, and incoming CPUs. */
|
||||||
rcu_exp_sel_wait_wake(s);
|
rcu_exp_sel_wait_wake(s);
|
||||||
} else {
|
} else {
|
||||||
/* Marshall arguments & schedule the expedited grace period. */
|
/* Marshall arguments & schedule the expedited grace period. */
|
||||||
@@ -857,7 +863,7 @@ void synchronize_rcu_expedited(void)
|
|||||||
/* Let the next expedited grace period start. */
|
/* Let the next expedited grace period start. */
|
||||||
mutex_unlock(&rcu_state.exp_mutex);
|
mutex_unlock(&rcu_state.exp_mutex);
|
||||||
|
|
||||||
if (likely(!boottime))
|
if (likely(!no_wq))
|
||||||
destroy_work_on_stack(&rew.rew_work);
|
destroy_work_on_stack(&rew.rew_work);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
|
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
|
||||||
|
Reference in New Issue
Block a user