Merge branches 'fixes.2020.04.27a', 'kfree_rcu.2020.04.27a', 'rcu-tasks.2020.04.27a', 'stall.2020.04.27a' and 'torture.2020.05.07a' into HEAD
fixes.2020.04.27a: Miscellaneous fixes.
kfree_rcu.2020.04.27a: Changes related to kfree_rcu().
rcu-tasks.2020.04.27a: Addition of new RCU-tasks flavors.
stall.2020.04.27a: RCU CPU stall-warning updates.
torture.2020.05.07a: Torture-test updates.
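The largest group of hunks below reworks __rcu_read_unlock() so that rcu_preempt_read_exit() returns the decremented ->rcu_read_lock_nesting count, removing the RCU_NEST_BIAS negative-nesting trick. For orientation, a minimal preemptible-RCU reader looks like the sketch below; the struct, pointer, and function names are illustrative only and do not come from this commit.

#include <linux/rcupdate.h>

/* Illustrative reader only; names are not from this commit. */
struct foo {
	int val;
	struct rcu_head rcu;
};

static struct foo __rcu *gp;	/* hypothetical RCU-protected pointer */

static int read_foo_val(void)
{
	struct foo *p;
	int v = -1;

	rcu_read_lock();		/* increments ->rcu_read_lock_nesting */
	p = rcu_dereference(gp);	/* safe dereference inside the reader */
	if (p)
		v = p->val;
	rcu_read_unlock();		/* nesting returns to 0; outermost exit may report a deferred QS */
	return v;
}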
@@ -331,6 +331,7 @@ void rcu_note_context_switch(bool preempt)
 	rcu_qs();
 	if (rdp->exp_deferred_qs)
 		rcu_report_exp_rdp(rdp);
+	rcu_tasks_qs(current, preempt);
 	trace_rcu_utilization(TPS("End context switch"));
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
@@ -345,9 +346,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 	return READ_ONCE(rnp->gp_tasks) != NULL;
 }
 
-/* Bias and limit values for ->rcu_read_lock_nesting. */
-#define RCU_NEST_BIAS INT_MAX
-#define RCU_NEST_NMAX (-INT_MAX / 2)
+/* limit value for ->rcu_read_lock_nesting. */
 #define RCU_NEST_PMAX (INT_MAX / 2)
 
 static void rcu_preempt_read_enter(void)
@@ -355,9 +354,9 @@ static void rcu_preempt_read_enter(void)
 	current->rcu_read_lock_nesting++;
 }
 
-static void rcu_preempt_read_exit(void)
+static int rcu_preempt_read_exit(void)
 {
-	current->rcu_read_lock_nesting--;
+	return --current->rcu_read_lock_nesting;
 }
 
 static void rcu_preempt_depth_set(int val)
@@ -390,21 +389,15 @@ void __rcu_read_unlock(void)
 {
 	struct task_struct *t = current;
 
-	if (rcu_preempt_depth() != 1) {
-		rcu_preempt_read_exit();
-	} else {
+	if (rcu_preempt_read_exit() == 0) {
 		barrier(); /* critical section before exit code. */
-		rcu_preempt_depth_set(-RCU_NEST_BIAS);
-		barrier(); /* assign before ->rcu_read_unlock_special load */
 		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
 			rcu_read_unlock_special(t);
-		barrier(); /* ->rcu_read_unlock_special load before assign */
-		rcu_preempt_depth_set(0);
 	}
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
 		int rrln = rcu_preempt_depth();
 
-		WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX);
+		WARN_ON_ONCE(rrln < 0 || rrln > RCU_NEST_PMAX);
 	}
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
@@ -556,7 +549,7 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 {
 	return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
 		READ_ONCE(t->rcu_read_unlock_special.s)) &&
-	       rcu_preempt_depth() <= 0;
+	       rcu_preempt_depth() == 0;
 }
 
 /*
@@ -569,16 +562,11 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 static void rcu_preempt_deferred_qs(struct task_struct *t)
 {
 	unsigned long flags;
-	bool couldrecurse = rcu_preempt_depth() >= 0;
 
 	if (!rcu_preempt_need_deferred_qs(t))
 		return;
-	if (couldrecurse)
-		rcu_preempt_depth_set(rcu_preempt_depth() - RCU_NEST_BIAS);
 	local_irq_save(flags);
 	rcu_preempt_deferred_qs_irqrestore(t, flags);
-	if (couldrecurse)
-		rcu_preempt_depth_set(rcu_preempt_depth() + RCU_NEST_BIAS);
 }
 
 /*
@@ -615,19 +603,18 @@ static void rcu_read_unlock_special(struct task_struct *t)
 		struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 		struct rcu_node *rnp = rdp->mynode;
 
-		exp = (t->rcu_blocked_node && t->rcu_blocked_node->exp_tasks) ||
-		      (rdp->grpmask & READ_ONCE(rnp->expmask)) ||
-		      tick_nohz_full_cpu(rdp->cpu);
+		exp = (t->rcu_blocked_node &&
+		       READ_ONCE(t->rcu_blocked_node->exp_tasks)) ||
+		      (rdp->grpmask & READ_ONCE(rnp->expmask));
 		// Need to defer quiescent state until everything is enabled.
-		if (irqs_were_disabled && use_softirq &&
-		    (in_interrupt() ||
-		     (exp && !t->rcu_read_unlock_special.b.deferred_qs))) {
-			// Using softirq, safe to awaken, and we get
-			// no help from enabling irqs, unlike bh/preempt.
+		if (use_softirq && (in_irq() || (exp && !irqs_were_disabled))) {
+			// Using softirq, safe to awaken, and either the
+			// wakeup is free or there is an expedited GP.
 			raise_softirq_irqoff(RCU_SOFTIRQ);
 		} else {
 			// Enabling BH or preempt does reschedule, so...
-			// Also if no expediting or NO_HZ_FULL, slow is OK.
+			// Also if no expediting, slow is OK.
+			// Plus nohz_full CPUs eventually get tick enabled.
 			set_tsk_need_resched(current);
 			set_preempt_need_resched();
 			if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
@@ -640,7 +627,6 @@ static void rcu_read_unlock_special(struct task_struct *t)
 				irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
 			}
 		}
-		t->rcu_read_unlock_special.b.deferred_qs = true;
 		local_irq_restore(flags);
 		return;
 	}
@@ -699,7 +685,7 @@ static void rcu_flavor_sched_clock_irq(int user)
 	} else if (rcu_preempt_need_deferred_qs(t)) {
 		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
 		return;
-	} else if (!rcu_preempt_depth()) {
+	} else if (!WARN_ON_ONCE(rcu_preempt_depth())) {
 		rcu_qs(); /* Report immediate QS. */
 		return;
 	}
@@ -854,8 +840,7 @@ void rcu_note_context_switch(bool preempt)
 	this_cpu_write(rcu_data.rcu_urgent_qs, false);
 	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
 		rcu_momentary_dyntick_idle();
-	if (!preempt)
-		rcu_tasks_qs(current);
+	rcu_tasks_qs(current, preempt);
 out:
 	trace_rcu_utilization(TPS("End context switch"));
 }
@@ -2568,3 +2553,21 @@ static void rcu_dynticks_task_exit(void)
 	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
 }
+
+/* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
+static void rcu_dynticks_task_trace_enter(void)
+{
+#ifdef CONFIG_TASKS_RCU_TRACE
+	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
+		current->trc_reader_special.b.need_mb = true;
+#endif /* #ifdef CONFIG_TASKS_RCU_TRACE */
+}
+
+/* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
+static void rcu_dynticks_task_trace_exit(void)
+{
+#ifdef CONFIG_TASKS_RCU_TRACE
+	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
+		current->trc_reader_special.b.need_mb = false;
+#endif /* #ifdef CONFIG_TASKS_RCU_TRACE */
+}
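The two functions added in the hunk above are idle/user-transition hooks for the RCU Tasks Trace flavor introduced by the rcu-tasks branch. As a hedged illustration, a tasks-trace read-side section is delimited by rcu_read_lock_trace()/rcu_read_unlock_trace(); the function and variable names below are invented for this sketch and do not appear in the commit.

#include <linux/rcupdate_trace.h>

/* Illustrative RCU Tasks Trace reader; names are not from this commit. */
static int read_shared_value(int *shared)
{
	int v;

	rcu_read_lock_trace();		/* begin tasks-trace read-side section */
	v = READ_ONCE(*shared);		/* access data protected by RCU Tasks Trace */
	rcu_read_unlock_trace();	/* end tasks-trace read-side section */
	return v;
}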