Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU updates from Paul E. McKenney:

 - Documentation updates, yet again just simple changes.

 - Miscellaneous fixes, including a change to call_rcu()'s rcu_head
   alignment check.

 - Security-motivated list consistency checks, which are disabled by
   default behind DEBUG_LIST.

 - Torture-test updates.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
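The "security-motivated list consistency checks" follow the classic linked-list hardening pattern: before splicing a node between prev and next, verify that the neighbors still point at each other, and warn and bail out rather than write through corrupted pointers. Below is a minimal userspace sketch of the idea, assuming a doubly-linked list shaped like the kernel's struct list_head; it is an illustration of the concept, not the exact lib/list_debug.c code:

        #include <stdbool.h>
        #include <stdio.h>

        struct list_head {
                struct list_head *next, *prev;
        };

        /* Refuse to link @new between @prev and @next if the neighbors no
         * longer point at each other, which indicates list corruption
         * (possibly an attacker-controlled overwrite). */
        bool list_add_valid(struct list_head *new,
                            struct list_head *prev,
                            struct list_head *next)
        {
                if (next->prev != prev) {
                        fprintf(stderr, "list corruption: next->prev != prev\n");
                        return false;
                }
                if (prev->next != next) {
                        fprintf(stderr, "list corruption: prev->next != next\n");
                        return false;
                }
                if (new == prev || new == next) {
                        fprintf(stderr, "list corruption: double add\n");
                        return false;
                }
                return true;
        }

        void list_add_checked(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next)
        {
                if (!list_add_valid(new, prev, next))
                        return; /* leak the node rather than corrupt the list */
                next->prev = new;
                new->next  = next;
                new->prev  = prev;
                prev->next = new;
        }

Leaking the node on failure mirrors the kernel's choice elsewhere in this series: a bounded memory leak is preferable to letting a corrupted pointer write become an exploit primitive.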
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -289,15 +289,24 @@ static int rcu_torture_read_lock(void) __acquires(RCU)
 
 static void rcu_read_delay(struct torture_random_state *rrsp)
 {
+	unsigned long started;
+	unsigned long completed;
 	const unsigned long shortdelay_us = 200;
 	const unsigned long longdelay_ms = 50;
+	unsigned long long ts;
 
 	/* We want a short delay sometimes to make a reader delay the grace
 	 * period, and we want a long delay occasionally to trigger
 	 * force_quiescent_state. */
 
-	if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
+	if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
+		started = cur_ops->completed();
+		ts = rcu_trace_clock_local();
 		mdelay(longdelay_ms);
+		completed = cur_ops->completed();
+		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
+					  started, completed);
+	}
 	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
 		udelay(shortdelay_us);
 #ifdef CONFIG_PREEMPT
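In the hunk above, !(torture_random(rrsp) % N) fires roughly once per N calls, so scaling N by nrealreaders keeps the aggregate delay injected across all readers roughly constant as the reader count grows. A standalone sketch of the pattern, with rand() standing in for torture_random():

        #include <stdio.h>
        #include <stdlib.h>

        /* Fire a rare event with probability ~1/n per call: rand() % n
         * is zero for about one value in n. */
        int one_in(unsigned long n)
        {
                return !(rand() % n);
        }

        int main(void)
        {
                unsigned long hits = 0, trials = 10 * 1000 * 1000;
                unsigned long n = 100000; /* cf. nrealreaders * 2000 * longdelay_ms */

                for (unsigned long i = 0; i < trials; i++)
                        hits += one_in(n);
                printf("hit rate: %g (expect ~%g)\n",
                       (double)hits / trials, 1.0 / n);
                return 0;
        }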
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1304,7 +1304,8 @@ static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
 	if (!rcu_kick_kthreads)
 		return;
 	j = READ_ONCE(rsp->jiffies_kick_kthreads);
-	if (time_after(jiffies, j) && rsp->gp_kthread) {
+	if (time_after(jiffies, j) && rsp->gp_kthread &&
+	    (rcu_gp_in_progress(rsp) || READ_ONCE(rsp->gp_flags))) {
 		WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
 		rcu_ftrace_dump(DUMP_ALL);
 		wake_up_process(rsp->gp_kthread);
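The time_after() comparison used here is wraparound-safe: it subtracts in unsigned arithmetic and reinterprets the result as signed, so it stays correct when the jiffies counter overflows. Simplified from <linux/jiffies.h> (the kernel version also typecheck()s both arguments as unsigned long):

        #include <stdio.h>

        /* True if a is after b, even across counter wrap: (b - a) is
         * computed modulo 2^BITS and then read as a signed value. */
        #define time_after(a, b)	((long)((b) - (a)) < 0)

        int main(void)
        {
                unsigned long j = (unsigned long)-10;	/* just before wrap */
                unsigned long now = j + 20;		/* wrapped to a small value */

                printf("%d\n", time_after(now, j));	/* prints 1 */
                return 0;
        }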
@@ -2828,8 +2829,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  * Also schedule RCU core processing.
  *
  * This function must be called from hardirq context. It is normally
- * invoked from the scheduling-clock interrupt. If rcu_pending returns
- * false, there is no point in invoking rcu_check_callbacks().
+ * invoked from the scheduling-clock interrupt.
  */
 void rcu_check_callbacks(int user)
 {
@@ -3121,7 +3121,9 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
 	unsigned long flags;
 	struct rcu_data *rdp;
 
-	WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
+	/* Misaligned rcu_head! */
+	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
+
 	if (debug_rcu_head_queue(head)) {
 		/* Probable double call_rcu(), so leak the callback. */
 		WRITE_ONCE(head->func, rcu_leak_callback);
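This is the call_rcu() rcu_head alignment-check change called out in the merge message: masking an address with sizeof(void *) - 1 flags anything that is not pointer-aligned, whereas the old 0x1 mask caught only odd addresses. A standalone demonstration of the mask arithmetic (helper name is illustrative):

        #include <stdint.h>
        #include <stdio.h>

        /* A power-of-two alignment check: addr is n-byte aligned exactly
         * when its low log2(n) bits are zero, i.e. (addr & (n - 1)) == 0. */
        static int misaligned(uintptr_t addr, uintptr_t align)
        {
                return (addr & (align - 1)) != 0;
        }

        int main(void)
        {
                _Alignas(sizeof(void *)) char buf[16];
                uintptr_t base = (uintptr_t)buf;

                /* base + 2 is even, so the old "& 0x1" test passes it... */
                printf("old 0x1 mask:            %d\n", misaligned(base + 2, 2));
                /* ...but it is not pointer-aligned, so the new mask flags it. */
                printf("sizeof(void *) - 1 mask: %d\n",
                       misaligned(base + 2, sizeof(void *)));
                return 0;
        }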
@@ -3130,13 +3132,6 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
 	}
 	head->func = func;
 	head->next = NULL;
-
-	/*
-	 * Opportunistically note grace-period endings and beginnings.
-	 * Note that we might see a beginning right after we see an
-	 * end, but never vice versa, since this CPU has to pass through
-	 * a quiescent state betweentimes.
-	 */
 	local_irq_save(flags);
 	rdp = this_cpu_ptr(rsp->rda);
 
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -404,6 +404,7 @@ struct rcu_data {
 	atomic_long_t exp_workdone1;	/* # done by others #1. */
 	atomic_long_t exp_workdone2;	/* # done by others #2. */
 	atomic_long_t exp_workdone3;	/* # done by others #3. */
+	int exp_dynticks_snap;		/* Double-check need for IPI. */
 
 	/* 7) Callback offloading. */
 #ifdef CONFIG_RCU_NOCB_CPU
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -358,8 +358,10 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 		struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
+		rdp->exp_dynticks_snap =
+			atomic_add_return(0, &rdtp->dynticks);
 		if (raw_smp_processor_id() == cpu ||
-		    !(atomic_add_return(0, &rdtp->dynticks) & 0x1) ||
+		    !(rdp->exp_dynticks_snap & 0x1) ||
 		    !(rnp->qsmaskinitnext & rdp->grpmask))
 			mask_ofl_test |= rdp->grpmask;
 	}
@@ -377,9 +379,17 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 	/* IPI the remaining CPUs for expedited quiescent state. */
 	for_each_leaf_node_possible_cpu(rnp, cpu) {
 		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
+		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+		struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
 		if (!(mask_ofl_ipi & mask))
 			continue;
 retry_ipi:
+		if (atomic_add_return(0, &rdtp->dynticks) !=
+		    rdp->exp_dynticks_snap) {
+			mask_ofl_test |= mask;
+			continue;
+		}
 		ret = smp_call_function_single(cpu, func, rsp, 0);
 		if (!ret) {
 			mask_ofl_ipi &= ~mask;
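Together with the new exp_dynticks_snap field above, these two hunks form a snapshot-and-recheck pattern: record each CPU's dynticks counter while selecting CPUs, then reread it immediately before sending the expedited-grace-period IPI; any change means the CPU passed through an idle transition on its own, so the costly IPI can be skipped. A minimal sketch of the pattern using C11 atomics; the struct and function names are illustrative, not the kernel's:

        #include <stdatomic.h>
        #include <stdbool.h>

        /* Toy per-CPU state; the kernel keeps this split across
         * rcu_dynticks and rcu_data. */
        struct cpu_state {
                atomic_long dynticks;	/* toy encoding: even = idle, odd = active */
                long snap;		/* snapshot taken during CPU selection */
        };

        /* Phase 1 (first hunk): snapshot the counter; an even value means
         * the CPU is already quiescent and needs no IPI at all. */
        bool needs_ipi(struct cpu_state *cs)
        {
                cs->snap = atomic_load(&cs->dynticks);
                return cs->snap & 0x1;	/* odd: CPU was active at snapshot time */
        }

        /* Phase 2 (second hunk): recheck right before the expensive IPI;
         * any change since the snapshot implies the CPU has been idle in
         * the meantime, so the IPI can be skipped. */
        bool still_needs_ipi(struct cpu_state *cs)
        {
                return atomic_load(&cs->dynticks) == cs->snap;
        }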