Merge branches 'doc.2020.08.24a', 'fixes.2020.09.03b' and 'torture.2020.08.24a' into HEAD
doc.2020.08.24a: Documentation updates.
fixes.2020.09.03b: Miscellaneous fixes.
torture.2020.08.24a: Torture-test updates.
@@ -475,8 +475,16 @@ bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq)
 	 * Also advance to the oldest segment of callbacks whose
 	 * ->gp_seq[] completion is at or after that passed in via "seq",
 	 * skipping any empty segments.
+	 *
+	 * Note that segment "i" (and any lower-numbered segments
+	 * containing older callbacks) will be unaffected, and their
+	 * grace-period numbers remain unchanged.  For example, if i ==
+	 * WAIT_TAIL, then neither WAIT_TAIL nor DONE_TAIL will be touched.
+	 * Instead, the CBs in NEXT_TAIL will be merged with those in
+	 * NEXT_READY_TAIL and the grace-period number of NEXT_READY_TAIL
+	 * would be updated.  NEXT_TAIL would then be empty.
 	 */
-	if (++i >= RCU_NEXT_TAIL)
+	if (rcu_segcblist_restempty(rsclp, i) || ++i >= RCU_NEXT_TAIL)
 		return false;
 
 	/*
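Aside on the hunk above: as I read it, rcu_segcblist_restempty() reports whether every segment after the given one is empty, so the new test also bails out (and thus avoids requesting a grace period) when no callbacks remain to be accelerated. A toy, userspace-only model of that early-exit guard, with invented names (toy_seglist, rest_empty, try_accelerate) standing in for the kernel structures:

#include <stdbool.h>
#include <stdio.h>

#define NSEGS 4	/* toy stand-in for the RCU_DONE_TAIL..RCU_NEXT_TAIL segments */

/* Toy model: per-segment callback counts instead of linked callback lists. */
struct toy_seglist {
	unsigned long seg_counts[NSEGS];
};

/* True if every segment *after* index seg is empty (cf. rcu_segcblist_restempty()). */
static bool rest_empty(const struct toy_seglist *sl, int seg)
{
	for (int i = seg + 1; i < NSEGS; i++)
		if (sl->seg_counts[i])
			return false;
	return true;
}

/* Returns true only if acceleration would actually do something. */
static bool try_accelerate(const struct toy_seglist *sl, int i)
{
	if (rest_empty(sl, i) || ++i >= NSEGS - 1)
		return false;	/* nothing to accelerate: skip starting a grace period */
	return true;
}

int main(void)
{
	struct toy_seglist sl = { .seg_counts = { 2, 0, 0, 0 } };

	printf("accelerate? %d\n", try_accelerate(&sl, 1));	/* 0: nothing pending */
	sl.seg_counts[3] = 1;
	printf("accelerate? %d\n", try_accelerate(&sl, 1));	/* 1: work to do */
	return 0;
}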
@@ -52,19 +52,6 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
 
-#ifndef data_race
-#define data_race(expr) \
-	({ \
-		expr; \
-	})
-#endif
-#ifndef ASSERT_EXCLUSIVE_WRITER
-#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
-#endif
-#ifndef ASSERT_EXCLUSIVE_ACCESS
-#define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
-#endif
-
 /* Bits for ->extendables field, extendables param, and related definitions. */
 #define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
 #define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
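The block removed above is the usual fallback-definition idiom: when the surrounding headers already provide data_race() and the ASSERT_EXCLUSIVE_*() annotations (as the KCSAN-aware headers do), the #ifndef guards compile to nothing; otherwise the names degrade to no-ops so the file builds either way. These per-file copies were presumably dropped because the real definitions are now always available. A standalone illustration of the idiom (illustrative only, not the kernel's definitions):

#include <stdio.h>

/* Provide no-op fallbacks only when a real instrumented version is absent. */
#ifndef data_race
#define data_race(expr) \
	({ \
		expr; \
	})
#endif

#ifndef ASSERT_EXCLUSIVE_WRITER
#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
#endif

static int counter;

int main(void)
{
	ASSERT_EXCLUSIVE_WRITER(counter);	/* no-op here; a checker could hook it */
	int snapshot = data_race(counter);	/* marked: a racy read would be intentional */

	printf("%d\n", snapshot);
	return 0;
}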
@@ -100,6 +87,7 @@ torture_param(bool, gp_normal, false,
 	    "Use normal (non-expedited) GP wait primitives");
 torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
+torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
 torture_param(int, n_barrier_cbs, 0,
 	     "# of callbacks/kthreads for barrier testing");
 torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
@@ -185,6 +173,7 @@ static long n_barrier_successes; /* did rcu_barrier test succeed? */
 static unsigned long n_read_exits;
 static struct list_head rcu_torture_removed;
 static unsigned long shutdown_jiffies;
+static unsigned long start_gp_seq;
 
 static int rcu_torture_writer_state;
 #define RTWS_FIXED_DELAY	0
@@ -1413,6 +1402,9 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp)
 	preempt_enable();
 	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
 	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
+	// This next splat is expected behavior if leakpointer, especially
+	// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
+	WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
 
 	/* If error or close call, record the sequence of reader protections. */
 	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
@@ -1808,6 +1800,7 @@ struct rcu_fwd {
 	unsigned long rcu_launder_gp_seq_start;
 };
 
+static DEFINE_MUTEX(rcu_fwd_mutex);
 static struct rcu_fwd *rcu_fwds;
 static bool rcu_fwd_emergency_stop;
 
@@ -2074,8 +2067,14 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
 static int rcutorture_oom_notify(struct notifier_block *self,
 				 unsigned long notused, void *nfreed)
 {
-	struct rcu_fwd *rfp = rcu_fwds;
+	struct rcu_fwd *rfp;
 
+	mutex_lock(&rcu_fwd_mutex);
+	rfp = rcu_fwds;
+	if (!rfp) {
+		mutex_unlock(&rcu_fwd_mutex);
+		return NOTIFY_OK;
+	}
 	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
 	     __func__);
 	rcu_torture_fwd_cb_hist(rfp);
@@ -2093,6 +2092,7 @@ static int rcutorture_oom_notify(struct notifier_block *self,
 	smp_mb(); /* Frees before return to avoid redoing OOM. */
 	(*(unsigned long *)nfreed)++;  /* Forward progress CBs freed! */
 	pr_info("%s returning after OOM processing.\n", __func__);
+	mutex_unlock(&rcu_fwd_mutex);
 	return NOTIFY_OK;
 }
 
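The two rcutorture_oom_notify() hunks above put the rcu_fwds pointer behind rcu_fwd_mutex and make the notifier tolerate a NULL pointer, so an OOM notification racing with test teardown sees either a valid structure or nothing. A minimal userspace sketch of the same shape, with invented names (cfg, cfg_lock, oom_cb) and pthreads standing in for the kernel primitives; it assumes the publish/unpublish discipline shown in the init/cleanup hunks further down:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fwd_state { long nfreed; };

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fwd_state *cfg;	/* shared handle, only touched under cfg_lock */

/* Asynchronous callback: must tolerate running before init or after cleanup. */
static void oom_cb(void)
{
	pthread_mutex_lock(&cfg_lock);
	if (!cfg) {			/* torn down (or not yet set up): nothing to do */
		pthread_mutex_unlock(&cfg_lock);
		return;
	}
	cfg->nfreed++;			/* safe: cfg cannot be freed while we hold the lock */
	pthread_mutex_unlock(&cfg_lock);
}

static void fwd_init(void)
{
	struct fwd_state *p = calloc(1, sizeof(*p));

	pthread_mutex_lock(&cfg_lock);
	cfg = p;			/* publish */
	pthread_mutex_unlock(&cfg_lock);
}

static void fwd_cleanup(void)
{
	struct fwd_state *p;

	pthread_mutex_lock(&cfg_lock);
	p = cfg;
	cfg = NULL;			/* unpublish before freeing */
	pthread_mutex_unlock(&cfg_lock);
	free(p);
}

int main(void)
{
	oom_cb();	/* before init: harmless */
	fwd_init();
	oom_cb();
	fwd_cleanup();
	oom_cb();	/* after cleanup: harmless */
	return 0;
}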
@@ -2114,13 +2114,11 @@ static int rcu_torture_fwd_prog(void *args)
 	do {
 		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
 		WRITE_ONCE(rcu_fwd_emergency_stop, false);
-		register_oom_notifier(&rcutorture_oom_nb);
 		if (!IS_ENABLED(CONFIG_TINY_RCU) ||
 		    rcu_inkernel_boot_has_ended())
 			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
 		if (rcu_inkernel_boot_has_ended())
 			rcu_torture_fwd_prog_cr(rfp);
-		unregister_oom_notifier(&rcutorture_oom_nb);
 
 		/* Avoid slow periods, better to test when busy. */
 		stutter_wait("rcu_torture_fwd_prog");
@@ -2160,9 +2158,26 @@ static int __init rcu_torture_fwd_prog_init(void)
 		return -ENOMEM;
 	spin_lock_init(&rfp->rcu_fwd_lock);
 	rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
+	mutex_lock(&rcu_fwd_mutex);
+	rcu_fwds = rfp;
+	mutex_unlock(&rcu_fwd_mutex);
+	register_oom_notifier(&rcutorture_oom_nb);
 	return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
 }
 
+static void rcu_torture_fwd_prog_cleanup(void)
+{
+	struct rcu_fwd *rfp;
+
+	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
+	rfp = rcu_fwds;
+	mutex_lock(&rcu_fwd_mutex);
+	rcu_fwds = NULL;
+	mutex_unlock(&rcu_fwd_mutex);
+	unregister_oom_notifier(&rcutorture_oom_nb);
+	kfree(rfp);
+}
+
 /* Callback function for RCU barrier testing. */
 static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
 {
@@ -2460,7 +2475,7 @@ rcu_torture_cleanup(void)
 	show_rcu_gp_kthreads();
 	rcu_torture_read_exit_cleanup();
 	rcu_torture_barrier_cleanup();
-	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
+	rcu_torture_fwd_prog_cleanup();
 	torture_stop_kthread(rcu_torture_stall, stall_task);
 	torture_stop_kthread(rcu_torture_writer, writer_task);
 
@@ -2482,8 +2497,9 @@ rcu_torture_cleanup(void)
 
 	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
 	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
-	pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
-		 cur_ops->name, gp_seq, flags);
+	pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
+		 cur_ops->name, (long)gp_seq, flags,
+		 rcutorture_seq_diff(gp_seq, start_gp_seq));
 	torture_stop_kthread(rcu_torture_stats, stats_task);
 	torture_stop_kthread(rcu_torture_fqs, fqs_task);
 	if (rcu_torture_can_boost())
@@ -2607,6 +2623,8 @@ rcu_torture_init(void)
 	long i;
 	int cpu;
 	int firsterr = 0;
+	int flags = 0;
+	unsigned long gp_seq = 0;
 	static struct rcu_torture_ops *torture_ops[] = {
 		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
 		&busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
@@ -2649,6 +2667,11 @@ rcu_torture_init(void)
 		nrealreaders = 1;
 	}
 	rcu_torture_print_module_parms(cur_ops, "Start of test");
+	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
+	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
+	start_gp_seq = gp_seq;
+	pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
+		 cur_ops->name, (long)gp_seq, flags);
 
 	/* Set up the freelist. */
 
@@ -546,9 +546,11 @@ static int main_func(void *arg)
 	// Print the average of all experiments
 	SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");
 
-	buf[0] = 0;
-	strcat(buf, "\n");
-	strcat(buf, "Runs\tTime(ns)\n");
+	if (!errexit) {
+		buf[0] = 0;
+		strcat(buf, "\n");
+		strcat(buf, "Runs\tTime(ns)\n");
+	}
 
 	for (exp = 0; exp < nruns; exp++) {
 		u64 avg;
 
@@ -29,19 +29,6 @@
 #include "rcu.h"
 #include "rcu_segcblist.h"
 
-#ifndef data_race
-#define data_race(expr) \
-	({ \
-		expr; \
-	})
-#endif
-#ifndef ASSERT_EXCLUSIVE_WRITER
-#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
-#endif
-#ifndef ASSERT_EXCLUSIVE_ACCESS
-#define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
-#endif
-
 /* Holdoff in nanoseconds for auto-expediting. */
 #define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
 static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
@@ -70,19 +70,6 @@
 #endif
 #define MODULE_PARAM_PREFIX "rcutree."
 
-#ifndef data_race
-#define data_race(expr) \
-	({ \
-		expr; \
-	})
-#endif
-#ifndef ASSERT_EXCLUSIVE_WRITER
-#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
-#endif
-#ifndef ASSERT_EXCLUSIVE_ACCESS
-#define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
-#endif
-
 /* Data structures. */
 
 /*
@@ -1090,11 +1077,6 @@ static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
 	}
 }
 
-noinstr bool __rcu_is_watching(void)
-{
-	return !rcu_dynticks_curr_cpu_in_eqs();
-}
-
 /**
  * rcu_is_watching - see if RCU thinks that the current CPU is not idle
  *
@@ -1227,13 +1209,28 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 		return 1;
 	}
 
-	/* If waiting too long on an offline CPU, complain. */
-	if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
-	    time_after(jiffies, rcu_state.gp_start + HZ)) {
+	/*
+	 * Complain if a CPU that is considered to be offline from RCU's
+	 * perspective has not yet reported a quiescent state.  After all,
+	 * the offline CPU should have reported a quiescent state during
+	 * the CPU-offline process, or, failing that, by rcu_gp_init()
+	 * if it ran concurrently with either the CPU going offline or the
+	 * last task on a leaf rcu_node structure exiting its RCU read-side
+	 * critical section while all CPUs corresponding to that structure
+	 * are offline.  This added warning detects bugs in any of these
+	 * code paths.
+	 *
+	 * The rcu_node structure's ->lock is held here, which excludes
+	 * the relevant portions the CPU-hotplug code, the grace-period
+	 * initialization code, and the rcu_read_unlock() code paths.
+	 *
+	 * For more detail, please refer to the "Hotplug CPU" section
+	 * of RCU's Requirements documentation.
+	 */
+	if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
 		bool onl;
 		struct rcu_node *rnp1;
 
-		WARN_ON(1); /* Offline CPUs are supposed to report QS! */
 		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
 			__func__, rnp->grplo, rnp->grphi, rnp->level,
 			(long)rnp->gp_seq, (long)rnp->completedqs);
@@ -1496,9 +1493,10 @@ static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
 
 	/* Trace depending on how much we were able to accelerate. */
 	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
-		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccWaitCB"));
+		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
 	else
-		trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccReadyCB"));
+		trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
 
 	return ret;
 }
 
@@ -1718,10 +1716,13 @@ static bool rcu_gp_init(void)
 	raw_spin_unlock_irq_rcu_node(rnp);
 
 	/*
-	 * Apply per-leaf buffered online and offline operations to the
-	 * rcu_node tree. Note that this new grace period need not wait
-	 * for subsequent online CPUs, and that quiescent-state forcing
-	 * will handle subsequent offline CPUs.
+	 * Apply per-leaf buffered online and offline operations to
+	 * the rcu_node tree. Note that this new grace period need not
+	 * wait for subsequent online CPUs, and that RCU hooks in the CPU
+	 * offlining path, when combined with checks in this function,
+	 * will handle CPUs that are currently going offline or that will
+	 * go offline later.  Please also refer to "Hotplug CPU" section
+	 * of RCU's Requirements documentation.
 	 */
 	rcu_state.gp_state = RCU_GP_ONOFF;
 	rcu_for_each_leaf_node(rnp) {
@@ -1896,7 +1897,7 @@ static void rcu_gp_fqs_loop(void)
 			break;
 		/* If time for quiescent-state forcing, do it. */
 		if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
-		    (gf & RCU_GP_FLAG_FQS)) {
+		    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
 			trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
 					       TPS("fqsstart"));
 			rcu_gp_fqs(first_gp_fqs);
@@ -2374,6 +2375,7 @@ int rcutree_dead_cpu(unsigned int cpu)
  */
 static void rcu_do_batch(struct rcu_data *rdp)
 {
+	int div;
 	unsigned long flags;
 	const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
 			       rcu_segcblist_is_offloaded(&rdp->cblist);
@@ -2402,9 +2404,15 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	rcu_nocb_lock(rdp);
 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
 	pending = rcu_segcblist_n_cbs(&rdp->cblist);
-	bl = max(rdp->blimit, pending >> rcu_divisor);
-	if (unlikely(bl > 100))
-		tlimit = local_clock() + rcu_resched_ns;
+	div = READ_ONCE(rcu_divisor);
+	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
+	bl = max(rdp->blimit, pending >> div);
+	if (unlikely(bl > 100)) {
+		long rrn = READ_ONCE(rcu_resched_ns);
+
+		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
+		tlimit = local_clock() + rrn;
+	}
 	trace_rcu_batch_start(rcu_state.name,
 			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
 	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
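rcu_divisor and rcu_resched_ns are module parameters that can change at run time, so the rewritten code above snapshots each one with READ_ONCE() and clamps the local copy to a sane range before use instead of trusting the raw value. A plain-C sketch of the same snapshot-then-clamp pattern, with volatile standing in for READ_ONCE() and made-up names and bounds:

#include <stdio.h>

#define NSEC_PER_MSEC	1000000L
#define NSEC_PER_SEC	1000000000L

/* Tunables that some other thread (or an operator) may change at any time. */
static volatile int divisor = -1;		/* nonsense value on purpose */
static volatile long resched_ns = 5L;		/* far too small on purpose */

static long compute_batch_limit(long blimit, long pending, long *deadline_ns)
{
	/* Snapshot once (volatile read stands in for READ_ONCE()), then clamp the copies. */
	int div = divisor;
	long rrn = resched_ns;

	div = div < 0 ? 7 : div > (int)(sizeof(long) * 8 - 2) ? (int)(sizeof(long) * 8 - 2) : div;
	rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;

	*deadline_ns = rrn;
	return blimit > (pending >> div) ? blimit : (pending >> div);
}

int main(void)
{
	long deadline;
	long bl = compute_batch_limit(10, 100000, &deadline);

	printf("batch limit %ld, deadline %ld ns\n", bl, deadline);	/* 781, 1000000 */
	return 0;
}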
@@ -2545,8 +2553,7 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		rcu_state.cbovldnext |= !!rnp->cbovldmask;
 		if (rnp->qsmask == 0) {
-			if (!IS_ENABLED(CONFIG_PREEMPT_RCU) ||
-			    rcu_preempt_blocked_readers_cgp(rnp)) {
+			if (rcu_preempt_blocked_readers_cgp(rnp)) {
 				/*
 				 * No point in scanning bits because they
 				 * are all zero.  But we might need to
@@ -3443,7 +3450,7 @@ kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 	unsigned long count = 0;
 
 	/* Snapshot count of all CPUs */
-	for_each_online_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
 		count += READ_ONCE(krcp->count);
@@ -3458,7 +3465,7 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	int cpu, freed = 0;
 	unsigned long flags;
 
-	for_each_online_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		int count;
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
@@ -3491,7 +3498,7 @@ void __init kfree_rcu_scheduler_running(void)
 	int cpu;
 	unsigned long flags;
 
-	for_each_online_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
 		raw_spin_lock_irqsave(&krcp->lock, flags);
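The three kfree_rcu hunks above switch from for_each_online_cpu() to for_each_possible_cpu(). My reading is that the per-CPU krcp state persists while a CPU is offline, so scanning only online CPUs would miss anything still queued on a CPU that has since gone offline. A toy illustration of the difference, with an invented per-CPU counter array and an explicit online mask:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static long per_cpu_count[NR_CPUS];	/* survives a CPU going "offline" */
static bool cpu_online_mask[NR_CPUS];

static long total(bool only_online)
{
	long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {	/* "for_each_possible_cpu" */
		if (only_online && !cpu_online_mask[cpu])
			continue;			/* "for_each_online_cpu" skips it */
		sum += per_cpu_count[cpu];
	}
	return sum;
}

int main(void)
{
	cpu_online_mask[0] = cpu_online_mask[1] = true;
	per_cpu_count[0] = 3;
	per_cpu_count[1] = 5;		/* CPU 1 queues work ... */
	cpu_online_mask[1] = false;	/* ... and then goes offline */

	printf("online-only total: %ld\n", total(true));	/* 3: misses CPU 1 */
	printf("possible total:    %ld\n", total(false));	/* 8: sees everything */
	return 0;
}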
@@ -3973,8 +3980,6 @@ int rcutree_offline_cpu(unsigned int cpu)
 	return 0;
 }
 
-static DEFINE_PER_CPU(int, rcu_cpu_started);
-
 /*
  * Mark the specified CPU as being online so that subsequent grace periods
  * (both expedited and normal) will wait on it.  Note that this means that
@@ -3994,12 +3999,11 @@ void rcu_cpu_starting(unsigned int cpu)
 	struct rcu_node *rnp;
 	bool newcpu;
 
-	if (per_cpu(rcu_cpu_started, cpu))
-		return;
-
-	per_cpu(rcu_cpu_started, cpu) = 1;
-
 	rdp = per_cpu_ptr(&rcu_data, cpu);
+	if (rdp->cpu_started)
+		return;
+	rdp->cpu_started = true;
 
 	rnp = rdp->mynode;
 	mask = rdp->grpmask;
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -4059,7 +4063,7 @@ void rcu_report_dead(unsigned int cpu)
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	raw_spin_unlock(&rcu_state.ofl_lock);
 
-	per_cpu(rcu_cpu_started, cpu) = 0;
+	rdp->cpu_started = false;
 }
 
 /*
@@ -156,6 +156,7 @@ struct rcu_data {
 	bool		beenonline;	/* CPU online at least once. */
 	bool		gpwrap;		/* Possible ->gp_seq wrap. */
 	bool		exp_deferred_qs;  /* This CPU awaiting a deferred QS? */
+	bool		cpu_started;	/* RCU watching this onlining CPU. */
 	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
 	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
 	unsigned long ticks_this_gp;	/* The number of scheduling-clock */
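This tree.h hunk, together with the rcu_cpu_starting()/rcu_report_dead() hunks above, folds the former DEFINE_PER_CPU(int, rcu_cpu_started) flag into the per-CPU rcu_data structure as ->cpu_started, keeping the "already marked online" state next to the rest of that CPU's RCU state. A small sketch of the same refactor with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Per-CPU bookkeeping; the started flag now lives inside the structure. */
struct cpu_data {
	bool started;		/* was: a separate per-CPU int */
	unsigned long gp_seen;
};

static struct cpu_data cpu_data[NR_CPUS];

static void cpu_starting(int cpu)
{
	struct cpu_data *cdp = &cpu_data[cpu];

	if (cdp->started)	/* idempotent: may be called more than once */
		return;
	cdp->started = true;
	printf("cpu %d marked online\n", cpu);
}

static void cpu_report_dead(int cpu)
{
	cpu_data[cpu].started = false;
}

int main(void)
{
	cpu_starting(2);
	cpu_starting(2);	/* second call is a no-op */
	cpu_report_dead(2);
	cpu_starting(2);	/* can come back online later */
	return 0;
}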
@@ -732,11 +732,9 @@ static void rcu_exp_need_qs(void)
 /* Invoked on each online non-idle CPU for expedited quiescent state. */
 static void rcu_exp_handler(void *unused)
 {
-	struct rcu_data *rdp;
-	struct rcu_node *rnp;
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+	struct rcu_node *rnp = rdp->mynode;
 
-	rdp = this_cpu_ptr(&rcu_data);
-	rnp = rdp->mynode;
 	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
 	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
 		return;
@@ -1926,6 +1926,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
 	 * nearest grace period (if any) to wait for next.  The CB kthreads
 	 * and the global grace-period kthread are awakened if needed.
 	 */
+	WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
 	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
 		rcu_nocb_lock_irqsave(rdp, flags);
@@ -2411,13 +2412,12 @@ static void show_rcu_nocb_state(struct rcu_data *rdp)
 		return;
 
 	waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
-	wastimer = timer_pending(&rdp->nocb_timer);
+	wastimer = timer_pending(&rdp->nocb_bypass_timer);
 	wassleep = swait_active(&rdp->nocb_gp_wq);
-	if (!rdp->nocb_defer_wakeup && !rdp->nocb_gp_sleep &&
-	    !waslocked && !wastimer && !wassleep)
+	if (!rdp->nocb_gp_sleep && !waslocked && !wastimer && !wassleep)
 		return;  /* Nothing untowards. */
 
-	pr_info("   !!! %c%c%c%c %c\n",
+	pr_info("nocb GP activity on CB-only CPU!!! %c%c%c%c %c\n",
 		"lL"[waslocked],
 		"dD"[!!rdp->nocb_defer_wakeup],
 		"tT"[wastimer],
@@ -158,7 +158,7 @@ static void rcu_stall_kick_kthreads(void)
 {
 	unsigned long j;
 
-	if (!rcu_kick_kthreads)
+	if (!READ_ONCE(rcu_kick_kthreads))
 		return;
 	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
 	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
@@ -580,7 +580,7 @@ static void check_cpu_stall(struct rcu_data *rdp)
 	unsigned long js;
 	struct rcu_node *rnp;
 
-	if ((rcu_stall_is_suppressed() && !rcu_kick_kthreads) ||
+	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
 	    !rcu_gp_in_progress())
 		return;
 	rcu_stall_kick_kthreads();
@@ -623,7 +623,7 @@ static void check_cpu_stall(struct rcu_data *rdp)
 
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(gps);
-		if (rcu_cpu_stall_ftrace_dump)
+		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
 			rcu_ftrace_dump(DUMP_ALL);
 
 	} else if (rcu_gp_in_progress() &&
@@ -632,7 +632,7 @@ static void check_cpu_stall(struct rcu_data *rdp)
 
 		/* They had a few time units to dump stack, so complain. */
 		print_other_cpu_stall(gs2, gps);
-		if (rcu_cpu_stall_ftrace_dump)
+		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
 			rcu_ftrace_dump(DUMP_ALL);
 	}
 }
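The tree_stall.h hunks above wrap rcu_kick_kthreads and rcu_cpu_stall_ftrace_dump in READ_ONCE(). Both are module parameters that can be flipped via sysfs while these functions run, so the annotation marks the racy read as intentional and forces a single, as-is load. A userspace approximation using a volatile access as a stand-in for READ_ONCE(), with a hypothetical flag name:

#include <stdio.h>

/* Stand-in for the kernel's READ_ONCE(): force a single, untorn-by-the-compiler read. */
#define READ_ONCE_ISH(x) (*(volatile __typeof__(x) *)&(x))

static int kick_kthreads;	/* imagine another thread flipping this at any time */

static void maybe_kick(void)
{
	/* Read the flag exactly once and act on that snapshot. */
	if (!READ_ONCE_ISH(kick_kthreads))
		return;
	printf("kicking stalled kthreads\n");
}

int main(void)
{
	maybe_kick();		/* flag clear: nothing happens */
	kick_kthreads = 1;
	maybe_kick();		/* flag set: prints */
	return 0;
}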
@@ -53,19 +53,6 @@
 #endif
 #define MODULE_PARAM_PREFIX "rcupdate."
 
-#ifndef data_race
-#define data_race(expr) \
-	({ \
-		expr; \
-	})
-#endif
-#ifndef ASSERT_EXCLUSIVE_WRITER
-#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
-#endif
-#ifndef ASSERT_EXCLUSIVE_ACCESS
-#define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
-#endif
-
 #ifndef CONFIG_TINY_RCU
 module_param(rcu_expedited, int, 0);
 module_param(rcu_normal, int, 0);