Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu

Pull RCU changes from Paul E. McKenney:

- Convert RCU's BUG_ON() and similar calls to WARN_ON() and similar.

- Replace calls to RCU-bh and RCU-sched update-side functions with
  calls to their vanilla RCU counterparts.  This series is a step
  towards complete removal of the RCU-bh and RCU-sched update-side
  functions; an illustrative sketch of the conversion pattern
  appears after this list.

  ( Note that some of these conversions are going upstream via their
    respective maintainers. )

- Documentation updates, including a number of flavor-consolidation
  updates from Joel Fernandes.

- Miscellaneous fixes.

- Automate generation of the initrd filesystem used for
  rcutorture testing.

- Convert spin_is_locked() assertions to instead use lockdep.

  ( Note that some of these conversions are going upstream via their
    respective maintainers. )

- SRCU updates, especially including a fix from Dennis Krein
  for a bag-on-head-class bug.

- RCU torture-test updates.
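
As a purely illustrative sketch of the conversion patterns described
above (not code taken from any file in this series; struct foo,
foo_free_cb() and the foo_unlink_*() helpers are hypothetical names
invented for this example), the old-to-new mapping generally looks
like this:

    #include <linux/kernel.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/lockdep.h>
    #include <linux/rcupdate.h>
    #include <linux/rculist.h>
    #include <linux/slab.h>
    #include <linux/bug.h>

    struct foo {
            struct list_head list;          /* RCU-protected list linkage */
            struct rcu_head rh;             /* for deferred freeing */
            spinlock_t lock;                /* protects ->list updates */
    };

    static void foo_free_cb(struct rcu_head *rhp)
    {
            kfree(container_of(rhp, struct foo, rh));
    }

    /*
     * Old style (before this series): flavored update-side APIs plus
     * BUG_ON()/spin_is_locked()-based assertions.  Caller holds fp->lock.
     */
    static void foo_unlink_old(struct foo *fp)
    {
            WARN_ON(!spin_is_locked(&fp->lock));  /* only proves *somebody* holds it */
            BUG_ON(list_empty(&fp->list));        /* crashes the kernel on violation */
            list_del_rcu(&fp->list);
            call_rcu_sched(&fp->rh, foo_free_cb); /* RCU-sched flavored callback */
            /*
             * Elsewhere: synchronize_sched(), synchronize_rcu_bh(),
             * call_rcu_bh(), rcu_barrier_sched(), rcu_barrier_bh(), ...
             */
    }

    /*
     * New style (after this series): lockdep-based assertion, WARN_ON(),
     * and the single consolidated vanilla RCU flavor.
     */
    static void foo_unlink_new(struct foo *fp)
    {
            lockdep_assert_held(&fp->lock);       /* this context must hold it */
            if (WARN_ON_ONCE(list_empty(&fp->list)))
                    return;                       /* warn and recover, don't crash */
            list_del_rcu(&fp->list);
            call_rcu(&fp->rh, foo_free_cb);       /* vanilla RCU callback */
            /*
             * Elsewhere: synchronize_rcu() and rcu_barrier() now cover the
             * RCU-bh and RCU-sched cases as well.
             */
    }

The lockdep-based assertion is also stricter: spin_is_locked() only
reports that some context holds the lock, whereas lockdep_assert_held()
checks that the current context holds it.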

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Committed by Ingo Molnar on 2018-12-04 07:52:30 +01:00
88 changed files with 4282 additions and 4109 deletions


@@ -5343,7 +5343,7 @@ int __init cgroup_init(void)
cgroup_rstat_boot();
/*
* The latency of the synchronize_sched() is too high for cgroups,
* The latency of the synchronize_rcu() is too high for cgroups,
* avoid it at the cost of forcing all readers into the slow path.
*/
rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss);


@@ -9918,7 +9918,7 @@ static void account_event(struct perf_event *event)
* call the perf scheduling hooks before proceeding to
* install events that need them.
*/
synchronize_sched();
synchronize_rcu();
}
/*
* Now that we have waited for the sync_sched(), allow further


@@ -229,7 +229,7 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c)
struct kprobe_insn_page *kip, *next;
/* Ensure no-one is interrupted on the garbages */
synchronize_sched();
synchronize_rcu();
list_for_each_entry_safe(kip, next, &c->pages, list) {
int i;
@@ -1382,7 +1382,7 @@ out:
if (ret) {
ap->flags |= KPROBE_FLAG_DISABLED;
list_del_rcu(&p->list);
synchronize_sched();
synchronize_rcu();
}
}
}
@@ -1597,7 +1597,7 @@ int register_kprobe(struct kprobe *p)
ret = arm_kprobe(p);
if (ret) {
hlist_del_rcu(&p->hlist);
synchronize_sched();
synchronize_rcu();
goto out;
}
}
@@ -1776,7 +1776,7 @@ void unregister_kprobes(struct kprobe **kps, int num)
kps[i]->addr = NULL;
mutex_unlock(&kprobe_mutex);
synchronize_sched();
synchronize_rcu();
for (i = 0; i < num; i++)
if (kps[i]->addr)
__unregister_kprobe_bottom(kps[i]);
@@ -1966,7 +1966,7 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
rps[i]->kp.addr = NULL;
mutex_unlock(&kprobe_mutex);
synchronize_sched();
synchronize_rcu();
for (i = 0; i < num; i++) {
if (rps[i]->kp.addr) {
__unregister_kprobe_bottom(&rps[i]->kp);


@@ -61,7 +61,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
ops = container_of(fops, struct klp_ops, fops);
/*
* A variant of synchronize_sched() is used to allow patching functions
* A variant of synchronize_rcu() is used to allow patching functions
* where RCU is not watching, see klp_synchronize_transition().
*/
preempt_disable_notrace();
@@ -72,7 +72,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
/*
* func should never be NULL because preemption should be disabled here
* and unregister_ftrace_function() does the equivalent of a
* synchronize_sched() before the func_stack removal.
* synchronize_rcu() before the func_stack removal.
*/
if (WARN_ON_ONCE(!func))
goto unlock;


@@ -52,7 +52,7 @@ static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
/*
* This function is just a stub to implement a hard force
* of synchronize_sched(). This requires synchronizing
* of synchronize_rcu(). This requires synchronizing
* tasks even in userspace and idle.
*/
static void klp_sync(struct work_struct *work)
@@ -175,7 +175,7 @@ void klp_cancel_transition(void)
void klp_update_patch_state(struct task_struct *task)
{
/*
* A variant of synchronize_sched() is used to allow patching functions
* A variant of synchronize_rcu() is used to allow patching functions
* where RCU is not watching, see klp_synchronize_transition().
*/
preempt_disable_notrace();


@@ -4195,7 +4195,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
*
* sync_sched() is sufficient because the read-side is IRQ disable.
*/
synchronize_sched();
synchronize_rcu();
/*
* XXX at this point we could return the resources to the pool;


@@ -36,7 +36,7 @@ void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
lockdep_assert_held(&lock->wait_lock);
DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
@@ -51,7 +51,7 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task)
{
SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
lockdep_assert_held(&lock->wait_lock);
/* Mark the current thread as blocked on the lock: */
task->blocked_on = waiter;


@@ -2159,7 +2159,7 @@ static void free_module(struct module *mod)
/* Remove this module from bug list, this uses list_del_rcu */
module_bug_cleanup(mod);
/* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
synchronize_sched();
synchronize_rcu();
mutex_unlock(&module_mutex);
/* This may be empty, but that's OK */
@@ -3507,15 +3507,15 @@ static noinline int do_init_module(struct module *mod)
/*
* We want to free module_init, but be aware that kallsyms may be
* walking this with preempt disabled. In all the failure paths, we
* call synchronize_sched(), but we don't want to slow down the success
* call synchronize_rcu(), but we don't want to slow down the success
* path, so use actual RCU here.
* Note that module_alloc() on most architectures creates W+X page
* mappings which won't be cleaned up until do_free_init() runs. Any
* code such as mark_rodata_ro() which depends on those mappings to
* be cleaned up needs to sync with the queued work - ie
* rcu_barrier_sched()
* rcu_barrier()
*/
call_rcu_sched(&freeinit->rcu, do_free_init);
call_rcu(&freeinit->rcu, do_free_init);
mutex_unlock(&module_mutex);
wake_up_all(&module_wq);
@@ -3526,7 +3526,7 @@ fail_free_freeinit:
fail:
/* Try to protect us from buggy refcounters. */
mod->state = MODULE_STATE_GOING;
synchronize_sched();
synchronize_rcu();
module_put(mod);
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_GOING, mod);
@@ -3819,7 +3819,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
ddebug_cleanup:
ftrace_release_mod(mod);
dynamic_debug_remove(mod, info->debug);
synchronize_sched();
synchronize_rcu();
kfree(mod->args);
free_arch_cleanup:
module_arch_cleanup(mod);
@@ -3834,7 +3834,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
mod_tree_remove(mod);
wake_up_all(&module_wq);
/* Wait for RCU-sched synchronizing before releasing mod->list. */
synchronize_sched();
synchronize_rcu();
mutex_unlock(&module_mutex);
free_module:
/* Free lock-classes; relies on the preceding sync_rcu() */


@@ -526,12 +526,14 @@ srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
#else /* #ifdef CONFIG_TINY_RCU */
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
@@ -539,8 +541,10 @@ extern struct workqueue_struct *rcu_par_gp_wq;
#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
void rcu_bind_current_to_nocb(void);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
static inline void rcu_bind_current_to_nocb(void) { }
#endif
#endif /* __LINUX_RCU_H */


@@ -56,6 +56,7 @@
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include "rcu.h"
@@ -80,13 +81,6 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@jos
/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
torture_param(int, cbflood_inter_holdoff, HZ,
"Holdoff between floods (jiffies)");
torture_param(int, cbflood_intra_holdoff, 1,
"Holdoff between bursts (jiffies)");
torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
torture_param(int, cbflood_n_per_burst, 20000,
"# callbacks per burst in flood");
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
"Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
@@ -138,12 +132,10 @@ module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
static int nrealreaders;
static int ncbflooders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct **cbflood_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
@@ -181,7 +173,6 @@ static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static atomic_long_t n_cbfloods;
static struct list_head rcu_torture_removed;
static int rcu_torture_writer_state;
@@ -259,6 +250,8 @@ static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
static bool rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */
/*
* Allocate an element from the rcu_tortures pool.
*/
@@ -348,7 +341,8 @@ rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
* period, and we want a long delay occasionally to trigger
* force_quiescent_state. */
if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
if (!rcu_fwd_cb_nodelay &&
!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
started = cur_ops->get_gp_seq();
ts = rcu_trace_clock_local();
if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
@@ -870,59 +864,6 @@ checkwait: stutter_wait("rcu_torture_boost");
return 0;
}
static void rcu_torture_cbflood_cb(struct rcu_head *rhp)
{
}
/*
* RCU torture callback-flood kthread. Repeatedly induces bursts of calls
* to call_rcu() or analogous, increasing the probability of occurrence
* of callback-overflow corner cases.
*/
static int
rcu_torture_cbflood(void *arg)
{
int err = 1;
int i;
int j;
struct rcu_head *rhp;
if (cbflood_n_per_burst > 0 &&
cbflood_inter_holdoff > 0 &&
cbflood_intra_holdoff > 0 &&
cur_ops->call &&
cur_ops->cb_barrier) {
rhp = vmalloc(array3_size(cbflood_n_burst,
cbflood_n_per_burst,
sizeof(*rhp)));
err = !rhp;
}
if (err) {
VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
goto wait_for_stop;
}
VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
do {
schedule_timeout_interruptible(cbflood_inter_holdoff);
atomic_long_inc(&n_cbfloods);
WARN_ON(signal_pending(current));
for (i = 0; i < cbflood_n_burst; i++) {
for (j = 0; j < cbflood_n_per_burst; j++) {
cur_ops->call(&rhp[i * cbflood_n_per_burst + j],
rcu_torture_cbflood_cb);
}
schedule_timeout_interruptible(cbflood_intra_holdoff);
WARN_ON(signal_pending(current));
}
cur_ops->cb_barrier();
stutter_wait("rcu_torture_cbflood");
} while (!torture_must_stop());
vfree(rhp);
wait_for_stop:
torture_kthread_stopping("rcu_torture_cbflood");
return 0;
}
/*
* RCU torture force-quiescent-state kthread. Repeatedly induces
* bursts of calls to force_quiescent_state(), increasing the probability
@@ -1457,11 +1398,10 @@ rcu_torture_stats_print(void)
n_rcu_torture_boosts,
atomic_long_read(&n_rcu_torture_timers));
torture_onoff_stats();
pr_cont("barrier: %ld/%ld:%ld ",
pr_cont("barrier: %ld/%ld:%ld\n",
n_barrier_successes,
n_barrier_attempts,
n_rcu_torture_barrier_error);
pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
pr_alert("%s%s ", torture_type, TORTURE_FLAG);
if (atomic_read(&n_rcu_torture_mberror) != 0 ||
@@ -1674,8 +1614,90 @@ static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}
/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
/* State for continuous-flood RCU callbacks. */
struct rcu_fwd_cb {
struct rcu_head rh;
struct rcu_fwd_cb *rfc_next;
int rfc_gps;
};
static DEFINE_SPINLOCK(rcu_fwd_lock);
static struct rcu_fwd_cb *rcu_fwd_cb_head;
static struct rcu_fwd_cb **rcu_fwd_cb_tail = &rcu_fwd_cb_head;
static long n_launders_cb;
static unsigned long rcu_fwd_startat;
static bool rcu_fwd_emergency_stop;
#define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */
#define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
#define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */
static long n_launders_hist[2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)];
static void rcu_torture_fwd_cb_hist(void)
{
int i;
int j;
for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
if (n_launders_hist[i] > 0)
break;
pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
__func__, jiffies - rcu_fwd_startat);
for (j = 0; j <= i; j++)
pr_cont(" %ds/%d: %ld",
j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j]);
pr_cont("\n");
}
/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
unsigned long flags;
int i;
struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
struct rcu_fwd_cb **rfcpp;
rfcp->rfc_next = NULL;
rfcp->rfc_gps++;
spin_lock_irqsave(&rcu_fwd_lock, flags);
rfcpp = rcu_fwd_cb_tail;
rcu_fwd_cb_tail = &rfcp->rfc_next;
WRITE_ONCE(*rfcpp, rfcp);
WRITE_ONCE(n_launders_cb, n_launders_cb + 1);
i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
if (i >= ARRAY_SIZE(n_launders_hist))
i = ARRAY_SIZE(n_launders_hist) - 1;
n_launders_hist[i]++;
spin_unlock_irqrestore(&rcu_fwd_lock, flags);
}
/*
* Free all callbacks on the rcu_fwd_cb_head list, either because the
* test is over or because we hit an OOM event.
*/
static unsigned long rcu_torture_fwd_prog_cbfree(void)
{
unsigned long flags;
unsigned long freed = 0;
struct rcu_fwd_cb *rfcp;
for (;;) {
spin_lock_irqsave(&rcu_fwd_lock, flags);
rfcp = rcu_fwd_cb_head;
if (!rfcp)
break;
rcu_fwd_cb_head = rfcp->rfc_next;
if (!rcu_fwd_cb_head)
rcu_fwd_cb_tail = &rcu_fwd_cb_head;
spin_unlock_irqrestore(&rcu_fwd_lock, flags);
kfree(rfcp);
freed++;
}
spin_unlock_irqrestore(&rcu_fwd_lock, flags);
return freed;
}
/* Carry out need_resched()/cond_resched() forward-progress testing. */
static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
{
unsigned long cver;
unsigned long dur;
@@ -1686,56 +1708,186 @@ static int rcu_torture_fwd_prog(void *args)
int sd4;
bool selfpropcb = false;
unsigned long stopat;
int tested = 0;
int tested_tries = 0;
static DEFINE_TORTURE_RANDOM(trs);
VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
set_user_nice(current, MAX_NICE);
if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
init_rcu_head_on_stack(&fcs.rh);
selfpropcb = true;
}
do {
schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
if (selfpropcb) {
WRITE_ONCE(fcs.stop, 0);
cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
}
cver = READ_ONCE(rcu_torture_current_version);
gps = cur_ops->get_gp_seq();
sd = cur_ops->stall_dur() + 1;
sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
dur = sd4 + torture_random(&trs) % (sd - sd4);
stopat = jiffies + dur;
while (time_before(jiffies, stopat) && !torture_must_stop()) {
idx = cur_ops->readlock();
udelay(10);
cur_ops->readunlock(idx);
if (!fwd_progress_need_resched || need_resched())
cond_resched();
}
tested_tries++;
if (!time_before(jiffies, stopat) && !torture_must_stop()) {
tested++;
cver = READ_ONCE(rcu_torture_current_version) - cver;
gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
WARN_ON(!cver && gps < 2);
pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
}
if (selfpropcb) {
WRITE_ONCE(fcs.stop, 1);
cur_ops->sync(); /* Wait for running CB to complete. */
cur_ops->cb_barrier(); /* Wait for queued callbacks. */
}
/* Avoid slow periods, better to test when busy. */
stutter_wait("rcu_torture_fwd_prog");
} while (!torture_must_stop());
/* Tight loop containing cond_resched(). */
if (selfpropcb) {
WRITE_ONCE(fcs.stop, 0);
cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
}
cver = READ_ONCE(rcu_torture_current_version);
gps = cur_ops->get_gp_seq();
sd = cur_ops->stall_dur() + 1;
sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
dur = sd4 + torture_random(&trs) % (sd - sd4);
WRITE_ONCE(rcu_fwd_startat, jiffies);
stopat = rcu_fwd_startat + dur;
while (time_before(jiffies, stopat) &&
!READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
idx = cur_ops->readlock();
udelay(10);
cur_ops->readunlock(idx);
if (!fwd_progress_need_resched || need_resched())
cond_resched();
}
(*tested_tries)++;
if (!time_before(jiffies, stopat) &&
!READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
(*tested)++;
cver = READ_ONCE(rcu_torture_current_version) - cver;
gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
WARN_ON(!cver && gps < 2);
pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
}
if (selfpropcb) {
WRITE_ONCE(fcs.stop, 1);
cur_ops->sync(); /* Wait for running CB to complete. */
cur_ops->cb_barrier(); /* Wait for queued callbacks. */
}
if (selfpropcb) {
WARN_ON(READ_ONCE(fcs.stop) != 2);
destroy_rcu_head_on_stack(&fcs.rh);
}
}
/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(void)
{
unsigned long cver;
unsigned long gps;
int i;
long n_launders;
long n_launders_cb_snap;
long n_launders_sa;
long n_max_cbs;
long n_max_gps;
struct rcu_fwd_cb *rfcp;
struct rcu_fwd_cb *rfcpn;
unsigned long stopat;
unsigned long stoppedat;
if (READ_ONCE(rcu_fwd_emergency_stop))
return; /* Get out of the way quickly, no GP wait! */
/* Loop continuously posting RCU callbacks. */
WRITE_ONCE(rcu_fwd_cb_nodelay, true);
cur_ops->sync(); /* Later readers see above write. */
WRITE_ONCE(rcu_fwd_startat, jiffies);
stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
n_launders = 0;
n_launders_cb = 0;
n_launders_sa = 0;
n_max_cbs = 0;
n_max_gps = 0;
for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
n_launders_hist[i] = 0;
cver = READ_ONCE(rcu_torture_current_version);
gps = cur_ops->get_gp_seq();
while (time_before(jiffies, stopat) &&
!READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
rfcp = READ_ONCE(rcu_fwd_cb_head);
rfcpn = NULL;
if (rfcp)
rfcpn = READ_ONCE(rfcp->rfc_next);
if (rfcpn) {
if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
break;
rcu_fwd_cb_head = rfcpn;
n_launders++;
n_launders_sa++;
} else {
rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
if (WARN_ON_ONCE(!rfcp)) {
schedule_timeout_interruptible(1);
continue;
}
n_max_cbs++;
n_launders_sa = 0;
rfcp->rfc_gps = 0;
}
cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
cond_resched();
}
stoppedat = jiffies;
n_launders_cb_snap = READ_ONCE(n_launders_cb);
cver = READ_ONCE(rcu_torture_current_version) - cver;
gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
(void)rcu_torture_fwd_prog_cbfree();
WRITE_ONCE(rcu_fwd_cb_nodelay, false);
if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop)) {
WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
__func__,
stoppedat - rcu_fwd_startat, jiffies - stoppedat,
n_launders + n_max_cbs - n_launders_cb_snap,
n_launders, n_launders_sa,
n_max_gps, n_max_cbs, cver, gps);
rcu_torture_fwd_cb_hist();
}
}
/*
* OOM notifier, but this only prints diagnostic information for the
* current forward-progress test.
*/
static int rcutorture_oom_notify(struct notifier_block *self,
unsigned long notused, void *nfreed)
{
WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
__func__);
rcu_torture_fwd_cb_hist();
rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat) / 2));
WRITE_ONCE(rcu_fwd_emergency_stop, true);
smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
pr_info("%s: Freed %lu RCU callbacks.\n",
__func__, rcu_torture_fwd_prog_cbfree());
rcu_barrier();
pr_info("%s: Freed %lu RCU callbacks.\n",
__func__, rcu_torture_fwd_prog_cbfree());
rcu_barrier();
pr_info("%s: Freed %lu RCU callbacks.\n",
__func__, rcu_torture_fwd_prog_cbfree());
smp_mb(); /* Frees before return to avoid redoing OOM. */
(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
pr_info("%s returning after OOM processing.\n", __func__);
return NOTIFY_OK;
}
static struct notifier_block rcutorture_oom_nb = {
.notifier_call = rcutorture_oom_notify
};
/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
int tested = 0;
int tested_tries = 0;
VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
rcu_bind_current_to_nocb();
if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
set_user_nice(current, MAX_NICE);
do {
schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
WRITE_ONCE(rcu_fwd_emergency_stop, false);
register_oom_notifier(&rcutorture_oom_nb);
rcu_torture_fwd_prog_nr(&tested, &tested_tries);
rcu_torture_fwd_prog_cr();
unregister_oom_notifier(&rcutorture_oom_nb);
/* Avoid slow periods, better to test when busy. */
stutter_wait("rcu_torture_fwd_prog");
} while (!torture_must_stop());
/* Short runs might not contain a valid forward-progress attempt. */
WARN_ON(!tested && tested_tries >= 5);
pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
@@ -1748,7 +1900,8 @@ static int __init rcu_torture_fwd_prog_init(void)
{
if (!fwd_progress)
return 0; /* Not requested, so don't do it. */
if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0) {
if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
cur_ops == &rcu_busted_ops) {
VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
return 0;
}
@@ -1968,8 +2121,6 @@ rcu_torture_cleanup(void)
cur_ops->name, gp_seq, flags);
torture_stop_kthread(rcu_torture_stats, stats_task);
torture_stop_kthread(rcu_torture_fqs, fqs_task);
for (i = 0; i < ncbflooders; i++)
torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
if (rcu_torture_can_boost())
cpuhp_remove_state(rcutor_hp);
@@ -2252,24 +2403,6 @@ rcu_torture_init(void)
goto unwind;
if (object_debug)
rcu_test_debug_objects();
if (cbflood_n_burst > 0) {
/* Create the cbflood threads */
ncbflooders = (num_online_cpus() + 3) / 4;
cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task),
GFP_KERNEL);
if (!cbflood_task) {
VERBOSE_TOROUT_ERRSTRING("out of memory");
firsterr = -ENOMEM;
goto unwind;
}
for (i = 0; i < ncbflooders; i++) {
firsterr = torture_create_kthread(rcu_torture_cbflood,
NULL,
cbflood_task[i]);
if (firsterr)
goto unwind;
}
}
torture_init_end();
return 0;


@@ -37,30 +37,30 @@ int rcu_scheduler_active __read_mostly;
static LIST_HEAD(srcu_boot_list);
static bool srcu_init_done;
static int init_srcu_struct_fields(struct srcu_struct *sp)
static int init_srcu_struct_fields(struct srcu_struct *ssp)
{
sp->srcu_lock_nesting[0] = 0;
sp->srcu_lock_nesting[1] = 0;
init_swait_queue_head(&sp->srcu_wq);
sp->srcu_cb_head = NULL;
sp->srcu_cb_tail = &sp->srcu_cb_head;
sp->srcu_gp_running = false;
sp->srcu_gp_waiting = false;
sp->srcu_idx = 0;
INIT_WORK(&sp->srcu_work, srcu_drive_gp);
INIT_LIST_HEAD(&sp->srcu_work.entry);
ssp->srcu_lock_nesting[0] = 0;
ssp->srcu_lock_nesting[1] = 0;
init_swait_queue_head(&ssp->srcu_wq);
ssp->srcu_cb_head = NULL;
ssp->srcu_cb_tail = &ssp->srcu_cb_head;
ssp->srcu_gp_running = false;
ssp->srcu_gp_waiting = false;
ssp->srcu_idx = 0;
INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
INIT_LIST_HEAD(&ssp->srcu_work.entry);
return 0;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
int __init_srcu_struct(struct srcu_struct *sp, const char *name,
int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
struct lock_class_key *key)
{
/* Don't re-initialize a lock while it is held. */
debug_check_no_locks_freed((void *)sp, sizeof(*sp));
lockdep_init_map(&sp->dep_map, name, key, 0);
return init_srcu_struct_fields(sp);
debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
lockdep_init_map(&ssp->dep_map, name, key, 0);
return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
@@ -68,15 +68,15 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
/*
* init_srcu_struct - initialize a sleep-RCU structure
* @sp: structure to initialize.
* @ssp: structure to initialize.
*
* Must invoke this on a given srcu_struct before passing that srcu_struct
* to any other function. Each srcu_struct represents a separate domain
* of SRCU protection.
*/
int init_srcu_struct(struct srcu_struct *sp)
int init_srcu_struct(struct srcu_struct *ssp)
{
return init_srcu_struct_fields(sp);
return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
@@ -84,22 +84,22 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
/*
* cleanup_srcu_struct - deconstruct a sleep-RCU structure
* @sp: structure to clean up.
* @ssp: structure to clean up.
*
* Must invoke this after you are finished using a given srcu_struct that
* was initialized via init_srcu_struct(), else you leak memory.
*/
void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
{
WARN_ON(sp->srcu_lock_nesting[0] || sp->srcu_lock_nesting[1]);
WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]);
if (quiesced)
WARN_ON(work_pending(&sp->srcu_work));
WARN_ON(work_pending(&ssp->srcu_work));
else
flush_work(&sp->srcu_work);
WARN_ON(sp->srcu_gp_running);
WARN_ON(sp->srcu_gp_waiting);
WARN_ON(sp->srcu_cb_head);
WARN_ON(&sp->srcu_cb_head != sp->srcu_cb_tail);
flush_work(&ssp->srcu_work);
WARN_ON(ssp->srcu_gp_running);
WARN_ON(ssp->srcu_gp_waiting);
WARN_ON(ssp->srcu_cb_head);
WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
}
EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
@@ -107,13 +107,13 @@ EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
* Removes the count for the old reader from the appropriate element of
* the srcu_struct.
*/
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
int newval = sp->srcu_lock_nesting[idx] - 1;
int newval = ssp->srcu_lock_nesting[idx] - 1;
WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
if (!newval && READ_ONCE(sp->srcu_gp_waiting))
swake_up_one(&sp->srcu_wq);
WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
if (!newval && READ_ONCE(ssp->srcu_gp_waiting))
swake_up_one(&ssp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
@@ -127,24 +127,24 @@ void srcu_drive_gp(struct work_struct *wp)
int idx;
struct rcu_head *lh;
struct rcu_head *rhp;
struct srcu_struct *sp;
struct srcu_struct *ssp;
sp = container_of(wp, struct srcu_struct, srcu_work);
if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head))
ssp = container_of(wp, struct srcu_struct, srcu_work);
if (ssp->srcu_gp_running || !READ_ONCE(ssp->srcu_cb_head))
return; /* Already running or nothing to do. */
/* Remove recently arrived callbacks and wait for readers. */
WRITE_ONCE(sp->srcu_gp_running, true);
WRITE_ONCE(ssp->srcu_gp_running, true);
local_irq_disable();
lh = sp->srcu_cb_head;
sp->srcu_cb_head = NULL;
sp->srcu_cb_tail = &sp->srcu_cb_head;
lh = ssp->srcu_cb_head;
ssp->srcu_cb_head = NULL;
ssp->srcu_cb_tail = &ssp->srcu_cb_head;
local_irq_enable();
idx = sp->srcu_idx;
WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
WRITE_ONCE(sp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */
swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
idx = ssp->srcu_idx;
WRITE_ONCE(ssp->srcu_idx, !ssp->srcu_idx);
WRITE_ONCE(ssp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */
swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
/* Invoke the callbacks we removed above. */
while (lh) {
@@ -161,9 +161,9 @@ void srcu_drive_gp(struct work_struct *wp)
* at interrupt level, but the ->srcu_gp_running checks will
* straighten that out.
*/
WRITE_ONCE(sp->srcu_gp_running, false);
if (READ_ONCE(sp->srcu_cb_head))
schedule_work(&sp->srcu_work);
WRITE_ONCE(ssp->srcu_gp_running, false);
if (READ_ONCE(ssp->srcu_cb_head))
schedule_work(&ssp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);
@@ -171,7 +171,7 @@ EXPORT_SYMBOL_GPL(srcu_drive_gp);
* Enqueue an SRCU callback on the specified srcu_struct structure,
* initiating grace-period processing if it is not already running.
*/
void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rcu_callback_t func)
{
unsigned long flags;
@@ -179,14 +179,14 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
rhp->func = func;
rhp->next = NULL;
local_irq_save(flags);
*sp->srcu_cb_tail = rhp;
sp->srcu_cb_tail = &rhp->next;
*ssp->srcu_cb_tail = rhp;
ssp->srcu_cb_tail = &rhp->next;
local_irq_restore(flags);
if (!READ_ONCE(sp->srcu_gp_running)) {
if (!READ_ONCE(ssp->srcu_gp_running)) {
if (likely(srcu_init_done))
schedule_work(&sp->srcu_work);
else if (list_empty(&sp->srcu_work.entry))
list_add(&sp->srcu_work.entry, &srcu_boot_list);
schedule_work(&ssp->srcu_work);
else if (list_empty(&ssp->srcu_work.entry))
list_add(&ssp->srcu_work.entry, &srcu_boot_list);
}
}
EXPORT_SYMBOL_GPL(call_srcu);
@@ -194,13 +194,13 @@ EXPORT_SYMBOL_GPL(call_srcu);
/*
* synchronize_srcu - wait for prior SRCU read-side critical-section completion
*/
void synchronize_srcu(struct srcu_struct *sp)
void synchronize_srcu(struct srcu_struct *ssp)
{
struct rcu_synchronize rs;
init_rcu_head_on_stack(&rs.head);
init_completion(&rs.completion);
call_srcu(sp, &rs.head, wakeme_after_rcu);
call_srcu(ssp, &rs.head, wakeme_after_rcu);
wait_for_completion(&rs.completion);
destroy_rcu_head_on_stack(&rs.head);
}
@@ -219,13 +219,13 @@ void __init rcu_scheduler_starting(void)
*/
void __init srcu_init(void)
{
struct srcu_struct *sp;
struct srcu_struct *ssp;
srcu_init_done = true;
while (!list_empty(&srcu_boot_list)) {
sp = list_first_entry(&srcu_boot_list,
ssp = list_first_entry(&srcu_boot_list,
struct srcu_struct, srcu_work.entry);
list_del_init(&sp->srcu_work.entry);
schedule_work(&sp->srcu_work);
list_del_init(&ssp->srcu_work.entry);
schedule_work(&ssp->srcu_work);
}
}

File diff suppressed because it is too large.


@@ -44,15 +44,15 @@ static const struct {
__INIT_HELD(rcu_read_lock_held)
},
[RCU_SCHED_SYNC] = {
.sync = synchronize_sched,
.call = call_rcu_sched,
.wait = rcu_barrier_sched,
.sync = synchronize_rcu,
.call = call_rcu,
.wait = rcu_barrier,
__INIT_HELD(rcu_read_lock_sched_held)
},
[RCU_BH_SYNC] = {
.sync = synchronize_rcu_bh,
.call = call_rcu_bh,
.wait = rcu_barrier_bh,
.sync = synchronize_rcu,
.call = call_rcu,
.wait = rcu_barrier,
__INIT_HELD(rcu_read_lock_bh_held)
},
};
@@ -125,8 +125,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
rsp->gp_state = GP_PENDING;
spin_unlock_irq(&rsp->rss_lock);
BUG_ON(need_wait && need_sync);
WARN_ON_ONCE(need_wait && need_sync);
if (need_sync) {
gp_ops[rsp->gp_type].sync();
rsp->gp_state = GP_PASSED;
@@ -139,7 +138,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
* Nobody has yet been allowed the 'fast' path and thus we can
* avoid doing any sync(). The callback will get 'dropped'.
*/
BUG_ON(rsp->gp_state != GP_PASSED);
WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
}
}
@@ -166,8 +165,8 @@ static void rcu_sync_func(struct rcu_head *rhp)
struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
unsigned long flags;
BUG_ON(rsp->gp_state != GP_PASSED);
BUG_ON(rsp->cb_state == CB_IDLE);
WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
WARN_ON_ONCE(rsp->cb_state == CB_IDLE);
spin_lock_irqsave(&rsp->rss_lock, flags);
if (rsp->gp_count) {
@@ -225,7 +224,7 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
{
int cb_state;
BUG_ON(rsp->gp_count);
WARN_ON_ONCE(rsp->gp_count);
spin_lock_irq(&rsp->rss_lock);
if (rsp->cb_state == CB_REPLAY)
@@ -235,6 +234,6 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
if (cb_state != CB_IDLE) {
gp_ops[rsp->gp_type].wait();
BUG_ON(rsp->cb_state != CB_IDLE);
WARN_ON_ONCE(rsp->cb_state != CB_IDLE);
}
}


@@ -207,6 +207,19 @@ static int rcu_gp_in_progress(void)
return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}
/*
* Return the number of callbacks queued on the specified CPU.
* Handles both the nocbs and normal cases.
*/
static long rcu_get_n_cbs_cpu(int cpu)
{
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
if (rcu_segcblist_is_enabled(&rdp->cblist)) /* Online normal CPU? */
return rcu_segcblist_n_cbs(&rdp->cblist);
return rcu_get_n_cbs_nocb_cpu(rdp); /* Works for offline, too. */
}
void rcu_softirq_qs(void)
{
rcu_qs();
@@ -499,17 +512,30 @@ void rcu_force_quiescent_state(void)
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
/*
* Convert a ->gp_state value to a character string.
*/
static const char *gp_state_getname(short gs)
{
if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
return "???";
return gp_state_names[gs];
}
/*
* Show the state of the grace-period kthreads.
*/
void show_rcu_gp_kthreads(void)
{
int cpu;
unsigned long j;
struct rcu_data *rdp;
struct rcu_node *rnp;
pr_info("%s: wait state: %d ->state: %#lx\n", rcu_state.name,
rcu_state.gp_state, rcu_state.gp_kthread->state);
j = jiffies - READ_ONCE(rcu_state.gp_activity);
pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %ld\n",
rcu_state.name, gp_state_getname(rcu_state.gp_state),
rcu_state.gp_state, rcu_state.gp_kthread->state, j);
rcu_for_each_node_breadth_first(rnp) {
if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
continue;
@@ -891,12 +917,12 @@ void rcu_irq_enter_irqson(void)
}
/**
* rcu_is_watching - see if RCU thinks that the current CPU is idle
* rcu_is_watching - see if RCU thinks that the current CPU is not idle
*
* Return true if RCU is watching the running CPU, which means that this
* CPU can safely enter RCU read-side critical sections. In other words,
* if the current CPU is in its idle loop and is neither in an interrupt
* or NMI handler, return true.
* if the current CPU is not in its idle loop or is in an interrupt or
* NMI handler, return true.
*/
bool notrace rcu_is_watching(void)
{
@@ -1142,16 +1168,6 @@ static void record_gp_stall_check_time(void)
rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}
/*
* Convert a ->gp_state value to a character string.
*/
static const char *gp_state_getname(short gs)
{
if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
return "???";
return gp_state_names[gs];
}
/*
* Complain about starvation of grace-period kthread.
*/
@@ -1262,8 +1278,7 @@ static void print_other_cpu_stall(unsigned long gp_seq)
print_cpu_stall_info_end();
for_each_possible_cpu(cpu)
totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data,
cpu)->cblist);
totqlen += rcu_get_n_cbs_cpu(cpu);
pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
@@ -1323,8 +1338,7 @@ static void print_cpu_stall(void)
raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
print_cpu_stall_info_end();
for_each_possible_cpu(cpu)
totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data,
cpu)->cblist);
totqlen += rcu_get_n_cbs_cpu(cpu);
pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
jiffies - rcu_state.gp_start,
(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
@@ -1986,7 +2000,8 @@ static void rcu_gp_cleanup(void)
WRITE_ONCE(rcu_state.gp_activity, jiffies);
raw_spin_lock_irq_rcu_node(rnp);
gp_duration = jiffies - rcu_state.gp_start;
rcu_state.gp_end = jiffies;
gp_duration = rcu_state.gp_end - rcu_state.gp_start;
if (gp_duration > rcu_state.gp_max)
rcu_state.gp_max = gp_duration;
@@ -2032,9 +2047,9 @@ static void rcu_gp_cleanup(void)
rnp = rcu_get_root();
raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
/* Declare grace period done. */
rcu_seq_end(&rcu_state.gp_seq);
/* Declare grace period done, trace first to use old GP number. */
trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
rcu_seq_end(&rcu_state.gp_seq);
rcu_state.gp_state = RCU_GP_IDLE;
/* Check for GP requests since above loop. */
rdp = this_cpu_ptr(&rcu_data);
@@ -2600,10 +2615,10 @@ static void force_quiescent_state(void)
* This function checks for grace-period requests that fail to motivate
* RCU to come out of its idle mode.
*/
static void
rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp)
void
rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
const unsigned long gpssdelay)
{
const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
unsigned long flags;
unsigned long j;
struct rcu_node *rnp_root = rcu_get_root();
@@ -2654,6 +2669,48 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp)
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
/*
* Do a forward-progress check for rcutorture. This is normally invoked
* due to an OOM event. The argument "j" gives the time period during
* which rcutorture would like progress to have been made.
*/
void rcu_fwd_progress_check(unsigned long j)
{
unsigned long cbs;
int cpu;
unsigned long max_cbs = 0;
int max_cpu = -1;
struct rcu_data *rdp;
if (rcu_gp_in_progress()) {
pr_info("%s: GP age %lu jiffies\n",
__func__, jiffies - rcu_state.gp_start);
show_rcu_gp_kthreads();
} else {
pr_info("%s: Last GP end %lu jiffies ago\n",
__func__, jiffies - rcu_state.gp_end);
preempt_disable();
rdp = this_cpu_ptr(&rcu_data);
rcu_check_gp_start_stall(rdp->mynode, rdp, j);
preempt_enable();
}
for_each_possible_cpu(cpu) {
cbs = rcu_get_n_cbs_cpu(cpu);
if (!cbs)
continue;
if (max_cpu < 0)
pr_info("%s: callbacks", __func__);
pr_cont(" %d: %lu", cpu, cbs);
if (cbs <= max_cbs)
continue;
max_cbs = cbs;
max_cpu = cpu;
}
if (max_cpu >= 0)
pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
/*
* This does the RCU core processing work for the specified rcu_data
* structures. This may be called only from the CPU to whom the rdp
@@ -2690,7 +2747,7 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
local_irq_restore(flags);
}
rcu_check_gp_start_stall(rnp, rdp);
rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
/* If there are callbacks ready, invoke them. */
if (rcu_segcblist_ready_cbs(&rdp->cblist))
@@ -2826,7 +2883,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
* Very early boot, before rcu_init(). Initialize if needed
* and then drop through to queue the callback.
*/
BUG_ON(cpu != -1);
WARN_ON_ONCE(cpu != -1);
WARN_ON_ONCE(!rcu_is_watching());
if (rcu_segcblist_empty(&rdp->cblist))
rcu_segcblist_init(&rdp->cblist);
@@ -3485,7 +3542,8 @@ static int __init rcu_spawn_gp_kthread(void)
rcu_scheduler_fully_active = 1;
t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
BUG_ON(IS_ERR(t));
if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
return 0;
rnp = rcu_get_root();
raw_spin_lock_irqsave_rcu_node(rnp, flags);
rcu_state.gp_kthread = t;


@@ -57,7 +57,7 @@ struct rcu_node {
/* some rcu_state fields as well as */
/* following. */
unsigned long gp_seq; /* Track rsp->rcu_gp_seq. */
unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed. */
unsigned long gp_seq_needed; /* Track furthest future GP request. */
unsigned long completedqs; /* All QSes done for this node. */
unsigned long qsmask; /* CPUs or groups that need to switch in */
/* order for current grace period to proceed.*/
@@ -163,7 +163,7 @@ union rcu_noqs {
struct rcu_data {
/* 1) quiescent-state and grace-period handling : */
unsigned long gp_seq; /* Track rsp->rcu_gp_seq counter. */
unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed ctr. */
unsigned long gp_seq_needed; /* Track furthest future GP request. */
union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */
bool core_needs_qs; /* Core waits for quiesc state. */
bool beenonline; /* CPU online at least once. */
@@ -328,6 +328,8 @@ struct rcu_state {
/* force_quiescent_state(). */
unsigned long gp_start; /* Time at which GP started, */
/* but in jiffies. */
unsigned long gp_end; /* Time last GP ended, again */
/* in jiffies. */
unsigned long gp_activity; /* Time of last GP kthread */
/* activity in jiffies. */
unsigned long gp_req_activity; /* Time of last GP request */
@@ -398,17 +400,6 @@ static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
#define RCU_NAME rcu_name
#endif /* #else #ifdef CONFIG_TRACING */
/*
* RCU implementation internal declarations:
*/
extern struct rcu_state rcu_sched_state;
extern struct rcu_state rcu_bh_state;
#ifdef CONFIG_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
int rcu_dynticks_snap(struct rcu_data *rdp);
#ifdef CONFIG_RCU_BOOST
@@ -466,6 +457,7 @@ static void __init rcu_spawn_nocb_kthreads(void);
static void __init rcu_organize_nocb_kthreads(void);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static bool init_nocb_callback_list(struct rcu_data *rdp);
static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp);
static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void);
static void rcu_dynticks_task_enter(void);


@@ -450,10 +450,12 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
}
INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
preempt_disable();
cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
/* If all offline, queue the work on an unbound CPU. */
if (unlikely(cpu > rnp->grphi))
if (unlikely(cpu > rnp->grphi - rnp->grplo))
cpu = WORK_CPU_UNBOUND;
else
cpu += rnp->grplo;
queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
preempt_enable();
rnp->exp_need_flush = true;
@@ -690,8 +692,10 @@ static void sync_rcu_exp_handler(void *unused)
*/
if (t->rcu_read_lock_nesting > 0) {
raw_spin_lock_irqsave_rcu_node(rnp, flags);
if (rnp->expmask & rdp->grpmask)
if (rnp->expmask & rdp->grpmask) {
rdp->deferred_qs = true;
WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
}
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}


@@ -397,6 +397,11 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
return rnp->gp_tasks != NULL;
}
/* Bias and limit values for ->rcu_read_lock_nesting. */
#define RCU_NEST_BIAS INT_MAX
#define RCU_NEST_NMAX (-INT_MAX / 2)
#define RCU_NEST_PMAX (INT_MAX / 2)
/*
* Preemptible RCU implementation for rcu_read_lock().
* Just increment ->rcu_read_lock_nesting, shared state will be updated
@@ -405,6 +410,8 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
void __rcu_read_lock(void)
{
current->rcu_read_lock_nesting++;
if (IS_ENABLED(CONFIG_PROVE_LOCKING))
WARN_ON_ONCE(current->rcu_read_lock_nesting > RCU_NEST_PMAX);
barrier(); /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
@@ -424,20 +431,18 @@ void __rcu_read_unlock(void)
--t->rcu_read_lock_nesting;
} else {
barrier(); /* critical section before exit code. */
t->rcu_read_lock_nesting = INT_MIN;
t->rcu_read_lock_nesting = -RCU_NEST_BIAS;
barrier(); /* assign before ->rcu_read_unlock_special load */
if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
rcu_read_unlock_special(t);
barrier(); /* ->rcu_read_unlock_special load before assign */
t->rcu_read_lock_nesting = 0;
}
#ifdef CONFIG_PROVE_LOCKING
{
int rrln = READ_ONCE(t->rcu_read_lock_nesting);
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
int rrln = t->rcu_read_lock_nesting;
WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX);
}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
@@ -597,7 +602,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
*/
static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
return (this_cpu_ptr(&rcu_data)->deferred_qs ||
return (__this_cpu_read(rcu_data.deferred_qs) ||
READ_ONCE(t->rcu_read_unlock_special.s)) &&
t->rcu_read_lock_nesting <= 0;
}
@@ -617,11 +622,11 @@ static void rcu_preempt_deferred_qs(struct task_struct *t)
if (!rcu_preempt_need_deferred_qs(t))
return;
if (couldrecurse)
t->rcu_read_lock_nesting -= INT_MIN;
t->rcu_read_lock_nesting -= RCU_NEST_BIAS;
local_irq_save(flags);
rcu_preempt_deferred_qs_irqrestore(t, flags);
if (couldrecurse)
t->rcu_read_lock_nesting += INT_MIN;
t->rcu_read_lock_nesting += RCU_NEST_BIAS;
}
/*
@@ -642,13 +647,21 @@ static void rcu_read_unlock_special(struct task_struct *t)
local_irq_save(flags);
irqs_were_disabled = irqs_disabled_flags(flags);
if ((preempt_bh_were_disabled || irqs_were_disabled) &&
t->rcu_read_unlock_special.b.blocked) {
if (preempt_bh_were_disabled || irqs_were_disabled) {
WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
/* Need to defer quiescent state until everything is enabled. */
raise_softirq_irqoff(RCU_SOFTIRQ);
if (irqs_were_disabled) {
/* Enabling irqs does not reschedule, so... */
raise_softirq_irqoff(RCU_SOFTIRQ);
} else {
/* Enabling BH or preempt does reschedule, so... */
set_tsk_need_resched(current);
set_preempt_need_resched();
}
local_irq_restore(flags);
return;
}
WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
rcu_preempt_deferred_qs_irqrestore(t, flags);
}
@@ -1464,7 +1477,8 @@ static void __init rcu_spawn_boost_kthreads(void)
for_each_possible_cpu(cpu)
per_cpu(rcu_cpu_has_work, cpu) = 0;
BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
if (WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), "%s: Could not start rcub kthread, OOM is now expected behavior\n", __func__))
return;
rcu_for_each_leaf_node(rnp)
(void)rcu_spawn_one_boost_kthread(rnp);
}
@@ -1997,7 +2011,7 @@ static bool rcu_nocb_cpu_needs_barrier(int cpu)
* (if a callback is in fact needed). This is associated with an
* atomic_inc() in the caller.
*/
ret = atomic_long_read(&rdp->nocb_q_count);
ret = rcu_get_n_cbs_nocb_cpu(rdp);
#ifdef CONFIG_PROVE_RCU
rhp = READ_ONCE(rdp->nocb_head);
@@ -2052,7 +2066,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
TPS("WakeNotPoll"));
return;
}
len = atomic_long_read(&rdp->nocb_q_count);
len = rcu_get_n_cbs_nocb_cpu(rdp);
if (old_rhpp == &rdp->nocb_head) {
if (!irqs_disabled_flags(flags)) {
/* ... if queue was empty ... */
@@ -2101,11 +2115,11 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
trace_rcu_kfree_callback(rcu_state.name, rhp,
(unsigned long)rhp->func,
-atomic_long_read(&rdp->nocb_q_count_lazy),
-atomic_long_read(&rdp->nocb_q_count));
-rcu_get_n_cbs_nocb_cpu(rdp));
else
trace_rcu_callback(rcu_state.name, rhp,
-atomic_long_read(&rdp->nocb_q_count_lazy),
-atomic_long_read(&rdp->nocb_q_count));
-rcu_get_n_cbs_nocb_cpu(rdp));
/*
* If called from an extended quiescent state with interrupts
@@ -2322,13 +2336,14 @@ static int rcu_nocb_kthread(void *arg)
tail = rdp->nocb_follower_tail;
rdp->nocb_follower_tail = &rdp->nocb_follower_head;
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
BUG_ON(!list);
if (WARN_ON_ONCE(!list))
continue;
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeNonEmpty"));
/* Each pass through the following loop invokes a callback. */
trace_rcu_batch_start(rcu_state.name,
atomic_long_read(&rdp->nocb_q_count_lazy),
atomic_long_read(&rdp->nocb_q_count), -1);
rcu_get_n_cbs_nocb_cpu(rdp), -1);
c = cl = 0;
while (list) {
next = list->next;
@@ -2495,7 +2510,8 @@ static void rcu_spawn_one_nocb_kthread(int cpu)
/* Spawn the kthread for this CPU. */
t = kthread_run(rcu_nocb_kthread, rdp_spawn,
"rcuo%c/%d", rcu_state.abbr, cpu);
BUG_ON(IS_ERR(t));
if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo kthread, OOM is now expected behavior\n", __func__))
return;
WRITE_ONCE(rdp_spawn->nocb_kthread, t);
}
@@ -2587,6 +2603,26 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
return true;
}
/*
* Bind the current task to the offloaded CPUs. If there are no offloaded
* CPUs, leave the task unbound. Splat if the bind attempt fails.
*/
void rcu_bind_current_to_nocb(void)
{
if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
}
EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
/*
* Return the number of RCU callbacks still queued from the specified
* CPU, which must be a nocbs CPU.
*/
static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp)
{
return atomic_long_read(&rdp->nocb_q_count);
}
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static bool rcu_nocb_cpu_needs_barrier(int cpu)
@@ -2647,6 +2683,11 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
return false;
}
static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp)
{
return 0;
}
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/*


@@ -335,8 +335,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
/* Initialize and register callbacks for each crcu_array element. */
for (i = 0; i < n; i++) {
if (checktiny &&
(crcu_array[i] == call_rcu ||
crcu_array[i] == call_rcu_bh)) {
(crcu_array[i] == call_rcu)) {
might_sleep();
continue;
}
@@ -352,8 +351,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
/* Wait for all callbacks to be invoked. */
for (i = 0; i < n; i++) {
if (checktiny &&
(crcu_array[i] == call_rcu ||
crcu_array[i] == call_rcu_bh))
(crcu_array[i] == call_rcu))
continue;
for (j = 0; j < i; j++)
if (crcu_array[j] == crcu_array[i])
@@ -822,7 +820,8 @@ static int __init rcu_spawn_tasks_kthread(void)
struct task_struct *t;
t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
BUG_ON(IS_ERR(t));
if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread, OOM is now expected behavior\n", __func__))
return 0;
smp_mb(); /* Ensure others see full kthread. */
WRITE_ONCE(rcu_tasks_kthread_ptr, t);
return 0;


@@ -5783,7 +5783,7 @@ int sched_cpu_deactivate(unsigned int cpu)
*
* Do sync before park smpboot threads to take care the rcu boost case.
*/
synchronize_rcu_mult(call_rcu, call_rcu_sched);
synchronize_rcu();
#ifdef CONFIG_SCHED_SMT
/*


@@ -210,7 +210,7 @@ static int membarrier_register_global_expedited(void)
* future scheduler executions will observe the new
* thread flag state for this mm.
*/
synchronize_sched();
synchronize_rcu();
}
atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
&mm->membarrier_state);
@@ -246,7 +246,7 @@ static int membarrier_register_private_expedited(int flags)
* Ensure all future scheduler executions will observe the
* new thread flag state for this process.
*/
synchronize_sched();
synchronize_rcu();
}
atomic_or(state, &mm->membarrier_state);
@@ -298,7 +298,7 @@ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
if (tick_nohz_full_enabled())
return -EINVAL;
if (num_online_cpus() > 1)
synchronize_sched();
synchronize_rcu();
return 0;
case MEMBARRIER_CMD_GLOBAL_EXPEDITED:
return membarrier_global_expedited();


@@ -194,11 +194,23 @@ torture_onoff(void *arg)
int cpu;
int maxcpu = -1;
DEFINE_TORTURE_RANDOM(rand);
int ret;
VERBOSE_TOROUT_STRING("torture_onoff task started");
for_each_online_cpu(cpu)
maxcpu = cpu;
WARN_ON(maxcpu < 0);
if (!IS_MODULE(CONFIG_TORTURE_TEST))
for_each_possible_cpu(cpu) {
if (cpu_online(cpu))
continue;
ret = cpu_up(cpu);
if (ret && verbose) {
pr_alert("%s" TORTURE_FLAG
"%s: Initial online %d: errno %d\n",
__func__, torture_type, cpu, ret);
}
}
if (maxcpu == 0) {
VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled");
@@ -233,16 +245,15 @@ stop:
*/
int torture_onoff_init(long ooholdoff, long oointerval)
{
int ret = 0;
#ifdef CONFIG_HOTPLUG_CPU
onoff_holdoff = ooholdoff;
onoff_interval = oointerval;
if (onoff_interval <= 0)
return 0;
ret = torture_create_kthread(torture_onoff, NULL, onoff_task);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
return ret;
return torture_create_kthread(torture_onoff, NULL, onoff_task);
#else /* #ifdef CONFIG_HOTPLUG_CPU */
return 0;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_init);
@@ -513,15 +524,13 @@ static int torture_shutdown(void *arg)
*/
int torture_shutdown_init(int ssecs, void (*cleanup)(void))
{
int ret = 0;
torture_shutdown_hook = cleanup;
if (ssecs > 0) {
shutdown_time = ktime_add(ktime_get(), ktime_set(ssecs, 0));
ret = torture_create_kthread(torture_shutdown, NULL,
return torture_create_kthread(torture_shutdown, NULL,
shutdown_task);
}
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(torture_shutdown_init);
@@ -620,13 +629,10 @@ static int torture_stutter(void *arg)
/*
* Initialize and kick off the torture_stutter kthread.
*/
int torture_stutter_init(int s)
int torture_stutter_init(const int s)
{
int ret;
stutter = s;
ret = torture_create_kthread(torture_stutter, NULL, stutter_task);
return ret;
return torture_create_kthread(torture_stutter, NULL, stutter_task);
}
EXPORT_SYMBOL_GPL(torture_stutter_init);


@@ -173,7 +173,7 @@ static void ftrace_sync(struct work_struct *work)
{
/*
* This function is just a stub to implement a hard force
* of synchronize_sched(). This requires synchronizing
* of synchronize_rcu(). This requires synchronizing
* tasks even in userspace and idle.
*
* Yes, function tracing is rude.
@@ -934,7 +934,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
ftrace_profile_enabled = 0;
/*
* unregister_ftrace_profiler calls stop_machine
* so this acts like an synchronize_sched.
* so this acts like an synchronize_rcu.
*/
unregister_ftrace_profiler();
}
@@ -1086,7 +1086,7 @@ struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
/*
* Some of the ops may be dynamically allocated,
* they are freed after a synchronize_sched().
* they are freed after a synchronize_rcu().
*/
preempt_disable_notrace();
@@ -1286,7 +1286,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
if (!hash || hash == EMPTY_HASH)
return;
call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}
void ftrace_free_filter(struct ftrace_ops *ops)
@@ -1501,7 +1501,7 @@ static bool hash_contains_ip(unsigned long ip,
* the ip is not in the ops->notrace_hash.
*
* This needs to be called with preemption disabled as
* the hashes are freed with call_rcu_sched().
* the hashes are freed with call_rcu().
*/
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
@@ -4496,7 +4496,7 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
if (ftrace_enabled && !ftrace_hash_empty(hash))
ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
&old_hash_ops);
synchronize_sched();
synchronize_rcu();
hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
hlist_del(&entry->hlist);
@@ -5314,7 +5314,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
mutex_unlock(&graph_lock);
/* Wait till all users are no longer using the old hash */
synchronize_sched();
synchronize_rcu();
free_ftrace_hash(old_hash);
}
@@ -5707,7 +5707,7 @@ void ftrace_release_mod(struct module *mod)
list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
if (mod_map->mod == mod) {
list_del_rcu(&mod_map->list);
call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map);
call_rcu(&mod_map->rcu, ftrace_free_mod_map);
break;
}
}
@@ -5927,7 +5927,7 @@ ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
struct ftrace_mod_map *mod_map;
const char *ret = NULL;
/* mod_map is freed via call_rcu_sched() */
/* mod_map is freed via call_rcu() */
preempt_disable();
list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
@@ -6262,7 +6262,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
/*
* Some of the ops may be dynamically allocated,
* they must be freed after a synchronize_sched().
* they must be freed after a synchronize_rcu().
*/
preempt_disable_notrace();
@@ -6433,7 +6433,7 @@ static void clear_ftrace_pids(struct trace_array *tr)
rcu_assign_pointer(tr->function_pids, NULL);
/* Wait till all users are no longer using pid filtering */
synchronize_sched();
synchronize_rcu();
trace_free_pid_list(pid_list);
}
@@ -6580,7 +6580,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
rcu_assign_pointer(tr->function_pids, pid_list);
if (filtered_pids) {
synchronize_sched();
synchronize_rcu();
trace_free_pid_list(filtered_pids);
} else if (pid_list) {
/* Register a probe to set whether to ignore the tracing of a task */


@@ -1834,7 +1834,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
* There could have been a race between checking
* record_disable and incrementing it.
*/
synchronize_sched();
synchronize_rcu();
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
rb_check_pages(cpu_buffer);
@@ -3151,7 +3151,7 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
* This prevents all writes to the buffer. Any attempt to write
* to the buffer after this will fail and return NULL.
*
* The caller should call synchronize_sched() after this.
* The caller should call synchronize_rcu() after this.
*/
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
@@ -3253,7 +3253,7 @@ bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
* This prevents all writes to the buffer. Any attempt to write
* to the buffer after this will fail and return NULL.
*
* The caller should call synchronize_sched() after this.
* The caller should call synchronize_rcu() after this.
*/
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
@@ -4191,7 +4191,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
void
ring_buffer_read_prepare_sync(void)
{
synchronize_sched();
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
@@ -4363,7 +4363,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
atomic_inc(&cpu_buffer->record_disabled);
/* Make sure all commits have finished */
synchronize_sched();
synchronize_rcu();
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -4496,7 +4496,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
goto out;
/*
* We can't do a synchronize_sched here because this
* We can't do a synchronize_rcu here because this
* function can be called in atomic context.
* Normally this will be called from the same CPU as cpu.
* If not it's up to the caller to protect this.


@@ -1681,7 +1681,7 @@ void tracing_reset(struct trace_buffer *buf, int cpu)
ring_buffer_record_disable(buffer);
/* Make sure all commits have finished */
synchronize_sched();
synchronize_rcu();
ring_buffer_reset_cpu(buffer, cpu);
ring_buffer_record_enable(buffer);
@@ -1698,7 +1698,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
ring_buffer_record_disable(buffer);
/* Make sure all commits have finished */
synchronize_sched();
synchronize_rcu();
buf->time_start = buffer_ftrace_now(buf, buf->cpu);
@@ -2250,7 +2250,7 @@ void trace_buffered_event_disable(void)
preempt_enable();
/* Wait for all current users to finish */
synchronize_sched();
synchronize_rcu();
for_each_tracing_cpu(cpu) {
free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
@@ -5398,7 +5398,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
if (tr->current_trace->reset)
tr->current_trace->reset(tr);
/* Current trace needs to be nop_trace before synchronize_sched */
/* Current trace needs to be nop_trace before synchronize_rcu */
tr->current_trace = &nop_trace;
#ifdef CONFIG_TRACER_MAX_TRACE
@@ -5412,7 +5412,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
* The update_max_tr is called from interrupts disabled
* so a synchronized_sched() is sufficient.
*/
synchronize_sched();
synchronize_rcu();
free_snapshot(tr);
}
#endif


@@ -1614,7 +1614,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir,
/*
* The calls can still be using the old filters.
* Do a synchronize_sched() and to ensure all calls are
* Do a synchronize_rcu() and to ensure all calls are
* done with them before we free them.
*/
tracepoint_synchronize_unregister();
@@ -1845,7 +1845,7 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
if (filter) {
/*
* No event actually uses the system filter
* we can free it without synchronize_sched().
* we can free it without synchronize_rcu().
*/
__free_filter(system->filter);
system->filter = filter;


@@ -333,7 +333,7 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
* event_call related objects, which will be accessed in
* the kprobe_trace_func/kretprobe_trace_func.
*/
synchronize_sched();
synchronize_rcu();
kfree(link); /* Ignored if link == NULL */
}


@@ -92,7 +92,7 @@ static __init int release_early_probes(void)
while (early_probes) {
tmp = early_probes;
early_probes = tmp->next;
call_rcu_sched(tmp, rcu_free_old_probes);
call_rcu(tmp, rcu_free_old_probes);
}
return 0;
@@ -123,7 +123,7 @@ static inline void release_probes(struct tracepoint_func *old)
* cover both cases. So let us chain the SRCU and sched RCU
* callbacks to wait for both grace periods.
*/
call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes);
call_rcu(&tp_probes->rcu, rcu_free_old_probes);
}
}


@@ -3396,7 +3396,7 @@ static void put_unbound_pool(struct worker_pool *pool)
del_timer_sync(&pool->mayday_timer);
/* sched-RCU protected to allow dereferences from get_work_pool() */
call_rcu_sched(&pool->rcu, rcu_free_pool);
call_rcu(&pool->rcu, rcu_free_pool);
}
/**
@@ -3503,14 +3503,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
call_rcu_sched(&pwq->rcu, rcu_free_pwq);
call_rcu(&pwq->rcu, rcu_free_pwq);
/*
* If we're the last pwq going away, @wq is already dead and no one
* is gonna access it anymore. Schedule RCU free.
*/
if (is_last)
call_rcu_sched(&wq->rcu, rcu_free_wq);
call_rcu(&wq->rcu, rcu_free_wq);
}
/**
@@ -4195,7 +4195,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
* The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free.
*/
call_rcu_sched(&wq->rcu, rcu_free_wq);
call_rcu(&wq->rcu, rcu_free_wq);
} else {
/*
* We're the sole accessor of @wq at this point. Directly