Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull rcu/next + tools/memory-model changes from Paul E. McKenney:

 - RCU flavor consolidation cleanups and optimizations
 - Documentation updates
 - Miscellaneous fixes
 - SRCU updates
 - RCU-sync flavor consolidation
 - Torture-test updates
 - Linux-kernel memory-consistency-model updates, most notably the
   addition of plain C-language accesses

Signed-off-by: Ingo Molnar <mingo@kernel.org>
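One user-visible effect of the RCU-sync flavor consolidation, visible in the sync.c and percpu-rwsem.c hunks below, is that rcu_sync_init() loses its rcu_sync_type argument, since there is now only one RCU flavor to synchronize against. A minimal before/after sketch for a caller follows; the my_sync variable and my_setup() function are made up for illustration and are not part of this merge:

	#include <linux/rcu_sync.h>

	static struct rcu_sync my_sync;

	static void my_setup(void)
	{
		/* Before this merge: the RCU flavor had to be named explicitly. */
		/* rcu_sync_init(&my_sync, RCU_SCHED_SYNC); */

		/* After the consolidation: one flavor, so no type argument. */
		rcu_sync_init(&my_sync);
	}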
@@ -101,7 +101,7 @@ static DEFINE_SPINLOCK(cgroup_idr_lock);
*/
static DEFINE_SPINLOCK(cgroup_file_kn_lock);

struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);

#define cgroup_assert_mutex_or_rcu_locked() \
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
@@ -5666,7 +5666,6 @@ int __init cgroup_init(void)
int ssid;

BUILD_BUG_ON(CGROUP_SUBSYS_COUNT > 16);
BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files));

@@ -46,7 +46,7 @@ static DEFINE_SPINLOCK(uprobes_treelock); /* serialize rbtree access */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

static struct percpu_rw_semaphore dup_mmap_sem;
DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN 0
@@ -2302,7 +2302,5 @@ void __init uprobes_init(void)
for (i = 0; i < UPROBES_HASH_SZ; i++)
mutex_init(&uprobes_mmap_mutex[i]);

BUG_ON(percpu_init_rwsem(&dup_mmap_sem));

BUG_ON(register_die_notifier(&uprobe_exception_nb));
}

@@ -975,7 +975,7 @@ static int __init lock_torture_init(void)
goto unwind;
}
if (stutter > 0) {
firsterr = torture_stutter_init(stutter);
firsterr = torture_stutter_init(stutter, stutter);
if (firsterr)
goto unwind;
}

@@ -18,7 +18,7 @@ int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
return -ENOMEM;

/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
rcu_sync_init(&sem->rss);
__init_rwsem(&sem->rw_sem, name, rwsem_key);
rcuwait_init(&sem->writer);
sem->readers_block = 0;

@@ -3083,6 +3083,11 @@ static int find_module_sections(struct module *mod, struct load_info *info)
sizeof(*mod->tracepoints_ptrs),
&mod->num_tracepoints);
#endif
#ifdef CONFIG_TREE_SRCU
mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs",
sizeof(*mod->srcu_struct_ptrs),
&mod->num_srcu_structs);
#endif
#ifdef CONFIG_BPF_EVENTS
mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map",
sizeof(*mod->bpf_raw_events),

@@ -446,6 +446,7 @@ void rcu_request_urgent_qs_task(struct task_struct *t);
enum rcutorture_type {
RCU_FLAVOR,
RCU_TASKS_FLAVOR,
RCU_TRIVIAL_FLAVOR,
SRCU_FLAVOR,
INVALID_RCU_FLAVOR
};
@@ -479,6 +480,10 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
#endif
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,

@@ -299,6 +299,7 @@ struct rcu_torture_ops {
int irq_capable;
int can_boost;
int extendables;
int slow_gps;
const char *name;
};

@@ -667,9 +668,51 @@ static struct rcu_torture_ops tasks_ops = {
.fqs = NULL,
.stats = NULL,
.irq_capable = 1,
.slow_gps = 1,
.name = "tasks"
};

/*
* Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
* This implementation does not necessarily work well with CPU hotplug.
*/

static void synchronize_rcu_trivial(void)
{
int cpu;

for_each_online_cpu(cpu) {
rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
WARN_ON_ONCE(raw_smp_processor_id() != cpu);
}
}

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
preempt_disable();
return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
.ttype = RCU_TRIVIAL_FLAVOR,
.init = rcu_sync_torture_init,
.readlock = rcu_torture_read_lock_trivial,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_torture_read_unlock_trivial,
.get_gp_seq = rcu_no_completed,
.sync = synchronize_rcu_trivial,
.exp_sync = synchronize_rcu_trivial,
.fqs = NULL,
.stats = NULL,
.irq_capable = 1,
.name = "trivial"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
if (!cur_ops->gp_diff)
@@ -1010,10 +1053,17 @@ rcu_torture_writer(void *arg)
!rcu_gp_is_normal();
}
rcu_torture_writer_state = RTWS_STUTTER;
if (stutter_wait("rcu_torture_writer"))
if (stutter_wait("rcu_torture_writer") &&
!READ_ONCE(rcu_fwd_cb_nodelay) &&
!cur_ops->slow_gps &&
!torture_must_stop())
for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
if (list_empty(&rcu_tortures[i].rtort_free))
WARN_ON_ONCE(1);
if (list_empty(&rcu_tortures[i].rtort_free) &&
rcu_access_pointer(rcu_torture_current) !=
&rcu_tortures[i]) {
rcu_ftrace_dump(DUMP_ALL);
WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
}
} while (!torture_must_stop());
/* Reset expediting back to unexpedited. */
if (expediting > 0)
@@ -1358,8 +1408,9 @@ rcu_torture_stats_print(void)
}

pr_alert("%s%s ", torture_type, TORTURE_FLAG);
pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
rcu_torture_current,
rcu_torture_current ? "ver" : "VER",
rcu_torture_current_version,
list_empty(&rcu_torture_freelist),
atomic_read(&n_rcu_torture_alloc),
@@ -1661,6 +1712,17 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
spin_unlock_irqrestore(&rcu_fwd_lock, flags);
}

// Give the scheduler a chance, even on nohz_full CPUs.
static void rcu_torture_fwd_prog_cond_resched(void)
{
if (IS_ENABLED(CONFIG_PREEMPT) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
if (need_resched())
schedule();
} else {
cond_resched();
}
}

/*
* Free all callbacks on the rcu_fwd_cb_head list, either because the
* test is over or because we hit an OOM event.
@@ -1674,16 +1736,18 @@ static unsigned long rcu_torture_fwd_prog_cbfree(void)
for (;;) {
spin_lock_irqsave(&rcu_fwd_lock, flags);
rfcp = rcu_fwd_cb_head;
if (!rfcp)
if (!rfcp) {
spin_unlock_irqrestore(&rcu_fwd_lock, flags);
break;
}
rcu_fwd_cb_head = rfcp->rfc_next;
if (!rcu_fwd_cb_head)
rcu_fwd_cb_tail = &rcu_fwd_cb_head;
spin_unlock_irqrestore(&rcu_fwd_lock, flags);
kfree(rfcp);
freed++;
rcu_torture_fwd_prog_cond_resched();
}
spin_unlock_irqrestore(&rcu_fwd_lock, flags);
return freed;
}

@@ -1707,6 +1771,8 @@ static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
}

/* Tight loop containing cond_resched(). */
WRITE_ONCE(rcu_fwd_cb_nodelay, true);
cur_ops->sync(); /* Later readers see above write. */
if (selfpropcb) {
WRITE_ONCE(fcs.stop, 0);
cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
@@ -1724,7 +1790,7 @@ static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
udelay(10);
cur_ops->readunlock(idx);
if (!fwd_progress_need_resched || need_resched())
cond_resched();
rcu_torture_fwd_prog_cond_resched();
}
(*tested_tries)++;
if (!time_before(jiffies, stopat) &&
@@ -1745,6 +1811,8 @@ static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
WARN_ON(READ_ONCE(fcs.stop) != 2);
destroy_rcu_head_on_stack(&fcs.rh);
}
schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
WRITE_ONCE(rcu_fwd_cb_nodelay, false);
}

/* Carry out call_rcu() forward-progress testing. */
@@ -1765,6 +1833,8 @@ static void rcu_torture_fwd_prog_cr(void)

if (READ_ONCE(rcu_fwd_emergency_stop))
return; /* Get out of the way quickly, no GP wait! */
if (!cur_ops->call)
return; /* Can't do call_rcu() fwd prog without ->call. */

/* Loop continuously posting RCU callbacks. */
WRITE_ONCE(rcu_fwd_cb_nodelay, true);
@@ -1805,7 +1875,7 @@ static void rcu_torture_fwd_prog_cr(void)
rfcp->rfc_gps = 0;
}
cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
cond_resched();
rcu_torture_fwd_prog_cond_resched();
}
stoppedat = jiffies;
n_launders_cb_snap = READ_ONCE(n_launders_cb);
@@ -1814,7 +1884,6 @@ static void rcu_torture_fwd_prog_cr(void)
cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
(void)rcu_torture_fwd_prog_cbfree();

WRITE_ONCE(rcu_fwd_cb_nodelay, false);
if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop)) {
WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
@@ -1825,6 +1894,8 @@ static void rcu_torture_fwd_prog_cr(void)
n_max_gps, n_max_cbs, cver, gps);
rcu_torture_fwd_cb_hist();
}
schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
WRITE_ONCE(rcu_fwd_cb_nodelay, false);
}

@@ -2240,7 +2311,7 @@ rcu_torture_init(void)
int firsterr = 0;
static struct rcu_torture_ops *torture_ops[] = {
&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
&busted_srcud_ops, &tasks_ops,
&busted_srcud_ops, &tasks_ops, &trivial_ops,
};

if (!torture_init_begin(torture_type, verbose))
@@ -2363,7 +2434,10 @@ rcu_torture_init(void)
if (stutter < 0)
stutter = 0;
if (stutter) {
firsterr = torture_stutter_init(stutter * HZ);
int t;

t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
firsterr = torture_stutter_init(stutter * HZ, t);
if (firsterr)
goto unwind;
}

@@ -831,8 +831,8 @@ static void srcu_leak_callback(struct rcu_head *rhp)
* srcu_read_lock(), and srcu_read_unlock() that are all passed the same
* srcu_struct structure.
*/
void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rcu_callback_t func, bool do_norm)
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rcu_callback_t func, bool do_norm)
{
unsigned long flags;
int idx;
@@ -1310,3 +1310,68 @@ void __init srcu_init(void)
queue_work(rcu_gp_wq, &ssp->work.work);
}
}

#ifdef CONFIG_MODULES

/* Initialize any global-scope srcu_struct structures used by this module. */
static int srcu_module_coming(struct module *mod)
{
int i;
struct srcu_struct **sspp = mod->srcu_struct_ptrs;
int ret;

for (i = 0; i < mod->num_srcu_structs; i++) {
ret = init_srcu_struct(*(sspp++));
if (WARN_ON_ONCE(ret))
return ret;
}
return 0;
}

/* Clean up any global-scope srcu_struct structures used by this module. */
static void srcu_module_going(struct module *mod)
{
int i;
struct srcu_struct **sspp = mod->srcu_struct_ptrs;

for (i = 0; i < mod->num_srcu_structs; i++)
cleanup_srcu_struct(*(sspp++));
}

/* Handle one module, either coming or going. */
static int srcu_module_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct module *mod = data;
int ret = 0;

switch (val) {
case MODULE_STATE_COMING:
ret = srcu_module_coming(mod);
break;
case MODULE_STATE_GOING:
srcu_module_going(mod);
break;
default:
break;
}
return ret;
}

static struct notifier_block srcu_module_nb = {
.notifier_call = srcu_module_notify,
.priority = 0,
};

static __init int init_srcu_module_notifier(void)
{
int ret;

ret = register_module_notifier(&srcu_module_nb);
if (ret)
pr_warn("Failed to register srcu module notifier\n");
return ret;
}
late_initcall(init_srcu_module_notifier);

#endif /* #ifdef CONFIG_MODULES */

@@ -10,65 +10,18 @@
#include <linux/rcu_sync.h>
#include <linux/sched.h>

#ifdef CONFIG_PROVE_RCU
#define __INIT_HELD(func) .held = func,
#else
#define __INIT_HELD(func)
#endif

static const struct {
void (*sync)(void);
void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
void (*wait)(void);
#ifdef CONFIG_PROVE_RCU
int (*held)(void);
#endif
} gp_ops[] = {
[RCU_SYNC] = {
.sync = synchronize_rcu,
.call = call_rcu,
.wait = rcu_barrier,
__INIT_HELD(rcu_read_lock_held)
},
[RCU_SCHED_SYNC] = {
.sync = synchronize_rcu,
.call = call_rcu,
.wait = rcu_barrier,
__INIT_HELD(rcu_read_lock_sched_held)
},
[RCU_BH_SYNC] = {
.sync = synchronize_rcu,
.call = call_rcu,
.wait = rcu_barrier,
__INIT_HELD(rcu_read_lock_bh_held)
},
};

enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
enum { GP_IDLE = 0, GP_ENTER, GP_PASSED, GP_EXIT, GP_REPLAY };

#define rss_lock gp_wait.lock

#ifdef CONFIG_PROVE_RCU
void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
{
RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
"suspicious rcu_sync_is_idle() usage");
}

EXPORT_SYMBOL_GPL(rcu_sync_lockdep_assert);
#endif

/**
* rcu_sync_init() - Initialize an rcu_sync structure
* @rsp: Pointer to rcu_sync structure to be initialized
* @type: Flavor of RCU with which to synchronize rcu_sync structure
*/
void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
void rcu_sync_init(struct rcu_sync *rsp)
{
memset(rsp, 0, sizeof(*rsp));
init_waitqueue_head(&rsp->gp_wait);
rsp->gp_type = type;
}

/**
@@ -86,6 +39,70 @@ void rcu_sync_enter_start(struct rcu_sync *rsp)
rsp->gp_state = GP_PASSED;
}

static void rcu_sync_func(struct rcu_head *rhp);

static void rcu_sync_call(struct rcu_sync *rsp)
{
call_rcu(&rsp->cb_head, rcu_sync_func);
}

/**
* rcu_sync_func() - Callback function managing reader access to fastpath
* @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
*
* This function is passed to call_rcu() function by rcu_sync_enter() and
* rcu_sync_exit(), so that it is invoked after a grace period following the
* that invocation of enter/exit.
*
* If it is called by rcu_sync_enter() it signals that all the readers were
* switched onto slow path.
*
* If it is called by rcu_sync_exit() it takes action based on events that
* have taken place in the meantime, so that closely spaced rcu_sync_enter()
* and rcu_sync_exit() pairs need not wait for a grace period.
*
* If another rcu_sync_enter() is invoked before the grace period
* ended, reset state to allow the next rcu_sync_exit() to let the
* readers back onto their fastpaths (after a grace period). If both
* another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
* before the grace period ended, re-invoke call_rcu() on behalf of that
* rcu_sync_exit(). Otherwise, set all state back to idle so that readers
* can again use their fastpaths.
*/
static void rcu_sync_func(struct rcu_head *rhp)
{
struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
unsigned long flags;

WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

spin_lock_irqsave(&rsp->rss_lock, flags);
if (rsp->gp_count) {
/*
* We're at least a GP after the GP_IDLE->GP_ENTER transition.
*/
WRITE_ONCE(rsp->gp_state, GP_PASSED);
wake_up_locked(&rsp->gp_wait);
} else if (rsp->gp_state == GP_REPLAY) {
/*
* A new rcu_sync_exit() has happened; requeue the callback to
* catch a later GP.
*/
WRITE_ONCE(rsp->gp_state, GP_EXIT);
rcu_sync_call(rsp);
} else {
/*
* We're at least a GP after the last rcu_sync_exit(); eveybody
* will now have observed the write side critical section.
* Let 'em rip!.
*/
WRITE_ONCE(rsp->gp_state, GP_IDLE);
}
spin_unlock_irqrestore(&rsp->rss_lock, flags);
}

/**
* rcu_sync_enter() - Force readers onto slowpath
* @rsp: Pointer to rcu_sync structure to use for synchronization
@@ -103,84 +120,43 @@ void rcu_sync_enter_start(struct rcu_sync *rsp)
*/
void rcu_sync_enter(struct rcu_sync *rsp)
{
bool need_wait, need_sync;
int gp_state;

spin_lock_irq(&rsp->rss_lock);
need_wait = rsp->gp_count++;
need_sync = rsp->gp_state == GP_IDLE;
if (need_sync)
rsp->gp_state = GP_PENDING;
gp_state = rsp->gp_state;
if (gp_state == GP_IDLE) {
WRITE_ONCE(rsp->gp_state, GP_ENTER);
WARN_ON_ONCE(rsp->gp_count);
/*
* Note that we could simply do rcu_sync_call(rsp) here and
* avoid the "if (gp_state == GP_IDLE)" block below.
*
* However, synchronize_rcu() can be faster if rcu_expedited
* or rcu_blocking_is_gp() is true.
*
* Another reason is that we can't wait for rcu callback if
* we are called at early boot time but this shouldn't happen.
*/
}
rsp->gp_count++;
spin_unlock_irq(&rsp->rss_lock);

WARN_ON_ONCE(need_wait && need_sync);
if (need_sync) {
gp_ops[rsp->gp_type].sync();
rsp->gp_state = GP_PASSED;
wake_up_all(&rsp->gp_wait);
} else if (need_wait) {
wait_event(rsp->gp_wait, rsp->gp_state == GP_PASSED);
} else {
if (gp_state == GP_IDLE) {
/*
* Possible when there's a pending CB from a rcu_sync_exit().
* Nobody has yet been allowed the 'fast' path and thus we can
* avoid doing any sync(). The callback will get 'dropped'.
* See the comment above, this simply does the "synchronous"
* call_rcu(rcu_sync_func) which does GP_ENTER -> GP_PASSED.
*/
WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
synchronize_rcu();
rcu_sync_func(&rsp->cb_head);
/* Not really needed, wait_event() would see GP_PASSED. */
return;
}

wait_event(rsp->gp_wait, READ_ONCE(rsp->gp_state) >= GP_PASSED);
}

/**
* rcu_sync_func() - Callback function managing reader access to fastpath
* @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
*
* This function is passed to one of the call_rcu() functions by
* rcu_sync_exit(), so that it is invoked after a grace period following the
* that invocation of rcu_sync_exit(). It takes action based on events that
* have taken place in the meantime, so that closely spaced rcu_sync_enter()
* and rcu_sync_exit() pairs need not wait for a grace period.
*
* If another rcu_sync_enter() is invoked before the grace period
* ended, reset state to allow the next rcu_sync_exit() to let the
* readers back onto their fastpaths (after a grace period). If both
* another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
* before the grace period ended, re-invoke call_rcu() on behalf of that
* rcu_sync_exit(). Otherwise, set all state back to idle so that readers
* can again use their fastpaths.
*/
static void rcu_sync_func(struct rcu_head *rhp)
{
struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
unsigned long flags;

WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
WARN_ON_ONCE(rsp->cb_state == CB_IDLE);

spin_lock_irqsave(&rsp->rss_lock, flags);
if (rsp->gp_count) {
/*
* A new rcu_sync_begin() has happened; drop the callback.
*/
rsp->cb_state = CB_IDLE;
} else if (rsp->cb_state == CB_REPLAY) {
/*
* A new rcu_sync_exit() has happened; requeue the callback
* to catch a later GP.
*/
rsp->cb_state = CB_PENDING;
gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
} else {
/*
* We're at least a GP after rcu_sync_exit(); eveybody will now
* have observed the write side critical section. Let 'em rip!.
*/
rsp->cb_state = CB_IDLE;
rsp->gp_state = GP_IDLE;
}
spin_unlock_irqrestore(&rsp->rss_lock, flags);
}

/**
* rcu_sync_exit() - Allow readers back onto fast patch after grace period
* rcu_sync_exit() - Allow readers back onto fast path after grace period
* @rsp: Pointer to rcu_sync structure to use for synchronization
*
* This function is used by updaters who have completed, and can therefore
@@ -191,13 +167,16 @@ static void rcu_sync_func(struct rcu_head *rhp)
*/
void rcu_sync_exit(struct rcu_sync *rsp)
{
WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);

spin_lock_irq(&rsp->rss_lock);
if (!--rsp->gp_count) {
if (rsp->cb_state == CB_IDLE) {
rsp->cb_state = CB_PENDING;
gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
} else if (rsp->cb_state == CB_PENDING) {
rsp->cb_state = CB_REPLAY;
if (rsp->gp_state == GP_PASSED) {
WRITE_ONCE(rsp->gp_state, GP_EXIT);
rcu_sync_call(rsp);
} else if (rsp->gp_state == GP_EXIT) {
WRITE_ONCE(rsp->gp_state, GP_REPLAY);
}
}
spin_unlock_irq(&rsp->rss_lock);
@@ -209,18 +188,19 @@ void rcu_sync_exit(struct rcu_sync *rsp)
*/
void rcu_sync_dtor(struct rcu_sync *rsp)
{
int cb_state;
int gp_state;

WARN_ON_ONCE(rsp->gp_count);
WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

spin_lock_irq(&rsp->rss_lock);
if (rsp->cb_state == CB_REPLAY)
rsp->cb_state = CB_PENDING;
cb_state = rsp->cb_state;
if (rsp->gp_state == GP_REPLAY)
WRITE_ONCE(rsp->gp_state, GP_EXIT);
gp_state = rsp->gp_state;
spin_unlock_irq(&rsp->rss_lock);

if (cb_state != CB_IDLE) {
gp_ops[rsp->gp_type].wait();
WARN_ON_ONCE(rsp->cb_state != CB_IDLE);
if (gp_state != GP_IDLE) {
rcu_barrier();
WARN_ON_ONCE(rsp->gp_state != GP_IDLE);
}
}

@@ -51,6 +51,12 @@
#include <linux/tick.h>
#include <linux/sysrq.h>
#include <linux/kprobes.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
#include <linux/jiffies.h>
#include <linux/sched/isolation.h>
#include "../time/tick-internal.h"

#include "tree.h"
#include "rcu.h"
@@ -92,6 +98,9 @@ struct rcu_state rcu_state = {
/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
static bool use_softirq = 1;
module_param(use_softirq, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
@@ -138,7 +147,6 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_data *rdp);
static void rcu_report_exp_rdp(struct rcu_data *rdp);
static void sync_sched_exp_online_cleanup(int cpu);

@@ -368,19 +376,33 @@ static void __maybe_unused rcu_momentary_dyntick_idle(void)
}

/**
* rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
* rcu_is_cpu_rrupt_from_idle - see if interrupted from idle
*
* If the current CPU is idle or running at a first-level (not nested)
* If the current CPU is idle and running at a first-level (not nested)
* interrupt from idle, return true. The caller must have at least
* disabled preemption.
*/
static int rcu_is_cpu_rrupt_from_idle(void)
{
return __this_cpu_read(rcu_data.dynticks_nesting) <= 0 &&
__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 1;
/* Called only from within the scheduling-clock interrupt */
lockdep_assert_in_irq();

/* Check for counter underflows */
RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
"RCU dynticks_nesting counter underflow!");
RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
"RCU dynticks_nmi_nesting counter underflow/zero!");

/* Are we at first interrupt nesting level? */
if (__this_cpu_read(rcu_data.dynticks_nmi_nesting) != 1)
return false;

/* Does CPU appear to be idle from an RCU standpoint? */
return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
}

#define DEFAULT_RCU_BLIMIT 10 /* Maximum callbacks per rcu_do_batch. */
#define DEFAULT_RCU_BLIMIT 10 /* Maximum callbacks per rcu_do_batch ... */
#define DEFAULT_MAX_RCU_BLIMIT 10000 /* ... even during callback flood. */
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
static long qhimark = DEFAULT_RCU_QHIMARK;
@@ -2113,7 +2135,7 @@ static void rcu_do_batch(struct rcu_data *rdp)

/* Reinstate batch limit if we have worked down the excess. */
count = rcu_segcblist_n_cbs(&rdp->cblist);
if (rdp->blimit == LONG_MAX && count <= qlowmark)
if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
rdp->blimit = blimit;

/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
@@ -2253,7 +2275,7 @@ void rcu_force_quiescent_state(void)
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/* Perform RCU core processing work for the current CPU. */
static __latent_entropy void rcu_core(struct softirq_action *unused)
static __latent_entropy void rcu_core(void)
{
unsigned long flags;
struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
@@ -2287,38 +2309,127 @@ static __latent_entropy void rcu_core(struct softirq_action *unused)
rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());

/* If there are callbacks ready, invoke them. */
if (rcu_segcblist_ready_cbs(&rdp->cblist))
invoke_rcu_callbacks(rdp);
if (rcu_segcblist_ready_cbs(&rdp->cblist) &&
likely(READ_ONCE(rcu_scheduler_fully_active)))
rcu_do_batch(rdp);

/* Do any needed deferred wakeups of rcuo kthreads. */
do_nocb_deferred_wakeup(rdp);
trace_rcu_utilization(TPS("End RCU core"));
}

/*
* Schedule RCU callback invocation. If the running implementation of RCU
* does not support RCU priority boosting, just do a direct call, otherwise
* wake up the per-CPU kernel kthread. Note that because we are running
* on the current CPU with softirqs disabled, the rcu_cpu_kthread_task
* cannot disappear out from under us.
*/
static void invoke_rcu_callbacks(struct rcu_data *rdp)
static void rcu_core_si(struct softirq_action *h)
{
if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
return;
if (likely(!rcu_state.boost)) {
rcu_do_batch(rdp);
return;
}
invoke_rcu_callbacks_kthread();
rcu_core();
}

static void rcu_wake_cond(struct task_struct *t, int status)
{
/*
* If the thread is yielding, only wake it when this
* is invoked from idle
*/
if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
wake_up_process(t);
}

static void invoke_rcu_core_kthread(void)
{
struct task_struct *t;
unsigned long flags;

local_irq_save(flags);
__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
if (t != NULL && t != current)
rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
local_irq_restore(flags);
}

/*
* Wake up this CPU's rcuc kthread to do RCU core processing.
*/
static void invoke_rcu_core(void)
{
if (cpu_online(smp_processor_id()))
if (!cpu_online(smp_processor_id()))
return;
if (use_softirq)
raise_softirq(RCU_SOFTIRQ);
else
invoke_rcu_core_kthread();
}

static void rcu_cpu_kthread_park(unsigned int cpu)
{
per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
}

static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
return __this_cpu_read(rcu_data.rcu_cpu_has_work);
}

/*
* Per-CPU kernel thread that invokes RCU callbacks. This replaces
* the RCU softirq used in configurations of RCU that do not support RCU
* priority boosting.
*/
static void rcu_cpu_kthread(unsigned int cpu)
{
unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
int spincnt;

for (spincnt = 0; spincnt < 10; spincnt++) {
trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
local_bh_disable();
*statusp = RCU_KTHREAD_RUNNING;
local_irq_disable();
work = *workp;
*workp = 0;
local_irq_enable();
if (work)
rcu_core();
local_bh_enable();
if (*workp == 0) {
trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
*statusp = RCU_KTHREAD_WAITING;
return;
}
}
*statusp = RCU_KTHREAD_YIELDING;
trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
schedule_timeout_interruptible(2);
trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
*statusp = RCU_KTHREAD_WAITING;
}

static struct smp_hotplug_thread rcu_cpu_thread_spec = {
.store = &rcu_data.rcu_cpu_kthread_task,
.thread_should_run = rcu_cpu_kthread_should_run,
.thread_fn = rcu_cpu_kthread,
.thread_comm = "rcuc/%u",
.setup = rcu_cpu_kthread_setup,
.park = rcu_cpu_kthread_park,
};

/*
* Spawn per-CPU RCU core processing kthreads.
*/
static int __init rcu_spawn_core_kthreads(void)
{
int cpu;

for_each_possible_cpu(cpu)
per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
return 0;
WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
"%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
return 0;
}
early_initcall(rcu_spawn_core_kthreads);

/*
* Handle any core-RCU processing required by a call_rcu() invocation.
*/
@@ -2354,7 +2465,7 @@ static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
} else {
/* Give the grace period a kick. */
rdp->blimit = LONG_MAX;
rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
rcu_force_quiescent_state();
@@ -3355,7 +3466,8 @@ void __init rcu_init(void)
rcu_init_one();
if (dump_tree)
rcu_dump_rcu_node_tree();
open_softirq(RCU_SOFTIRQ, rcu_core);
if (use_softirq)
open_softirq(RCU_SOFTIRQ, rcu_core_si);

/*
* We don't need protection against CPU-hotplug here because

@@ -154,13 +154,15 @@ struct rcu_data {
bool core_needs_qs; /* Core waits for quiesc state. */
bool beenonline; /* CPU online at least once. */
bool gpwrap; /* Possible ->gp_seq wrap. */
bool deferred_qs; /* This CPU awaiting a deferred QS? */
bool exp_deferred_qs; /* This CPU awaiting a deferred QS? */
struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
unsigned long grpmask; /* Mask to apply to leaf qsmask. */
unsigned long ticks_this_gp; /* The number of scheduling-clock */
/* ticks this CPU has handled */
/* during and after the last grace */
/* period it is aware of. */
struct irq_work defer_qs_iw; /* Obtain later scheduler attention. */
bool defer_qs_iw_pending; /* Scheduler attention pending? */

/* 2) batch handling */
struct rcu_segcblist cblist; /* Segmented callback list, with */
@@ -407,8 +409,8 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
static void rcu_cpu_kthread_setup(unsigned int cpu);
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);

@@ -250,7 +250,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
*/
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
WRITE_ONCE(rdp->deferred_qs, false);
WRITE_ONCE(rdp->exp_deferred_qs, false);
rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

@@ -259,8 +259,7 @@ static bool sync_exp_work_done(unsigned long s)
{
if (rcu_exp_gp_seq_done(s)) {
trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
/* Ensure test happens before caller kfree(). */
smp_mb__before_atomic(); /* ^^^ */
smp_mb(); /* Ensure test happens before caller kfree(). */
return true;
}
return false;
@@ -384,7 +383,12 @@ retry_ipi:
mask_ofl_test |= mask;
continue;
}
if (get_cpu() == cpu) {
put_cpu();
continue;
}
ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
put_cpu();
if (!ret) {
mask_ofl_ipi &= ~mask;
continue;
@@ -611,7 +615,7 @@ static void rcu_exp_handler(void *unused)
rcu_dynticks_curr_cpu_in_eqs()) {
rcu_report_exp_rdp(rdp);
} else {
rdp->deferred_qs = true;
rdp->exp_deferred_qs = true;
set_tsk_need_resched(t);
set_preempt_need_resched();
}
@@ -633,7 +637,7 @@ static void rcu_exp_handler(void *unused)
if (t->rcu_read_lock_nesting > 0) {
raw_spin_lock_irqsave_rcu_node(rnp, flags);
if (rnp->expmask & rdp->grpmask) {
rdp->deferred_qs = true;
rdp->exp_deferred_qs = true;
t->rcu_read_unlock_special.b.exp_hint = true;
}
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -656,7 +660,7 @@ static void rcu_exp_handler(void *unused)
*
* Otherwise, force a context switch after the CPU enables everything.
*/
rdp->deferred_qs = true;
rdp->exp_deferred_qs = true;
if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
rcu_preempt_deferred_qs(t);
@@ -694,6 +698,16 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
/* Store .exp before .rcu_urgent_qs. */
smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
set_tsk_need_resched(current);
set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
@@ -709,25 +723,38 @@ static void rcu_exp_handler(void *unused)
rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
return;
}
__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
/* Store .exp before .rcu_urgent_qs. */
smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
set_tsk_need_resched(current);
set_preempt_need_resched();
rcu_exp_need_qs();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
unsigned long flags;
int my_cpu;
struct rcu_data *rdp;
int ret;
struct rcu_node *rnp;

rdp = per_cpu_ptr(&rcu_data, cpu);
rnp = rdp->mynode;
if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
my_cpu = get_cpu();
/* Quiescent state either not needed or already requested, leave. */
if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
__this_cpu_read(rcu_data.cpu_no_qs.b.exp)) {
put_cpu();
return;
}
/* Quiescent state needed on current CPU, so set it up locally. */
if (my_cpu == cpu) {
local_irq_save(flags);
rcu_exp_need_qs();
local_irq_restore(flags);
put_cpu();
return;
}
/* Quiescent state needed on some other CPU, send IPI. */
ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
put_cpu();
WARN_ON_ONCE(ret);
}

@@ -765,7 +792,6 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
*/
void synchronize_rcu_expedited(void)
{
struct rcu_data *rdp;
struct rcu_exp_work rew;
struct rcu_node *rnp;
unsigned long s;
@@ -802,7 +828,6 @@ void synchronize_rcu_expedited(void)
}

/* Wait for expedited grace period to complete. */
rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
rnp = rcu_get_root();
wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
sync_exp_work_done(s));

@@ -11,29 +11,7 @@
* Paul E. McKenney <paulmck@linux.ibm.com>
*/

#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/sched/debug.h>
#include <linux/smpboot.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include "../time/tick-internal.h"

#ifdef CONFIG_RCU_BOOST
#include "../locking/rtmutex_common.h"
#else /* #ifdef CONFIG_RCU_BOOST */

/*
* Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST,
* all uses are in dead code. Provide a definition to keep the compiler
* happy, but add WARN_ON_ONCE() to complain if used in the wrong place.
* This probably needs to be excluded from -rt builds.
*/
#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })
#define rt_mutex_futex_unlock(x) WARN_ON_ONCE(1)

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
@@ -94,6 +72,8 @@ static void __init rcu_bootup_announce_oddness(void)
pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
if (gp_cleanup_delay)
pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_cleanup_delay);
if (!use_softirq)
pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
pr_info("\tRCU debug extended QS entry/exit.\n");
rcupdate_announce_bootup_oddness();
@@ -257,10 +237,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
* no need to check for a subsequent expedited GP. (Though we are
* still in a quiescent state in any case.)
*/
if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs)
if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs)
rcu_report_exp_rdp(rdp);
else
WARN_ON_ONCE(rdp->deferred_qs);
WARN_ON_ONCE(rdp->exp_deferred_qs);
}

/*
@@ -357,7 +337,7 @@ void rcu_note_context_switch(bool preempt)
* means that we continue to block the current grace period.
*/
rcu_qs();
if (rdp->deferred_qs)
if (rdp->exp_deferred_qs)
rcu_report_exp_rdp(rdp);
trace_rcu_utilization(TPS("End context switch"));
barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -471,14 +451,15 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
*/
special = t->rcu_read_unlock_special;
rdp = this_cpu_ptr(&rcu_data);
if (!special.s && !rdp->deferred_qs) {
if (!special.s && !rdp->exp_deferred_qs) {
local_irq_restore(flags);
return;
}
t->rcu_read_unlock_special.b.deferred_qs = false;
if (special.b.need_qs) {
rcu_qs();
t->rcu_read_unlock_special.b.need_qs = false;
if (!t->rcu_read_unlock_special.s && !rdp->deferred_qs) {
if (!t->rcu_read_unlock_special.s && !rdp->exp_deferred_qs) {
local_irq_restore(flags);
return;
}
@@ -490,7 +471,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
* tasks are handled when removing the task from the
* blocked-tasks list below.
*/
if (rdp->deferred_qs) {
if (rdp->exp_deferred_qs) {
rcu_report_exp_rdp(rdp);
if (!t->rcu_read_unlock_special.s) {
local_irq_restore(flags);
@@ -579,7 +560,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
*/
static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
return (__this_cpu_read(rcu_data.deferred_qs) ||
return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
READ_ONCE(t->rcu_read_unlock_special.s)) &&
t->rcu_read_lock_nesting <= 0;
}
@@ -606,6 +587,17 @@ static void rcu_preempt_deferred_qs(struct task_struct *t)
t->rcu_read_lock_nesting += RCU_NEST_BIAS;
}

/*
* Minimal handler to give the scheduler a chance to re-evaluate.
*/
static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
{
struct rcu_data *rdp;

rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
rdp->defer_qs_iw_pending = false;
}

/*
* Handle special cases during rcu_read_unlock(), such as needing to
* notify RCU core processing or task having blocked during the RCU
@@ -625,16 +617,41 @@ static void rcu_read_unlock_special(struct task_struct *t)
local_irq_save(flags);
irqs_were_disabled = irqs_disabled_flags(flags);
if (preempt_bh_were_disabled || irqs_were_disabled) {
WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
/* Need to defer quiescent state until everything is enabled. */
if (irqs_were_disabled) {
/* Enabling irqs does not reschedule, so... */
bool exp;
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
struct rcu_node *rnp = rdp->mynode;

t->rcu_read_unlock_special.b.exp_hint = false;
exp = (t->rcu_blocked_node && t->rcu_blocked_node->exp_tasks) ||
(rdp->grpmask & rnp->expmask) ||
tick_nohz_full_cpu(rdp->cpu);
// Need to defer quiescent state until everything is enabled.
if ((exp || in_irq()) && irqs_were_disabled && use_softirq &&
(in_irq() || !t->rcu_read_unlock_special.b.deferred_qs)) {
// Using softirq, safe to awaken, and we get
// no help from enabling irqs, unlike bh/preempt.
raise_softirq_irqoff(RCU_SOFTIRQ);
} else if (exp && irqs_were_disabled && !use_softirq &&
!t->rcu_read_unlock_special.b.deferred_qs) {
// Safe to awaken and we get no help from enabling
// irqs, unlike bh/preempt.
invoke_rcu_core();
} else {
/* Enabling BH or preempt does reschedule, so... */
// Enabling BH or preempt does reschedule, so...
// Also if no expediting or NO_HZ_FULL, slow is OK.
set_tsk_need_resched(current);
set_preempt_need_resched();
if (IS_ENABLED(CONFIG_IRQ_WORK) &&
!rdp->defer_qs_iw_pending && exp) {
// Get scheduler to re-evaluate and call hooks.
// If !IRQ_WORK, FQS scan will eventually IPI.
init_irq_work(&rdp->defer_qs_iw,
rcu_preempt_deferred_qs_handler);
rdp->defer_qs_iw_pending = true;
irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
}
}
t->rcu_read_unlock_special.b.deferred_qs = true;
local_irq_restore(flags);
return;
}
@@ -760,7 +777,7 @@
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
i = 0;
list_for_each(lhp, &rnp->blkd_tasks) {
pr_cont(" %p", lhp);
if (++i >= 10)
if (++i >= ncheck)
break;
}
pr_cont("\n");
@@ -944,18 +961,21 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

static void rcu_wake_cond(struct task_struct *t, int status)
/*
* If boosting, set rcuc kthreads to realtime priority.
*/
static void rcu_cpu_kthread_setup(unsigned int cpu)
{
/*
* If the thread is yielding, only wake it when this
* is invoked from idle
*/
if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
wake_up_process(t);
#ifdef CONFIG_RCU_BOOST
struct sched_param sp;

sp.sched_priority = kthread_prio;
sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
#endif /* #ifdef CONFIG_RCU_BOOST */
}

#ifdef CONFIG_RCU_BOOST

/*
* Carry out RCU priority boosting on the task indicated by ->exp_tasks
* or ->boost_tasks, advancing the pointer to the next task in the
@@ -1090,23 +1110,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
}
}

/*
* Wake up the per-CPU kthread to invoke RCU callbacks.
*/
static void invoke_rcu_callbacks_kthread(void)
{
unsigned long flags;

local_irq_save(flags);
__this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
if (__this_cpu_read(rcu_data.rcu_cpu_kthread_task) != NULL &&
current != __this_cpu_read(rcu_data.rcu_cpu_kthread_task)) {
rcu_wake_cond(__this_cpu_read(rcu_data.rcu_cpu_kthread_task),
__this_cpu_read(rcu_data.rcu_cpu_kthread_status));
}
local_irq_restore(flags);
}

/*
* Is the current CPU running the RCU-callbacks kthread?
* Caller must have preemption disabled.
@@ -1160,59 +1163,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
return 0;
}

static void rcu_cpu_kthread_setup(unsigned int cpu)
{
struct sched_param sp;

sp.sched_priority = kthread_prio;
sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
}

static void rcu_cpu_kthread_park(unsigned int cpu)
{
per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
}

static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
return __this_cpu_read(rcu_data.rcu_cpu_has_work);
}

/*
* Per-CPU kernel thread that invokes RCU callbacks. This replaces
* the RCU softirq used in configurations of RCU that do not support RCU
* priority boosting.
*/
static void rcu_cpu_kthread(unsigned int cpu)
{
unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
int spincnt;

for (spincnt = 0; spincnt < 10; spincnt++) {
trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
local_bh_disable();
*statusp = RCU_KTHREAD_RUNNING;
local_irq_disable();
work = *workp;
*workp = 0;
local_irq_enable();
if (work)
rcu_do_batch(this_cpu_ptr(&rcu_data));
local_bh_enable();
if (*workp == 0) {
trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
*statusp = RCU_KTHREAD_WAITING;
return;
}
}
*statusp = RCU_KTHREAD_YIELDING;
trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
schedule_timeout_interruptible(2);
trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
*statusp = RCU_KTHREAD_WAITING;
}

/*
* Set the per-rcu_node kthread's affinity to cover all CPUs that are
* served by the rcu_node in question. The CPU hotplug lock is still
@@ -1243,27 +1193,13 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
free_cpumask_var(cm);
}

static struct smp_hotplug_thread rcu_cpu_thread_spec = {
.store = &rcu_data.rcu_cpu_kthread_task,
.thread_should_run = rcu_cpu_kthread_should_run,
.thread_fn = rcu_cpu_kthread,
.thread_comm = "rcuc/%u",
.setup = rcu_cpu_kthread_setup,
.park = rcu_cpu_kthread_park,
};

/*
* Spawn boost kthreads -- called as soon as the scheduler is running.
*/
static void __init rcu_spawn_boost_kthreads(void)
{
struct rcu_node *rnp;
int cpu;

for_each_possible_cpu(cpu)
per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
if (WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), "%s: Could not start rcub kthread, OOM is now expected behavior\n", __func__))
return;
rcu_for_each_leaf_node(rnp)
(void)rcu_spawn_one_boost_kthread(rnp);
}
@@ -1286,11 +1222,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
return false;

@@ -630,7 +630,9 @@ static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
time_before(j, rcu_state.gp_activity + gpssdelay) ||
atomic_xchg(&warned, 1)) {
raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
if (rnp_root != rnp)
/* irqs remain disabled. */
raw_spin_unlock_rcu_node(rnp_root);
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
return;
}

@@ -423,6 +423,19 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
do { } while (0)
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
/* Get rcutorture access to sched_setaffinity(). */
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
int ret;

ret = sched_setaffinity(pid, in_mask);
WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
return ret;
}
EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
#endif

#ifdef CONFIG_RCU_STALL_COMMON
int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);

@@ -570,6 +570,7 @@ static void torture_shutdown_cleanup(void)
static struct task_struct *stutter_task;
static int stutter_pause_test;
static int stutter;
static int stutter_gap;

/*
* Block until the stutter interval ends. This must be called periodically
@@ -578,10 +579,12 @@ static int stutter;
bool stutter_wait(const char *title)
{
int spt;
bool ret = false;

cond_resched_tasks_rcu_qs();
spt = READ_ONCE(stutter_pause_test);
for (; spt; spt = READ_ONCE(stutter_pause_test)) {
ret = true;
if (spt == 1) {
schedule_timeout_interruptible(1);
} else if (spt == 2) {
@@ -592,7 +595,7 @@ bool stutter_wait(const char *title)
}
torture_shutdown_absorb(title);
}
return !!spt;
return ret;
}
EXPORT_SYMBOL_GPL(stutter_wait);

@@ -602,17 +605,24 @@ EXPORT_SYMBOL_GPL(stutter_wait);
*/
static int torture_stutter(void *arg)
{
int wtime;

VERBOSE_TOROUT_STRING("torture_stutter task started");
do {
if (!torture_must_stop() && stutter > 1) {
WRITE_ONCE(stutter_pause_test, 1);
schedule_timeout_interruptible(stutter - 1);
wtime = stutter;
if (stutter > HZ + 1) {
WRITE_ONCE(stutter_pause_test, 1);
wtime = stutter - HZ - 1;
schedule_timeout_interruptible(wtime);
wtime = HZ + 1;
}
WRITE_ONCE(stutter_pause_test, 2);
schedule_timeout_interruptible(1);
schedule_timeout_interruptible(wtime);
}
WRITE_ONCE(stutter_pause_test, 0);
if (!torture_must_stop())
schedule_timeout_interruptible(stutter);
schedule_timeout_interruptible(stutter_gap);
torture_shutdown_absorb("torture_stutter");
} while (!torture_must_stop());
torture_kthread_stopping("torture_stutter");
@@ -622,9 +632,10 @@ static int torture_stutter(void *arg)
/*
* Initialize and kick off the torture_stutter kthread.
*/
int torture_stutter_init(const int s)
int torture_stutter_init(const int s, const int sgap)
{
stutter = s;
stutter_gap = sgap;
return torture_create_kthread(torture_stutter, NULL, stutter_task);
}
EXPORT_SYMBOL_GPL(torture_stutter_init);