Merge branches 'consolidate.2019.05.28a', 'doc.2019.05.28a', 'fixes.2019.06.13a', 'srcu.2019.05.28a', 'sync.2019.05.28a' and 'torture.2019.05.28a' into HEAD
consolidate.2019.05.28a: RCU flavor consolidation cleanups and optimizations.
doc.2019.05.28a: Documentation updates.
fixes.2019.06.13a: Miscellaneous fixes.
srcu.2019.05.28a: SRCU updates.
sync.2019.05.28a: RCU-sync flavor consolidation.
torture.2019.05.28a: Torture-test updates.

@@ -446,6 +446,7 @@ void rcu_request_urgent_qs_task(struct task_struct *t);
 enum rcutorture_type {
 	RCU_FLAVOR,
 	RCU_TASKS_FLAVOR,
+	RCU_TRIVIAL_FLAVOR,
 	SRCU_FLAVOR,
 	INVALID_RCU_FLAVOR
 };
@@ -479,6 +480,10 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
 #endif
 #endif
 
+#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
+long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
+#endif
+
 #ifdef CONFIG_TINY_SRCU
 
 static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
@@ -299,6 +299,7 @@ struct rcu_torture_ops {
 	int irq_capable;
 	int can_boost;
 	int extendables;
+	int slow_gps;
 	const char *name;
 };
 
@@ -667,9 +668,51 @@ static struct rcu_torture_ops tasks_ops = {
 	.fqs		= NULL,
 	.stats		= NULL,
 	.irq_capable	= 1,
+	.slow_gps	= 1,
 	.name		= "tasks"
 };
 
+/*
+ * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
+ * This implementation does not necessarily work well with CPU hotplug.
+ */
+
+static void synchronize_rcu_trivial(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
+		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
+	}
+}
+
+static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
+{
+	preempt_disable();
+	return 0;
+}
+
+static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
+{
+	preempt_enable();
+}
+
+static struct rcu_torture_ops trivial_ops = {
+	.ttype		= RCU_TRIVIAL_FLAVOR,
+	.init		= rcu_sync_torture_init,
+	.readlock	= rcu_torture_read_lock_trivial,
+	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
+	.readunlock	= rcu_torture_read_unlock_trivial,
+	.get_gp_seq	= rcu_no_completed,
+	.sync		= synchronize_rcu_trivial,
+	.exp_sync	= synchronize_rcu_trivial,
+	.fqs		= NULL,
+	.stats		= NULL,
+	.irq_capable	= 1,
+	.name		= "trivial"
+};
+
 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
 {
 	if (!cur_ops->gp_diff)
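A minimal sketch of the cycle the new "trivial" flavor drives, assuming CONFIG_PREEMPT=n semantics; all identifiers below are from the hunk above:

	/* Reader: the preempt-disabled region is the read-side critical section. */
	int idx = rcu_torture_read_lock_trivial();	/* preempt_disable() */
	/* ... access torture-test structures ... */
	rcu_torture_read_unlock_trivial(idx);		/* preempt_enable() */

	/*
	 * Updater: migrate onto each online CPU in turn; the resulting
	 * context switch on every CPU guarantees that all pre-existing
	 * preempt-disabled readers have finished.
	 */
	synchronize_rcu_trivial();

Selecting the flavor at load time follows the usual rcutorture convention (torture_type=trivial, matching the .name field above).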
@@ -1010,10 +1053,17 @@ rcu_torture_writer(void *arg)
 				!rcu_gp_is_normal();
 		}
 		rcu_torture_writer_state = RTWS_STUTTER;
-		if (stutter_wait("rcu_torture_writer"))
+		if (stutter_wait("rcu_torture_writer") &&
+		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
+		    !cur_ops->slow_gps &&
+		    !torture_must_stop())
 			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
-				if (list_empty(&rcu_tortures[i].rtort_free))
-					WARN_ON_ONCE(1);
+				if (list_empty(&rcu_tortures[i].rtort_free) &&
+				    rcu_access_pointer(rcu_torture_current) !=
+				    &rcu_tortures[i]) {
+					rcu_ftrace_dump(DUMP_ALL);
+					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
+				}
 	} while (!torture_must_stop());
 	/* Reset expediting back to unexpedited. */
 	if (expediting > 0)
@@ -1358,8 +1408,9 @@ rcu_torture_stats_print(void)
 	}
 
 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
-	pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
+	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
 		rcu_torture_current,
+		rcu_torture_current ? "ver" : "VER",
 		rcu_torture_current_version,
 		list_empty(&rcu_torture_freelist),
 		atomic_read(&n_rcu_torture_alloc),
@@ -1661,6 +1712,17 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
 	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
 }
 
+// Give the scheduler a chance, even on nohz_full CPUs.
+static void rcu_torture_fwd_prog_cond_resched(void)
+{
+	if (IS_ENABLED(CONFIG_PREEMPT) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
+		if (need_resched())
+			schedule();
+	} else {
+		cond_resched();
+	}
+}
+
 /*
  * Free all callbacks on the rcu_fwd_cb_head list, either because the
  * test is over or because we hit an OOM event.
@@ -1674,16 +1736,18 @@ static unsigned long rcu_torture_fwd_prog_cbfree(void)
 	for (;;) {
 		spin_lock_irqsave(&rcu_fwd_lock, flags);
 		rfcp = rcu_fwd_cb_head;
-		if (!rfcp)
+		if (!rfcp) {
+			spin_unlock_irqrestore(&rcu_fwd_lock, flags);
 			break;
+		}
 		rcu_fwd_cb_head = rfcp->rfc_next;
 		if (!rcu_fwd_cb_head)
 			rcu_fwd_cb_tail = &rcu_fwd_cb_head;
 		spin_unlock_irqrestore(&rcu_fwd_lock, flags);
 		kfree(rfcp);
 		freed++;
+		rcu_torture_fwd_prog_cond_resched();
 	}
-	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
 	return freed;
 }
 
@@ -1707,6 +1771,8 @@ static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
 	}
 
 	/* Tight loop containing cond_resched(). */
+	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
+	cur_ops->sync(); /* Later readers see above write. */
 	if (selfpropcb) {
 		WRITE_ONCE(fcs.stop, 0);
 		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
@@ -1724,7 +1790,7 @@ static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
 		udelay(10);
 		cur_ops->readunlock(idx);
 		if (!fwd_progress_need_resched || need_resched())
-			cond_resched();
+			rcu_torture_fwd_prog_cond_resched();
 	}
 	(*tested_tries)++;
 	if (!time_before(jiffies, stopat) &&
@@ -1745,6 +1811,8 @@ static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
 		WARN_ON(READ_ONCE(fcs.stop) != 2);
 		destroy_rcu_head_on_stack(&fcs.rh);
 	}
+	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
+	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
 }
 
 /* Carry out call_rcu() forward-progress testing. */
@@ -1765,6 +1833,8 @@ static void rcu_torture_fwd_prog_cr(void)
 
 	if (READ_ONCE(rcu_fwd_emergency_stop))
 		return; /* Get out of the way quickly, no GP wait! */
+	if (!cur_ops->call)
+		return; /* Can't do call_rcu() fwd prog without ->call. */
 
 	/* Loop continuously posting RCU callbacks. */
 	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
@@ -1805,7 +1875,7 @@ static void rcu_torture_fwd_prog_cr(void)
 			rfcp->rfc_gps = 0;
 		}
 		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
-		cond_resched();
+		rcu_torture_fwd_prog_cond_resched();
 	}
 	stoppedat = jiffies;
 	n_launders_cb_snap = READ_ONCE(n_launders_cb);
@@ -1814,7 +1884,6 @@ static void rcu_torture_fwd_prog_cr(void)
 	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
 	(void)rcu_torture_fwd_prog_cbfree();
 
-	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
 	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop)) {
 		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
 		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
@@ -1825,6 +1894,8 @@ static void rcu_torture_fwd_prog_cr(void)
 			 n_max_gps, n_max_cbs, cver, gps);
 		rcu_torture_fwd_cb_hist();
 	}
+	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
+	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
 }
 
 
@@ -2240,7 +2311,7 @@ rcu_torture_init(void)
 	int firsterr = 0;
 	static struct rcu_torture_ops *torture_ops[] = {
 		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
-		&busted_srcud_ops, &tasks_ops,
+		&busted_srcud_ops, &tasks_ops, &trivial_ops,
 	};
 
 	if (!torture_init_begin(torture_type, verbose))
@@ -2363,7 +2434,10 @@ rcu_torture_init(void)
 	if (stutter < 0)
 		stutter = 0;
 	if (stutter) {
-		firsterr = torture_stutter_init(stutter * HZ);
+		int t;
+
+		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
+		firsterr = torture_stutter_init(stutter * HZ, t);
 		if (firsterr)
 			goto unwind;
 	}
@@ -831,8 +831,8 @@ static void srcu_leak_callback(struct rcu_head *rhp)
  * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
  * srcu_struct structure.
  */
-void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
-		 rcu_callback_t func, bool do_norm)
+static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
+			rcu_callback_t func, bool do_norm)
 {
 	unsigned long flags;
 	int idx;
@@ -1310,3 +1310,68 @@ void __init srcu_init(void)
 		queue_work(rcu_gp_wq, &ssp->work.work);
 	}
 }
+
+#ifdef CONFIG_MODULES
+
+/* Initialize any global-scope srcu_struct structures used by this module. */
+static int srcu_module_coming(struct module *mod)
+{
+	int i;
+	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
+	int ret;
+
+	for (i = 0; i < mod->num_srcu_structs; i++) {
+		ret = init_srcu_struct(*(sspp++));
+		if (WARN_ON_ONCE(ret))
+			return ret;
+	}
+	return 0;
+}
+
+/* Clean up any global-scope srcu_struct structures used by this module. */
+static void srcu_module_going(struct module *mod)
+{
+	int i;
+	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
+
+	for (i = 0; i < mod->num_srcu_structs; i++)
+		cleanup_srcu_struct(*(sspp++));
+}
+
+/* Handle one module, either coming or going. */
+static int srcu_module_notify(struct notifier_block *self,
+			      unsigned long val, void *data)
+{
+	struct module *mod = data;
+	int ret = 0;
+
+	switch (val) {
+	case MODULE_STATE_COMING:
+		ret = srcu_module_coming(mod);
+		break;
+	case MODULE_STATE_GOING:
+		srcu_module_going(mod);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+static struct notifier_block srcu_module_nb = {
+	.notifier_call = srcu_module_notify,
+	.priority = 0,
+};
+
+static __init int init_srcu_module_notifier(void)
+{
+	int ret;
+
+	ret = register_module_notifier(&srcu_module_nb);
+	if (ret)
+		pr_warn("Failed to register srcu module notifier\n");
+	return ret;
+}
+late_initcall(init_srcu_module_notifier);
+
+#endif /* #ifdef CONFIG_MODULES */
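A hedged example of what this notifier enables; the module-scope srcu_struct below is illustrative, not from the patch. With mod->srcu_struct_ptrs populated, the notifier runs init_srcu_struct() at MODULE_STATE_COMING and cleanup_srcu_struct() at MODULE_STATE_GOING, so the module itself needs neither call:

	#include <linux/module.h>
	#include <linux/srcu.h>

	/* Illustrative name; initialized and cleaned up by the notifier above. */
	DEFINE_SRCU(example_srcu);

	static void example_reader(void)
	{
		int idx;

		idx = srcu_read_lock(&example_srcu);
		/* ... read-side critical section ... */
		srcu_read_unlock(&example_srcu, idx);
	}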
@@ -10,65 +10,18 @@
 #include <linux/rcu_sync.h>
 #include <linux/sched.h>
 
-#ifdef CONFIG_PROVE_RCU
-#define __INIT_HELD(func)	.held = func,
-#else
-#define __INIT_HELD(func)
-#endif
-
-static const struct {
-	void (*sync)(void);
-	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
-	void (*wait)(void);
-#ifdef CONFIG_PROVE_RCU
-	int  (*held)(void);
-#endif
-} gp_ops[] = {
-	[RCU_SYNC] = {
-		.sync = synchronize_rcu,
-		.call = call_rcu,
-		.wait = rcu_barrier,
-		__INIT_HELD(rcu_read_lock_held)
-	},
-	[RCU_SCHED_SYNC] = {
-		.sync = synchronize_rcu,
-		.call = call_rcu,
-		.wait = rcu_barrier,
-		__INIT_HELD(rcu_read_lock_sched_held)
-	},
-	[RCU_BH_SYNC] = {
-		.sync = synchronize_rcu,
-		.call = call_rcu,
-		.wait = rcu_barrier,
-		__INIT_HELD(rcu_read_lock_bh_held)
-	},
-};
-
-enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
-enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
+enum { GP_IDLE = 0, GP_ENTER, GP_PASSED, GP_EXIT, GP_REPLAY };
 
 #define	rss_lock	gp_wait.lock
 
-#ifdef CONFIG_PROVE_RCU
-void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
-{
-	RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
-			 "suspicious rcu_sync_is_idle() usage");
-}
-EXPORT_SYMBOL_GPL(rcu_sync_lockdep_assert);
-#endif
-
 /**
  * rcu_sync_init() - Initialize an rcu_sync structure
  * @rsp: Pointer to rcu_sync structure to be initialized
- * @type: Flavor of RCU with which to synchronize rcu_sync structure
  */
-void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
+void rcu_sync_init(struct rcu_sync *rsp)
 {
 	memset(rsp, 0, sizeof(*rsp));
 	init_waitqueue_head(&rsp->gp_wait);
-	rsp->gp_type = type;
 }
 
 /**
@@ -86,6 +39,70 @@ void rcu_sync_enter_start(struct rcu_sync *rsp)
 	rsp->gp_state = GP_PASSED;
 }
 
+
+static void rcu_sync_func(struct rcu_head *rhp);
+
+static void rcu_sync_call(struct rcu_sync *rsp)
+{
+	call_rcu(&rsp->cb_head, rcu_sync_func);
+}
+
+/**
+ * rcu_sync_func() - Callback function managing reader access to fastpath
+ * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
+ *
+ * This function is passed to call_rcu() by rcu_sync_enter() and
+ * rcu_sync_exit(), so that it is invoked after a grace period following
+ * that invocation of enter/exit.
+ *
+ * If it is called by rcu_sync_enter() it signals that all the readers were
+ * switched onto slow path.
+ *
+ * If it is called by rcu_sync_exit() it takes action based on events that
+ * have taken place in the meantime, so that closely spaced rcu_sync_enter()
+ * and rcu_sync_exit() pairs need not wait for a grace period.
+ *
+ * If another rcu_sync_enter() is invoked before the grace period
+ * ended, reset state to allow the next rcu_sync_exit() to let the
+ * readers back onto their fastpaths (after a grace period). If both
+ * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
+ * before the grace period ended, re-invoke call_rcu() on behalf of that
+ * rcu_sync_exit(). Otherwise, set all state back to idle so that readers
+ * can again use their fastpaths.
+ */
+static void rcu_sync_func(struct rcu_head *rhp)
+{
+	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
+	unsigned long flags;
+
+	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
+	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);
+
+	spin_lock_irqsave(&rsp->rss_lock, flags);
+	if (rsp->gp_count) {
+		/*
+		 * We're at least a GP after the GP_IDLE->GP_ENTER transition.
+		 */
+		WRITE_ONCE(rsp->gp_state, GP_PASSED);
+		wake_up_locked(&rsp->gp_wait);
+	} else if (rsp->gp_state == GP_REPLAY) {
+		/*
+		 * A new rcu_sync_exit() has happened; requeue the callback to
+		 * catch a later GP.
+		 */
+		WRITE_ONCE(rsp->gp_state, GP_EXIT);
+		rcu_sync_call(rsp);
+	} else {
+		/*
+		 * We're at least a GP after the last rcu_sync_exit(); everybody
+		 * will now have observed the write side critical section.
+		 * Let 'em rip!
+		 */
+		WRITE_ONCE(rsp->gp_state, GP_IDLE);
+	}
+	spin_unlock_irqrestore(&rsp->rss_lock, flags);
+}
+
 /**
  * rcu_sync_enter() - Force readers onto slowpath
  * @rsp: Pointer to rcu_sync structure to use for synchronization
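As a reading aid (not part of the patch), the consolidated state machine above reduces to:

	/*
	 * GP_IDLE   --rcu_sync_enter()--------------------> GP_ENTER
	 * GP_ENTER  --grace period elapses----------------> GP_PASSED
	 * GP_PASSED --last rcu_sync_exit()----------------> GP_EXIT
	 * GP_EXIT   --grace period elapses----------------> GP_IDLE
	 * GP_EXIT   --enter()/exit() pair before GP ends--> GP_REPLAY
	 * GP_REPLAY --grace period elapses----------------> GP_EXIT
	 */

A callback that instead finds gp_count nonzero moves straight to GP_PASSED, covering an rcu_sync_enter() that arrives while an exit-side grace period is still in flight.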
@@ -103,84 +120,43 @@ void rcu_sync_enter_start(struct rcu_sync *rsp)
  */
 void rcu_sync_enter(struct rcu_sync *rsp)
 {
-	bool need_wait, need_sync;
+	int gp_state;
 
 	spin_lock_irq(&rsp->rss_lock);
-	need_wait = rsp->gp_count++;
-	need_sync = rsp->gp_state == GP_IDLE;
-	if (need_sync)
-		rsp->gp_state = GP_PENDING;
+	gp_state = rsp->gp_state;
+	if (gp_state == GP_IDLE) {
+		WRITE_ONCE(rsp->gp_state, GP_ENTER);
+		WARN_ON_ONCE(rsp->gp_count);
+		/*
+		 * Note that we could simply do rcu_sync_call(rsp) here and
+		 * avoid the "if (gp_state == GP_IDLE)" block below.
+		 *
+		 * However, synchronize_rcu() can be faster if rcu_expedited
+		 * or rcu_blocking_is_gp() is true.
+		 *
+		 * Another reason is that we can't wait for rcu callback if
+		 * we are called at early boot time but this shouldn't happen.
+		 */
+	}
+	rsp->gp_count++;
 	spin_unlock_irq(&rsp->rss_lock);
 
-	WARN_ON_ONCE(need_wait && need_sync);
-	if (need_sync) {
-		gp_ops[rsp->gp_type].sync();
-		rsp->gp_state = GP_PASSED;
-		wake_up_all(&rsp->gp_wait);
-	} else if (need_wait) {
-		wait_event(rsp->gp_wait, rsp->gp_state == GP_PASSED);
-	} else {
+	if (gp_state == GP_IDLE) {
 		/*
-		 * Possible when there's a pending CB from a rcu_sync_exit().
-		 * Nobody has yet been allowed the 'fast' path and thus we can
-		 * avoid doing any sync(). The callback will get 'dropped'.
+		 * See the comment above, this simply does the "synchronous"
+		 * call_rcu(rcu_sync_func) which does GP_ENTER -> GP_PASSED.
 		 */
-		WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
+		synchronize_rcu();
+		rcu_sync_func(&rsp->cb_head);
+		/* Not really needed, wait_event() would see GP_PASSED. */
+		return;
 	}
+
+	wait_event(rsp->gp_wait, READ_ONCE(rsp->gp_state) >= GP_PASSED);
 }
 
 /**
- * rcu_sync_func() - Callback function managing reader access to fastpath
- * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
- *
- * This function is passed to one of the call_rcu() functions by
- * rcu_sync_exit(), so that it is invoked after a grace period following
- * that invocation of rcu_sync_exit(). It takes action based on events that
- * have taken place in the meantime, so that closely spaced rcu_sync_enter()
- * and rcu_sync_exit() pairs need not wait for a grace period.
- *
- * If another rcu_sync_enter() is invoked before the grace period
- * ended, reset state to allow the next rcu_sync_exit() to let the
- * readers back onto their fastpaths (after a grace period). If both
- * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
- * before the grace period ended, re-invoke call_rcu() on behalf of that
- * rcu_sync_exit(). Otherwise, set all state back to idle so that readers
- * can again use their fastpaths.
- */
-static void rcu_sync_func(struct rcu_head *rhp)
-{
-	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
-	unsigned long flags;
-
-	WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
-	WARN_ON_ONCE(rsp->cb_state == CB_IDLE);
-
-	spin_lock_irqsave(&rsp->rss_lock, flags);
-	if (rsp->gp_count) {
-		/*
-		 * A new rcu_sync_begin() has happened; drop the callback.
-		 */
-		rsp->cb_state = CB_IDLE;
-	} else if (rsp->cb_state == CB_REPLAY) {
-		/*
-		 * A new rcu_sync_exit() has happened; requeue the callback
-		 * to catch a later GP.
-		 */
-		rsp->cb_state = CB_PENDING;
-		gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
-	} else {
-		/*
-		 * We're at least a GP after rcu_sync_exit(); everybody will now
-		 * have observed the write side critical section. Let 'em rip!
-		 */
-		rsp->cb_state = CB_IDLE;
-		rsp->gp_state = GP_IDLE;
-	}
-	spin_unlock_irqrestore(&rsp->rss_lock, flags);
-}
-
-/**
- * rcu_sync_exit() - Allow readers back onto fast patch after grace period
+ * rcu_sync_exit() - Allow readers back onto fast path after grace period
  * @rsp: Pointer to rcu_sync structure to use for synchronization
  *
  * This function is used by updaters who have completed, and can therefore
@@ -191,13 +167,16 @@ static void rcu_sync_func(struct rcu_head *rhp)
  */
 void rcu_sync_exit(struct rcu_sync *rsp)
 {
+	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
+	WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);
+
 	spin_lock_irq(&rsp->rss_lock);
 	if (!--rsp->gp_count) {
-		if (rsp->cb_state == CB_IDLE) {
-			rsp->cb_state = CB_PENDING;
-			gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
-		} else if (rsp->cb_state == CB_PENDING) {
-			rsp->cb_state = CB_REPLAY;
+		if (rsp->gp_state == GP_PASSED) {
+			WRITE_ONCE(rsp->gp_state, GP_EXIT);
+			rcu_sync_call(rsp);
+		} else if (rsp->gp_state == GP_EXIT) {
+			WRITE_ONCE(rsp->gp_state, GP_REPLAY);
 		}
 	}
 	spin_unlock_irq(&rsp->rss_lock);
@@ -209,18 +188,19 @@ void rcu_sync_exit(struct rcu_sync *rsp)
  */
 void rcu_sync_dtor(struct rcu_sync *rsp)
 {
-	int cb_state;
+	int gp_state;
 
-	WARN_ON_ONCE(rsp->gp_count);
+	WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
+	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);
 
 	spin_lock_irq(&rsp->rss_lock);
-	if (rsp->cb_state == CB_REPLAY)
-		rsp->cb_state = CB_PENDING;
-	cb_state = rsp->cb_state;
+	if (rsp->gp_state == GP_REPLAY)
+		WRITE_ONCE(rsp->gp_state, GP_EXIT);
+	gp_state = rsp->gp_state;
 	spin_unlock_irq(&rsp->rss_lock);
 
-	if (cb_state != CB_IDLE) {
-		gp_ops[rsp->gp_type].wait();
-		WARN_ON_ONCE(rsp->cb_state != CB_IDLE);
+	if (gp_state != GP_IDLE) {
+		rcu_barrier();
+		WARN_ON_ONCE(rsp->gp_state != GP_IDLE);
 	}
 }
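A hedged usage sketch of the simplified API; the names below are illustrative, and the pattern is the percpu-rwsem-style writer/reader split that rcu_sync serves:

	static struct rcu_sync ex_rss;

	static void ex_init(void)
	{
		rcu_sync_init(&ex_rss);	/* no flavor argument after consolidation */
	}

	static void ex_writer(void)
	{
		rcu_sync_enter(&ex_rss);	/* force readers onto slowpath */
		/* ... write-side critical section ... */
		rcu_sync_exit(&ex_rss);	/* fastpath restored after a GP */
	}

	static bool ex_reader_fastpath(void)
	{
		return rcu_sync_is_idle(&ex_rss);	/* true: no writer, take fast path */
	}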
@@ -401,7 +401,8 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 	return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
 }
 
-#define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch. */
+#define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch ... */
+#define DEFAULT_MAX_RCU_BLIMIT 10000 /* ... even during callback flood. */
 static long blimit = DEFAULT_RCU_BLIMIT;
 #define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
 static long qhimark = DEFAULT_RCU_QHIMARK;
@@ -2134,7 +2135,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
 
 	/* Reinstate batch limit if we have worked down the excess. */
 	count = rcu_segcblist_n_cbs(&rdp->cblist);
-	if (rdp->blimit == LONG_MAX && count <= qlowmark)
+	if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
 		rdp->blimit = blimit;
 
 	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
@@ -2464,7 +2465,7 @@ static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
 		rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
 	} else {
 		/* Give the grace period a kick. */
-		rdp->blimit = LONG_MAX;
+		rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
 		if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
 		    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
 			rcu_force_quiescent_state();
@@ -259,8 +259,7 @@ static bool sync_exp_work_done(unsigned long s)
 {
 	if (rcu_exp_gp_seq_done(s)) {
 		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
-		/* Ensure test happens before caller kfree(). */
-		smp_mb__before_atomic(); /* ^^^ */
+		smp_mb(); /* Ensure test happens before caller kfree(). */
 		return true;
 	}
 	return false;
@@ -777,7 +777,7 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
 	i = 0;
 	list_for_each(lhp, &rnp->blkd_tasks) {
 		pr_cont(" %p", lhp);
-		if (++i >= 10)
+		if (++i >= ncheck)
 			break;
 	}
 	pr_cont("\n");
@@ -630,7 +630,9 @@ static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
 	    time_before(j, rcu_state.gp_req_activity + gpssdelay) ||
 	    time_before(j, rcu_state.gp_activity + gpssdelay) ||
 	    atomic_xchg(&warned, 1)) {
-		raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
+		if (rnp_root != rnp)
+			/* irqs remain disabled. */
+			raw_spin_unlock_rcu_node(rnp_root);
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
@@ -423,6 +423,19 @@ EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
 	do { } while (0)
 #endif
 
+#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
+/* Get rcutorture access to sched_setaffinity(). */
+long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+{
+	int ret;
+
+	ret = sched_setaffinity(pid, in_mask);
+	WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
+#endif
+
 #ifdef CONFIG_RCU_STALL_COMMON
 int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
 EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
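The wrapper exists because sched_setaffinity() itself is not exported to modules; a modular rcutorture reaches it through this shim, as synchronize_rcu_trivial() does above. An illustrative (hypothetical) caller:

	/* Illustrative only: bind the current task to CPU 0, warning on failure. */
	static long example_bind_to_cpu0(void)
	{
		return rcutorture_sched_setaffinity(current->pid, cpumask_of(0));
	}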