Merge branches 'fixes.2018.02.23a', 'srcu.2018.02.20a' and 'torture.2018.02.20a' into HEAD
fixes.2018.02.23a: Miscellaneous fixes
srcu.2018.02.20a: SRCU updates
torture.2018.02.20a: Torture-test updates
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -61,11 +61,30 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");
 #define VERBOSE_PERFOUT_ERRSTRING(s) \
 	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)
 
+/*
+ * The intended use cases for the nreaders and nwriters module parameters
+ * are as follows:
+ *
+ * 1. Specify only the nr_cpus kernel boot parameter.  This will
+ *    set both nreaders and nwriters to the value specified by
+ *    nr_cpus for a mixed reader/writer test.
+ *
+ * 2. Specify the nr_cpus kernel boot parameter, but set
+ *    rcuperf.nreaders to zero.  This will set nwriters to the
+ *    value specified by nr_cpus for an update-only test.
+ *
+ * 3. Specify the nr_cpus kernel boot parameter, but set
+ *    rcuperf.nwriters to zero.  This will set nreaders to the
+ *    value specified by nr_cpus for a read-only test.
+ *
+ * Various other use cases may of course be specified.
+ */
+
 torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
 torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader");
 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
 torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
-torture_param(int, nreaders, 0, "Number of RCU reader threads");
+torture_param(int, nreaders, -1, "Number of RCU reader threads");
 torture_param(int, nwriters, -1, "Number of RCU updater threads");
 torture_param(bool, shutdown, !IS_ENABLED(MODULE),
 	      "Shutdown at end of performance tests.");
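For readers unfamiliar with the torture_param() wrapper used throughout this file: it is approximately a thin veneer over the stock module-parameter macros, declaring each knob as a module parameter that is read-only via sysfs, which is why the comment above passes values such as rcuperf.nreaders on the kernel boot line. A sketch of its shape, paraphrasing include/linux/torture.h rather than quoting it:

/* Approximate shape of torture_param(); see include/linux/torture.h. */
#define torture_param(type, name, init, msg) \
	static type name = init; \
	module_param(name, type, 0444); /* 0444: visible but read-only in sysfs */ \
	MODULE_PARM_DESC(name, msg)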
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -909,34 +909,38 @@ rcu_torture_writer(void *arg)
 	int nsynctypes = 0;
 
 	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
-	if (!can_expedite) {
+	if (!can_expedite)
 		pr_alert("%s" TORTURE_FLAG
-			 " GP expediting controlled from boot/sysfs for %s,\n",
+			 " GP expediting controlled from boot/sysfs for %s.\n",
 			 torture_type, cur_ops->name);
-		pr_alert("%s" TORTURE_FLAG
-			 " Disabled dynamic grace-period expediting.\n",
-			 torture_type);
-	}
 
 	/* Initialize synctype[] array.  If none set, take default. */
 	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
 		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
-	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync)
+	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
 		synctype[nsynctypes++] = RTWS_COND_GET;
-	else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync))
-		pr_alert("rcu_torture_writer: gp_cond without primitives.\n");
-	if (gp_exp1 && cur_ops->exp_sync)
+		pr_info("%s: Testing conditional GPs.\n", __func__);
+	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
+		pr_alert("%s: gp_cond without primitives.\n", __func__);
+	}
+	if (gp_exp1 && cur_ops->exp_sync) {
 		synctype[nsynctypes++] = RTWS_EXP_SYNC;
-	else if (gp_exp && !cur_ops->exp_sync)
-		pr_alert("rcu_torture_writer: gp_exp without primitives.\n");
-	if (gp_normal1 && cur_ops->deferred_free)
+		pr_info("%s: Testing expedited GPs.\n", __func__);
+	} else if (gp_exp && !cur_ops->exp_sync) {
+		pr_alert("%s: gp_exp without primitives.\n", __func__);
+	}
+	if (gp_normal1 && cur_ops->deferred_free) {
 		synctype[nsynctypes++] = RTWS_DEF_FREE;
-	else if (gp_normal && !cur_ops->deferred_free)
-		pr_alert("rcu_torture_writer: gp_normal without primitives.\n");
-	if (gp_sync1 && cur_ops->sync)
+		pr_info("%s: Testing asynchronous GPs.\n", __func__);
+	} else if (gp_normal && !cur_ops->deferred_free) {
+		pr_alert("%s: gp_normal without primitives.\n", __func__);
+	}
+	if (gp_sync1 && cur_ops->sync) {
 		synctype[nsynctypes++] = RTWS_SYNC;
-	else if (gp_sync && !cur_ops->sync)
-		pr_alert("rcu_torture_writer: gp_sync without primitives.\n");
+		pr_info("%s: Testing normal GPs.\n", __func__);
+	} else if (gp_sync && !cur_ops->sync) {
+		pr_alert("%s: gp_sync without primitives.\n", __func__);
+	}
 	if (WARN_ONCE(nsynctypes == 0,
 		      "rcu_torture_writer: No update-side primitives.\n")) {
 		/*
@@ -1011,6 +1015,9 @@ rcu_torture_writer(void *arg)
 			rcu_unexpedite_gp();
 			if (++expediting > 3)
 				expediting = -expediting;
+		} else if (!can_expedite) { /* Disabled during boot, recheck. */
+			can_expedite = !rcu_gp_is_expedited() &&
+				       !rcu_gp_is_normal();
 		}
 		rcu_torture_writer_state = RTWS_STUTTER;
 		stutter_wait("rcu_torture_writer");
@@ -1021,6 +1028,10 @@ rcu_torture_writer(void *arg)
 	while (can_expedite && expediting++ < 0)
 		rcu_unexpedite_gp();
 	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
+	if (!can_expedite)
+		pr_alert("%s" TORTURE_FLAG
+			 " Dynamic grace-period expediting was disabled.\n",
+			 torture_type);
 	rcu_torture_writer_state = RTWS_STOPPING;
 	torture_kthread_stopping("rcu_torture_writer");
 	return 0;
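For context on the expediting knobs driven above: rcu_expedite_gp() and rcu_unexpedite_gp() nest like a counter and must balance, while rcu_gp_is_expedited() reports whether expediting is currently in force from boot parameters, sysfs, or those counters. A minimal illustrative pairing (a sketch, not code from this series):

/* Illustrative sketch: expedite one grace period, then restore. */
static void demo_expedited_gp(void)
{
	rcu_expedite_gp();	/* later synchronize_rcu() calls run expedited */
	synchronize_rcu();
	rcu_unexpedite_gp();	/* must balance the rcu_expedite_gp() above */
}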
@@ -1045,13 +1056,13 @@ rcu_torture_fakewriter(void *arg)
 		    torture_random(&rand) % (nfakewriters * 8) == 0) {
 			cur_ops->cb_barrier();
 		} else if (gp_normal == gp_exp) {
-			if (torture_random(&rand) & 0x80)
+			if (cur_ops->sync && torture_random(&rand) & 0x80)
 				cur_ops->sync();
-			else
+			else if (cur_ops->exp_sync)
 				cur_ops->exp_sync();
-		} else if (gp_normal) {
+		} else if (gp_normal && cur_ops->sync) {
 			cur_ops->sync();
-		} else {
+		} else if (cur_ops->exp_sync) {
 			cur_ops->exp_sync();
 		}
 		stutter_wait("rcu_torture_fakewriter");
@@ -1557,11 +1568,10 @@ static int rcu_torture_barrier_init(void)
 	atomic_set(&barrier_cbs_count, 0);
 	atomic_set(&barrier_cbs_invoked, 0);
 	barrier_cbs_tasks =
-		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_tasks[0]),
+		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
 			GFP_KERNEL);
 	barrier_cbs_wq =
-		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),
-			GFP_KERNEL);
+		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
 	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
 		return -ENOMEM;
 	for (i = 0; i < n_barrier_cbs; i++) {
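The kzalloc()-to-kcalloc() conversions here (and in rcu_torture_init() below) add an overflow check on the n * size multiplication, returning NULL instead of silently allocating a too-small buffer. Roughly, and ignoring the real slab internals, kcalloc() behaves like this sketch:

/* Approximate semantics of kcalloc(); the real version is built on
 * kmalloc_array() in the slab allocator. */
static inline void *kcalloc_sketch(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;			/* n * size would wrap */
	return kzalloc(n * size, flags);	/* zero-filled, as before */
}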
@@ -1674,7 +1684,7 @@ static void rcu_torture_err_cb(struct rcu_head *rhp)
 	 * next grace period.  Unlikely, but can happen.  If it
 	 * does happen, the debug-objects subsystem won't have splatted.
 	 */
-	pr_alert("rcutorture: duplicated callback was invoked.\n");
+	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
 }
 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
@@ -1691,7 +1701,7 @@ static void rcu_test_debug_objects(void)
 
 	init_rcu_head_on_stack(&rh1);
 	init_rcu_head_on_stack(&rh2);
-	pr_alert("rcutorture: WARN: Duplicate call_rcu() test starting.\n");
+	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
 
 	/* Try to queue the rh2 pair of callbacks for the same grace period. */
 	preempt_disable(); /* Prevent preemption from interrupting test. */
@@ -1706,11 +1716,11 @@ static void rcu_test_debug_objects(void)
 
 	/* Wait for them all to get done so we can safely return. */
 	rcu_barrier();
-	pr_alert("rcutorture: WARN: Duplicate call_rcu() test complete.\n");
+	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
 	destroy_rcu_head_on_stack(&rh1);
 	destroy_rcu_head_on_stack(&rh2);
 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-	pr_alert("rcutorture: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n");
+	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 }
 
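The string changes in these hunks swap hard-coded module names for identifiers the build system and compiler maintain automatically: KBUILD_MODNAME expands to the module's name as a string literal, and __func__ to the enclosing function's name, so the messages survive renames. A hypothetical one-liner showing the effect (demo_report() is not from the patch):

static void demo_report(void)
{
	/* If built as rcutorture.ko, prints "rcutorture: in demo_report". */
	pr_alert("%s: in %s\n", KBUILD_MODNAME, __func__);
}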
@@ -1799,7 +1809,7 @@ rcu_torture_init(void)
 	if (firsterr)
 		goto unwind;
 	if (nfakewriters > 0) {
-		fakewriter_tasks = kzalloc(nfakewriters *
+		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
 		if (fakewriter_tasks == NULL) {
@@ -1814,7 +1824,7 @@ rcu_torture_init(void)
 		if (firsterr)
 			goto unwind;
 	}
-	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
+	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
 			       GFP_KERNEL);
 	if (reader_tasks == NULL) {
 		VERBOSE_TOROUT_ERRSTRING("out of memory");
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -386,7 +386,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp)
 		flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
 	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
 	    WARN_ON(srcu_readers_active(sp))) {
-		pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
+		pr_info("%s: Active srcu_struct %p state: %d\n", __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
 		return; /* Caller forgot to stop doing call_srcu()? */
 	}
 	free_percpu(sp->sda);
@@ -526,11 +526,11 @@ static void srcu_gp_end(struct srcu_struct *sp)
 {
 	unsigned long cbdelay;
 	bool cbs;
+	bool last_lvl;
 	int cpu;
 	unsigned long flags;
 	unsigned long gpseq;
 	int idx;
-	int idxnext;
 	unsigned long mask;
 	struct srcu_data *sdp;
 	struct srcu_node *snp;
@@ -554,11 +554,11 @@ static void srcu_gp_end(struct srcu_struct *sp)
 
 	/* Initiate callback invocation as needed. */
 	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
-	idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
 	rcu_for_each_node_breadth_first(sp, snp) {
 		spin_lock_irq_rcu_node(snp);
 		cbs = false;
-		if (snp >= sp->level[rcu_num_lvls - 1])
+		last_lvl = snp >= sp->level[rcu_num_lvls - 1];
+		if (last_lvl)
 			cbs = snp->srcu_have_cbs[idx] == gpseq;
 		snp->srcu_have_cbs[idx] = gpseq;
 		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
@@ -571,13 +571,16 @@ static void srcu_gp_end(struct srcu_struct *sp)
 			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
 
 		/* Occasionally prevent srcu_data counter wrap. */
-		if (!(gpseq & counter_wrap_check))
+		if (!(gpseq & counter_wrap_check) && last_lvl)
 			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
 				sdp = per_cpu_ptr(sp->sda, cpu);
 				spin_lock_irqsave_rcu_node(sdp, flags);
 				if (ULONG_CMP_GE(gpseq,
 						 sdp->srcu_gp_seq_needed + 100))
 					sdp->srcu_gp_seq_needed = gpseq;
+				if (ULONG_CMP_GE(gpseq,
+						 sdp->srcu_gp_seq_needed_exp + 100))
+					sdp->srcu_gp_seq_needed_exp = gpseq;
 				spin_unlock_irqrestore_rcu_node(sdp, flags);
 			}
 	}
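The ULONG_CMP_GE() guard above is the kernel's wrap-tolerant sequence comparison: it interprets the unsigned difference as a signed distance, which keeps the +100 check working even when gpseq wraps around ULONG_MAX. Approximately, per include/linux/rcupdate.h:

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))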
@@ -592,9 +595,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
 	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
 		srcu_gp_start(sp);
 		spin_unlock_irq_rcu_node(sp);
-		/* Throttle expedited grace periods: Should be rare! */
-		srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
-				    ? 0 : SRCU_INTERVAL);
+		srcu_reschedule(sp, 0);
 	} else {
 		spin_unlock_irq_rcu_node(sp);
 	}