rcu: Discard separate per-CPU callback counts
Back when there were multiple flavors of RCU, it was necessary to separately count lazy and non-lazy callbacks for each CPU. These counts were used in CONFIG_RCU_FAST_NO_HZ kernels to determine how long a newly idle CPU should be allowed to sleep before handling its RCU callbacks. But now that there is only one flavor, the callback counts in a given CPU's sole rcu_data structure are the counts for that CPU.

This commit therefore removes the rcu_data structure's ->nonlazy_posted and ->nonlazy_posted_snap fields along with the rcu_idle_count_callbacks_posted() and rcu_cpu_has_callbacks() functions, repurposes the rcu_data structure's ->all_lazy field to record the laziness state at the beginning of the latest idle sojourn, and modifies the CONFIG_RCU_FAST_NO_HZ RCU CPU stall warnings accordingly.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
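The central simplification is that, with a single RCU flavor, a CPU's laziness can be read directly from the segmented callback list in its one rcu_data structure rather than tracked by separate running counters. A minimal sketch of that check, using only the segcblist accessors that appear in the diff below (the helper name cpu_cbs_all_lazy is illustrative and not part of the patch):

	/*
	 * Sketch: report whether all of this CPU's pending callbacks are
	 * lazy by consulting its sole rcu_data structure's ->cblist, as
	 * the patched rcu_needs_cpu() now does.  An empty list is treated
	 * as "all lazy", matching the old rcu_cpu_has_callbacks()
	 * convention.
	 */
	static bool cpu_cbs_all_lazy(struct rcu_data *rdp)
	{
		if (rcu_segcblist_empty(&rdp->cblist))
			return true;	/* No callbacks at all. */
		return !rcu_segcblist_n_nonlazy_cbs(&rdp->cblist);
	}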
@@ -2878,9 +2878,6 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
 		rcu_segcblist_init(&rdp->cblist);
 	}
 	rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
-	if (!lazy)
-		rcu_idle_count_callbacks_posted();
-
 	if (__is_kfree_rcu_offset((unsigned long)func))
 		trace_rcu_kfree_callback(rcu_state.name, head,
 					 (unsigned long)func,
@@ -3110,28 +3107,6 @@ static int rcu_pending(void)
 	return 0;
 }
 
-/*
- * Return true if the specified CPU has any callback.  If all_lazy is
- * non-NULL, store an indication of whether all callbacks are lazy.
- * (If there are no callbacks, all of them are deemed to be lazy.)
- */
-static bool rcu_cpu_has_callbacks(bool *all_lazy)
-{
-	bool al = true;
-	bool hc = false;
-	struct rcu_data *rdp;
-
-	rdp = this_cpu_ptr(&rcu_data);
-	if (!rcu_segcblist_empty(&rdp->cblist)) {
-		hc = true;
-		if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist))
-			al = false;
-	}
-	if (all_lazy)
-		*all_lazy = al;
-	return hc;
-}
-
 /*
  * Helper function for rcu_barrier() tracing.  If tracing is disabled,
  * the compiler is expected to optimize this away.
@@ -193,10 +193,7 @@ struct rcu_data {
 	bool rcu_need_heavy_qs;		/* GP old, so heavy quiescent state! */
 	bool rcu_urgent_qs;		/* GP old need light quiescent state. */
 #ifdef CONFIG_RCU_FAST_NO_HZ
-	bool all_lazy;			/* Are all CPU's CBs lazy? */
-	unsigned long nonlazy_posted;	/* # times non-lazy CB posted to CPU. */
-	unsigned long nonlazy_posted_snap;
-					/* Nonlazy_posted snapshot. */
+	bool all_lazy;			/* All CPU's CBs lazy at idle start? */
 	unsigned long last_accelerate;	/* Last jiffy CBs were accelerated. */
 	unsigned long last_advance_all;	/* Last jiffy CBs were all advanced. */
 	int tick_nohz_enabled_snap;	/* Previously seen value from sysfs. */
@@ -430,7 +427,6 @@ static void __init rcu_spawn_boost_kthreads(void);
 static void rcu_prepare_kthreads(int cpu);
 static void rcu_cleanup_after_idle(void);
 static void rcu_prepare_for_idle(void);
-static void rcu_idle_count_callbacks_posted(void);
 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
 static void rcu_preempt_deferred_qs(struct task_struct *t);
@@ -1474,7 +1474,7 @@ static void rcu_prepare_kthreads(int cpu)
 int rcu_needs_cpu(u64 basemono, u64 *nextevt)
 {
 	*nextevt = KTIME_MAX;
-	return rcu_cpu_has_callbacks(NULL);
+	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist);
 }
 
 /*
@@ -1493,14 +1493,6 @@ static void rcu_prepare_for_idle(void)
 {
 }
 
-/*
- * Don't bother keeping a running count of the number of RCU callbacks
- * posted because CONFIG_RCU_FAST_NO_HZ=n.
- */
-static void rcu_idle_count_callbacks_posted(void)
-{
-}
-
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 /*
@@ -1583,11 +1575,8 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
 
 	lockdep_assert_irqs_disabled();
 
-	/* Snapshot to detect later posting of non-lazy callback. */
-	rdp->nonlazy_posted_snap = rdp->nonlazy_posted;
-
 	/* If no callbacks, RCU doesn't need the CPU. */
-	if (!rcu_cpu_has_callbacks(&rdp->all_lazy)) {
+	if (rcu_segcblist_empty(&rdp->cblist)) {
 		*nextevt = KTIME_MAX;
 		return 0;
 	}
@@ -1601,11 +1590,12 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
 	rdp->last_accelerate = jiffies;
 
 	/* Request timer delay depending on laziness, and round. */
-	if (!rdp->all_lazy) {
+	rdp->all_lazy = !rcu_segcblist_n_nonlazy_cbs(&rdp->cblist);
+	if (rdp->all_lazy) {
+		dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
+	} else {
 		dj = round_up(rcu_idle_gp_delay + jiffies,
 			       rcu_idle_gp_delay) - jiffies;
-	} else {
-		dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
 	}
 	*nextevt = basemono + dj * TICK_NSEC;
 	return 0;
@@ -1635,7 +1625,7 @@ static void rcu_prepare_for_idle(void)
 	/* Handle nohz enablement switches conservatively. */
 	tne = READ_ONCE(tick_nohz_active);
 	if (tne != rdp->tick_nohz_enabled_snap) {
-		if (rcu_cpu_has_callbacks(NULL))
+		if (!rcu_segcblist_empty(&rdp->cblist))
 			invoke_rcu_core(); /* force nohz to see update. */
 		rdp->tick_nohz_enabled_snap = tne;
 		return;
@@ -1648,10 +1638,8 @@ static void rcu_prepare_for_idle(void)
 	 * callbacks, invoke RCU core for the side-effect of recalculating
 	 * idle duration on re-entry to idle.
 	 */
-	if (rdp->all_lazy &&
-	    rdp->nonlazy_posted != rdp->nonlazy_posted_snap) {
+	if (rdp->all_lazy && rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)) {
 		rdp->all_lazy = false;
-		rdp->nonlazy_posted_snap = rdp->nonlazy_posted;
 		invoke_rcu_core();
 		return;
 	}
@@ -1687,19 +1675,6 @@ static void rcu_cleanup_after_idle(void)
 		invoke_rcu_core();
 }
 
-/*
- * Keep a running count of the number of non-lazy callbacks posted
- * on this CPU.  This running counter (which is never decremented) allows
- * rcu_prepare_for_idle() to detect when something out of the idle loop
- * posts a callback, even if an equal number of callbacks are invoked.
- * Of course, callbacks should only be posted from within a trace event
- * designed to be called from idle or from within RCU_NONIDLE().
- */
-static void rcu_idle_count_callbacks_posted(void)
-{
-	__this_cpu_add(rcu_data.nonlazy_posted, 1);
-}
-
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 #ifdef CONFIG_RCU_FAST_NO_HZ
@@ -1707,13 +1682,12 @@ static void rcu_idle_count_callbacks_posted(void)
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
-	unsigned long nlpd = rdp->nonlazy_posted - rdp->nonlazy_posted_snap;
 
-	sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
+	sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c",
 		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
-		ulong2long(nlpd),
-		rdp->all_lazy ? 'L' : '.',
-		rdp->tick_nohz_enabled_snap ? '.' : 'D');
+		".l"[rdp->all_lazy],
+		".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)],
+		".D"[!rdp->tick_nohz_enabled_snap]);
 }
 
 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */