rcu: Avoid waking up CPUs having only kfree_rcu() callbacks
When CONFIG_RCU_FAST_NO_HZ is enabled, RCU will allow a given CPU to enter dyntick-idle mode even if it still has RCU callbacks queued. RCU avoids system hangs in this case by scheduling a timer for several jiffies in the future. However, if all of the callbacks on that CPU are from kfree_rcu(), there is no reason to wake the CPU up, as it is not a problem to defer freeing of memory.

This commit therefore tracks the number of callbacks on a given CPU that are from kfree_rcu(), and avoids scheduling the timer if all of a given CPU's callbacks are from kfree_rcu().

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
committed by Paul E. McKenney

parent 0bb7b59d6e
commit 486e259340
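The mechanism is plain per-CPU bookkeeping: alongside the existing count of queued callbacks, keep a count of how many of them are lazy (kfree_rcu()) callbacks, and arm the dyntick-idle wakeup timer only when the two counts differ. A minimal standalone sketch of that idea, using hypothetical names (cb_counts, enqueue_cb, needs_wakeup_timer) rather than the kernel's actual per-CPU data structures:

#include <stdbool.h>

/* Hypothetical per-CPU counters; the kernel keeps these in its own per-CPU state. */
struct cb_counts {
        long qlen;      /* total RCU callbacks queued on this CPU */
        long qlen_lazy; /* of those, callbacks posted via kfree_rcu() */
};

/* Queue one callback; lazy == true means it does nothing but free memory. */
static void enqueue_cb(struct cb_counts *c, bool lazy)
{
        c->qlen++;
        if (lazy)
                c->qlen_lazy++;
}

/*
 * A dyntick-idle CPU needs a wakeup timer only if at least one queued
 * callback is non-lazy; deferring pure kfree_rcu() callbacks is harmless.
 */
static bool needs_wakeup_timer(const struct cb_counts *c)
{
        return c->qlen != c->qlen_lazy;
}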
@@ -313,19 +313,22 @@ TRACE_EVENT(rcu_prep_idle,
 /*
  * Tracepoint for the registration of a single RCU callback function.
  * The first argument is the type of RCU, the second argument is
- * a pointer to the RCU callback itself, and the third element is the
- * new RCU callback queue length for the current CPU.
+ * a pointer to the RCU callback itself, the third element is the
+ * number of lazy callbacks queued, and the fourth element is the
+ * total number of callbacks queued.
  */
 TRACE_EVENT(rcu_callback,

-        TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen),
+        TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
+                 long qlen),

-        TP_ARGS(rcuname, rhp, qlen),
+        TP_ARGS(rcuname, rhp, qlen_lazy, qlen),

         TP_STRUCT__entry(
                 __field(char *, rcuname)
                 __field(void *, rhp)
                 __field(void *, func)
+                __field(long, qlen_lazy)
                 __field(long, qlen)
         ),

@@ -333,11 +336,13 @@ TRACE_EVENT(rcu_callback,
                 __entry->rcuname = rcuname;
                 __entry->rhp = rhp;
                 __entry->func = rhp->func;
+                __entry->qlen_lazy = qlen_lazy;
                 __entry->qlen = qlen;
         ),

-        TP_printk("%s rhp=%p func=%pf %ld",
-                  __entry->rcuname, __entry->rhp, __entry->func, __entry->qlen)
+        TP_printk("%s rhp=%p func=%pf %ld/%ld",
+                  __entry->rcuname, __entry->rhp, __entry->func,
+                  __entry->qlen_lazy, __entry->qlen)
 );

 /*
@@ -345,20 +350,21 @@ TRACE_EVENT(rcu_callback,
  * kfree() form. The first argument is the RCU type, the second argument
  * is a pointer to the RCU callback, the third argument is the offset
  * of the callback within the enclosing RCU-protected data structure,
- * and the fourth argument is the new RCU callback queue length for the
- * current CPU.
+ * the fourth argument is the number of lazy callbacks queued, and the
+ * fifth argument is the total number of callbacks queued.
  */
 TRACE_EVENT(rcu_kfree_callback,

         TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
-                 long qlen),
+                 long qlen_lazy, long qlen),

-        TP_ARGS(rcuname, rhp, offset, qlen),
+        TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),

         TP_STRUCT__entry(
                 __field(char *, rcuname)
                 __field(void *, rhp)
                 __field(unsigned long, offset)
+                __field(long, qlen_lazy)
                 __field(long, qlen)
         ),

@@ -366,41 +372,45 @@ TRACE_EVENT(rcu_kfree_callback,
                 __entry->rcuname = rcuname;
                 __entry->rhp = rhp;
                 __entry->offset = offset;
+                __entry->qlen_lazy = qlen_lazy;
                 __entry->qlen = qlen;
         ),

-        TP_printk("%s rhp=%p func=%ld %ld",
+        TP_printk("%s rhp=%p func=%ld %ld/%ld",
                   __entry->rcuname, __entry->rhp, __entry->offset,
-                  __entry->qlen)
+                  __entry->qlen_lazy, __entry->qlen)
 );

 /*
  * Tracepoint for marking the beginning rcu_do_batch, performed to start
  * RCU callback invocation. The first argument is the RCU flavor,
- * the second is the total number of callbacks (including those that
- * are not yet ready to be invoked), and the third argument is the
- * current RCU-callback batch limit.
+ * the second is the number of lazy callbacks queued, the third is
+ * the total number of callbacks queued, and the fourth argument is
+ * the current RCU-callback batch limit.
  */
 TRACE_EVENT(rcu_batch_start,

-        TP_PROTO(char *rcuname, long qlen, int blimit),
+        TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),

-        TP_ARGS(rcuname, qlen, blimit),
+        TP_ARGS(rcuname, qlen_lazy, qlen, blimit),

         TP_STRUCT__entry(
                 __field(char *, rcuname)
+                __field(long, qlen_lazy)
                 __field(long, qlen)
                 __field(int, blimit)
         ),

         TP_fast_assign(
                 __entry->rcuname = rcuname;
+                __entry->qlen_lazy = qlen_lazy;
                 __entry->qlen = qlen;
                 __entry->blimit = blimit;
         ),

-        TP_printk("%s CBs=%ld bl=%d",
-                  __entry->rcuname, __entry->qlen, __entry->blimit)
+        TP_printk("%s CBs=%ld/%ld bl=%d",
+                  __entry->rcuname, __entry->qlen_lazy, __entry->qlen,
+                  __entry->blimit)
 );

 /*
@@ -531,16 +541,21 @@ TRACE_EVENT(rcu_torture_read,
 #else /* #ifdef CONFIG_RCU_TRACE */

 #define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, qsmask) do { } while (0)
+#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+                                    qsmask) do { } while (0)
 #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
-#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0)
+#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
+                                         grplo, grphi, gp_tasks) do { } \
+        while (0)
 #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
 #define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
 #define trace_rcu_prep_idle(reason) do { } while (0)
-#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
-#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
-#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
+#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
+#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
+        do { } while (0)
+#define trace_rcu_batch_start(rcuname, qlen_lazy, qlen, blimit) \
+        do { } while (0)
 #define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
 #define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
 #define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
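For context, a callback-registration path would now hand both counters to the updated tracepoints, roughly as in the sketch below; the tracepoint names and their new qlen_lazy/qlen arguments come from the diff above, __is_kfree_rcu_offset() is the kernel's existing test for kfree_rcu()-style callbacks, and rsp, rdp, head, and func stand in for the RCU core's per-flavor and per-CPU state:

        /* Illustrative fragment: report both the lazy and total counts to tracing. */
        if (__is_kfree_rcu_offset((unsigned long)func))
                trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
                                         rdp->qlen_lazy, rdp->qlen);
        else
                trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);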