@@ -181,12 +181,17 @@ void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
	preempt_enable();
}

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
{
	struct task_struct *t = current;

	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
	preempt_enable();
	exit_tasks_rcu_finish_trace(t);
}

#ifndef CONFIG_TINY_RCU
@@ -196,15 +201,19 @@ void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#ifdef CONFIG_TASKS_RCU
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
	else
		pr_info("\tTasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */
@@ -569,3 +578,347 @@ static int __init rcu_spawn_tasks_rude_kthread(void)
core_initcall(rcu_spawn_tasks_rude_kthread);

#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */

////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU.  This variant is designed to be used
// to protect tracing hooks, including those of BPF.  This variant
// therefore:
//
// 1. Has explicit read-side markers to allow finite grace periods
//    in the face of in-kernel loops for PREEMPT=n builds.
//
// 2. Protects code in the idle loop, exception entry/exit, and
//    CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3. Avoids expensive read-side instructions, having overhead similar
//    to that of Preemptible RCU.
//
// There are of course downsides.  The grace-period code can send IPIs to
// CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
// It is necessary to scan the full tasklist, much as for Tasks RCU.  There
// is a single callback queue guarded by a single lock, again, much as for
// Tasks RCU.  If needed, these downsides can be at least partially remedied.
//
// Perhaps most important, this variant of RCU does not affect the vanilla
// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
// readers can operate from idle, offline, and exception entry/exit in no
// way allows rcu_preempt and rcu_sched readers to also do so.
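//
// Illustrative usage sketch (editorial addition, not part of this patch).
// A reader brackets its use of a trace-protected hook with
// rcu_read_lock_trace()/rcu_read_unlock_trace(), and the updater
// unpublishes the hook and waits for a Tasks Trace grace period before
// freeing it.  The my_hook/active_hook names are hypothetical:
//
//	struct my_hook {
//		void (*func)(void);
//	};
//	static struct my_hook *active_hook;	// Published/unpublished by updater.
//
//	static void invoke_active_hook(void)
//	{
//		struct my_hook *hp;
//
//		rcu_read_lock_trace();
//		hp = READ_ONCE(active_hook);
//		if (hp)
//			hp->func();
//		rcu_read_unlock_trace();
//	}
//
//	static void retire_active_hook(struct my_hook *hp)
//	{
//		WRITE_ONCE(active_hook, NULL);	// No new readers can find it.
//		synchronize_rcu_tasks_trace();	// Wait for pre-existing readers.
//		kfree(hp);			// Now safe to free.
//	}
//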

// The lockdep state must be outside of #ifdef to be useful.
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_trace_key;
struct lockdep_map rcu_trace_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TASKS_TRACE_RCU

atomic_t trc_n_readers_need_end;	// Number of waited-for readers.
DECLARE_WAIT_QUEUE_HEAD(trc_wait);	// List of holdout tasks.

// Record outstanding IPIs to each CPU.  No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);

/* If we are the last reader, wake up the grace-period kthread. */
void rcu_read_unlock_trace_special(struct task_struct *t)
{
	WRITE_ONCE(t->trc_reader_need_end, false);
	if (atomic_dec_and_test(&trc_n_readers_need_end))
		wake_up(&trc_wait);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);

/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{
	if (list_empty(&t->trc_holdout_list)) {
		get_task_struct(t);
		list_add(&t->trc_holdout_list, bhp);
	}
}

/* Remove a task from the holdout list, if it is in fact present. */
static void trc_del_holdout(struct task_struct *t)
{
	if (!list_empty(&t->trc_holdout_list)) {
		list_del_init(&t->trc_holdout_list);
		put_task_struct(t);
	}
}

/* IPI handler to check task state. */
static void trc_read_check_handler(void *t_in)
{
	struct task_struct *t = current;
	struct task_struct *texp = t_in;

	// If the task is no longer running on this CPU, leave.
	if (unlikely(texp != t)) {
		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
			wake_up(&trc_wait);
		goto reset_ipi; // Already on holdout list, so will check later.
	}

	// If the task is not in a read-side critical section, and
	// if this is the last reader, awaken the grace-period kthread.
	if (likely(!t->trc_reader_nesting)) {
		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
			wake_up(&trc_wait);
		// Mark as checked after decrement to avoid false
		// positives on the above WARN_ON_ONCE().
		WRITE_ONCE(t->trc_reader_checked, true);
		goto reset_ipi;
	}
	WRITE_ONCE(t->trc_reader_checked, true);

	// Get here if the task is in a read-side critical section.  Set
	// its state so that it will awaken the grace-period kthread upon
	// exit from that critical section.
	WARN_ON_ONCE(t->trc_reader_need_end);
	WRITE_ONCE(t->trc_reader_need_end, true);

reset_ipi:
	// Allow future IPIs to be sent on CPU and for task.
	// Also order this IPI handler against any later manipulations of
	// the intended task.
	smp_store_release(&per_cpu(trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
}

/* Callback function for scheduler to check locked-down task. */
static bool trc_inspect_reader(struct task_struct *t, void *arg)
{
	if (task_curr(t))
		return false; // It is running, so decline to inspect it.

	// Mark as checked.  Because this is called from the grace-period
	// kthread, also remove the task from the holdout list.
	t->trc_reader_checked = true;
	trc_del_holdout(t);

	// If the task is in a read-side critical section, set up its
	// state so that it will awaken the grace-period kthread upon
	// exit from that critical section.
	if (unlikely(t->trc_reader_nesting)) {
		atomic_inc(&trc_n_readers_need_end); // One more to wait on.
		WARN_ON_ONCE(t->trc_reader_need_end);
		WRITE_ONCE(t->trc_reader_need_end, true);
	}
	return true;
}

/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t,
				    struct list_head *bhp)
{
	int cpu;

	// If a previous IPI is still in flight, let it complete.
	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
		return;

	// The current task had better be in a quiescent state.
	if (t == current) {
		t->trc_reader_checked = true;
		trc_del_holdout(t);
		WARN_ON_ONCE(t->trc_reader_nesting);
		return;
	}

	// Attempt to nail down the task for inspection.
	get_task_struct(t);
	if (try_invoke_on_locked_down_task(t, trc_inspect_reader, NULL)) {
		put_task_struct(t);
		return;
	}
	put_task_struct(t);

	// If currently running, send an IPI; either way, add to list.
	trc_add_holdout(t, bhp);
	if (task_curr(t) && time_after(jiffies, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
		// The task is currently running, so try IPIing it.
		cpu = task_cpu(t);

		// If there is already an IPI outstanding, let it happen.
		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
			return;

		atomic_inc(&trc_n_readers_need_end);
		per_cpu(trc_ipi_to_cpu, cpu) = true;
		t->trc_ipi_to_cpu = cpu;
		if (smp_call_function_single(cpu,
					     trc_read_check_handler, t, 0)) {
			// Just in case there is some other reason for
			// failure than the target CPU being offline.
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = -1;	// Allow a later IPI to be retried.
			if (atomic_dec_and_test(&trc_n_readers_need_end)) {
				WARN_ON_ONCE(1);
				wake_up(&trc_wait);
			}
		}
	}
}

/* Initialize for a new RCU-tasks-trace grace period. */
static void rcu_tasks_trace_pregp_step(void)
{
	int cpu;

	// Wait for CPU-hotplug paths to complete.
	cpus_read_lock();
	cpus_read_unlock();

	// Allow for fast-acting IPIs.
	atomic_set(&trc_n_readers_need_end, 1);

	// There shouldn't be any old IPIs, but...
	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
}

/* Do first-round processing for the specified task. */
static void rcu_tasks_trace_pertask(struct task_struct *t,
				    struct list_head *hop)
{
	WRITE_ONCE(t->trc_reader_need_end, false);
	t->trc_reader_checked = false;
	t->trc_ipi_to_cpu = -1;
	trc_wait_for_one_reader(t, hop);
}

/* Do intermediate processing between task and holdout scans. */
static void rcu_tasks_trace_postscan(void)
{
	// Wait for late-stage exiting tasks to finish exiting.
	// These might have passed the call to exit_tasks_rcu_finish().
	synchronize_rcu();
	// Any tasks that exit after this point will set ->trc_reader_checked.
}

/* Do one scan of the holdout list. */
static void check_all_holdout_tasks_trace(struct list_head *hop,
					  bool ndrpt, bool *frptp)
{
	struct task_struct *g, *t;

	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
		// If safe and needed, try to check the current task.
		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
		    !READ_ONCE(t->trc_reader_checked))
			trc_wait_for_one_reader(t, hop);

		// If check succeeded, remove this task from the list.
		if (READ_ONCE(t->trc_reader_checked))
			trc_del_holdout(t);
	}
}

/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(void)
{
	// Remove the safety count.
	smp_mb__before_atomic();  // Order vs. earlier atomics
	atomic_dec(&trc_n_readers_need_end);
	smp_mb__after_atomic();  // Order vs. later atomics

	// Wait for readers.
	wait_event_idle_exclusive(trc_wait,
				  atomic_read(&trc_n_readers_need_end) == 0);

	smp_mb(); // Caller's code must be ordered after wakeup.
}

/* Report any needed quiescent state for this exiting task. */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{
	WRITE_ONCE(t->trc_reader_checked, true);
	WARN_ON_ONCE(t->trc_reader_nesting);
	WRITE_ONCE(t->trc_reader_nesting, 0);
	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_need_end)))
		rcu_read_unlock_trace_special(t);
}

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
		 "RCU Tasks Trace");

/**
 * call_rcu_tasks_trace() - Queue a callback for a trace rcu-tasks grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks_trace()
 * assumes that these read-side critical sections are delimited by calls
 * to rcu_read_lock_trace() and rcu_read_unlock_trace(), which may be
 * used from the idle loop, exception entry/exit, and CPU-hotplug code
 * paths, as described in the header comment above.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
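//
// Illustrative usage sketch (editorial addition, not part of this patch).
// An updater that cannot block can queue a callback that frees its
// structure after a Tasks Trace grace period.  The my_hook name and
// its rcu_head placement are hypothetical:
//
//	struct my_hook {
//		struct rcu_head rh;
//		void (*func)(void);
//	};
//
//	static void my_hook_free_cb(struct rcu_head *rhp)
//	{
//		kfree(container_of(rhp, struct my_hook, rh));
//	}
//
//	// After unpublishing hp so that no new readers can find it:
//	//	call_rcu_tasks_trace(&hp->rh, my_hook_free_cb);
//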

/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing trace rcu-tasks read-side critical sections have completed.
 * These read-side critical sections are delimited by calls to
 * rcu_read_lock_trace() and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_trace(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
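//
// Illustrative sketch (editorial addition, not part of this patch).
// The RCU_LOCKDEP_WARN() above flags self-deadlock of the following
// form, in which the grace period cannot end because the caller is
// itself still within a Tasks Trace read-side critical section:
//
//	rcu_read_lock_trace();
//	synchronize_rcu_tasks_trace();	// Lockdep splat: illegal here.
//	rcu_read_unlock_trace();
//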

/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_trace(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks_trace();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
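//
// Illustrative usage sketch (editorial addition, not part of this patch).
// A typical use is module unload: once no new call_rcu_tasks_trace()
// callbacks can be queued, wait for the already-queued ones to be
// invoked before the module's callback functions go away.  The
// my_hook_exit and unregister_my_hooks names are hypothetical:
//
//	static void my_hook_exit(void)
//	{
//		unregister_my_hooks();		// Stop queueing new callbacks.
//		rcu_barrier_tasks_trace();	// Wait for queued callbacks to finish.
//	}
//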

static int __init rcu_spawn_tasks_trace_kthread(void)
{
	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
	rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
	return 0;
}
core_initcall(rcu_spawn_tasks_trace_kthread);

#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */