Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull v4.20 RCU changes from Paul E. McKenney:

 - Documentation updates, including some good-eye catches from Joel
   Fernandes.

 - SRCU updates, most notably changes enabling call_srcu() to be
   invoked very early in the boot sequence.

 - Torture-test updates, including some preliminary work towards
   making rcutorture better able to find problems that result in
   insufficient grace-period forward progress.

 - Consolidate the RCU-bh, RCU-preempt, and RCU-sched flavors into a
   single flavor similar to RCU-sched in !PREEMPT kernels and into a
   single flavor similar to RCU-preempt (but also waiting on
   preempt-disabled sequences of code) in PREEMPT kernels.  This branch
   also includes a refactoring of rcu_{nmi,irq}_{enter,exit}() from
   Byungchul Park.

 - Now that there is only one RCU flavor in any given running kernel,
   the many "rsp" pointers are no longer required, and this cleanup
   series removes them.

 - This branch carries out additional cleanups made possible by the RCU
   flavor consolidation, including inlining now-trivial functions,
   updating comments and definitions, and removing now-unneeded
   rcutorture scenarios.

 - Initial changes to RCU to better promote forward progress of grace
   periods, including fixing a bug found by Marius Hillenbrand and
   David Woodhouse, with the fix suggested by Peter Zijlstra.

 - Now that there is only one flavor of RCU in any running kernel, there
   is also only one rcu_data structure per CPU.  This means that the
   rcu_dynticks structure can be merged into the rcu_data structure, a
   task taken on by this branch.  This branch also contains a -rt-related
   fix from Mike Galbraith.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -182,7 +182,7 @@ static inline void list_replace_rcu(struct list_head *old,
  * @list:	the RCU-protected list to splice
  * @prev:	points to the last element of the existing list
  * @next:	points to the first element of the existing list
- * @sync:	function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync:	synchronize_rcu, synchronize_rcu_expedited, ...
  *
  * The list pointed to by @prev and @next can be RCU-read traversed
  * concurrently with this function.
@@ -240,7 +240,7 @@ static inline void __list_splice_init_rcu(struct list_head *list,
  * designed for stacks.
  * @list:	the RCU-protected list to splice
  * @head:	the place in the existing list to splice the first list into
- * @sync:	function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync:	synchronize_rcu, synchronize_rcu_expedited, ...
  */
 static inline void list_splice_init_rcu(struct list_head *list,
 					struct list_head *head,
@@ -255,7 +255,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * list, designed for queues.
  * @list:	the RCU-protected list to splice
  * @head:	the place in the existing list to splice the first list into
- * @sync:	function to sync: synchronize_rcu(), synchronize_sched(), ...
+ * @sync:	synchronize_rcu, synchronize_rcu_expedited, ...
  */
 static inline void list_splice_tail_init_rcu(struct list_head *list,
 					     struct list_head *head,
@@ -359,13 +359,12 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * @type:	the type of the struct this is embedded in.
  * @member:	the name of the list_head within the struct.
  *
- * This primitive may safely run concurrently with the _rcu list-mutation
- * primitives such as list_add_rcu(), but requires some implicit RCU
- * read-side guarding.  One example is running within a special
- * exception-time environment where preemption is disabled and where
- * lockdep cannot be invoked (in which case updaters must use RCU-sched,
- * as in synchronize_sched(), call_rcu_sched(), and friends).  Another
- * example is when items are added to the list, but never deleted.
+ * This primitive may safely run concurrently with the _rcu
+ * list-mutation primitives such as list_add_rcu(), but requires some
+ * implicit RCU read-side guarding.  One example is running within a special
+ * exception-time environment where preemption is disabled and where lockdep
+ * cannot be invoked.  Another example is when items are added to the list,
+ * but never deleted.
  */
 #define list_entry_lockless(ptr, type, member) \
 	container_of((typeof(ptr))READ_ONCE(ptr), type, member)
@@ -376,13 +375,12 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * @head:	the head for your list.
  * @member:	the name of the list_struct within the struct.
  *
- * This primitive may safely run concurrently with the _rcu list-mutation
- * primitives such as list_add_rcu(), but requires some implicit RCU
- * read-side guarding.  One example is running within a special
- * exception-time environment where preemption is disabled and where
- * lockdep cannot be invoked (in which case updaters must use RCU-sched,
- * as in synchronize_sched(), call_rcu_sched(), and friends).  Another
- * example is when items are added to the list, but never deleted.
+ * This primitive may safely run concurrently with the _rcu
+ * list-mutation primitives such as list_add_rcu(), but requires some
+ * implicit RCU read-side guarding.  One example is running within a special
+ * exception-time environment where preemption is disabled and where lockdep
+ * cannot be invoked.  Another example is when items are added to the list,
+ * but never deleted.
  */
 #define list_for_each_entry_lockless(pos, head, member) \
 	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
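Note: the "items are added but never deleted" case the comment above describes is easiest to see in an add-only list. A minimal sketch, not part of this commit; struct foo, foo_add(), and foo_find() are hypothetical:

#include <linux/rculist.h>

struct foo {
	int key;
	struct list_head node;
};

static LIST_HEAD(foo_list);	/* add-only: entries are never removed */

/* Updater: publishes a new entry; serialized by the caller's lock. */
static void foo_add(struct foo *fp)
{
	list_add_rcu(&fp->node, &foo_list);
}

/* Reader: entries are never freed, so only implicit guarding is needed. */
static struct foo *foo_find(int key)
{
	struct foo *fp;

	list_for_each_entry_lockless(fp, &foo_list, node)
		if (fp->key == key)
			return fp;
	return NULL;
}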
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -48,23 +48,14 @@
 #define ulong2long(a)		(*(long *)(&(a)))
 
 /* Exported common interfaces */
-
-#ifdef CONFIG_PREEMPT_RCU
 void call_rcu(struct rcu_head *head, rcu_callback_t func);
-#else /* #ifdef CONFIG_PREEMPT_RCU */
-#define	call_rcu	call_rcu_sched
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
-void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
-void synchronize_sched(void);
 void rcu_barrier_tasks(void);
+void synchronize_rcu(void);
 
 #ifdef CONFIG_PREEMPT_RCU
 
 void __rcu_read_lock(void);
 void __rcu_read_unlock(void);
-void synchronize_rcu(void);
 
 /*
  * Defined as a macro as it is a very low level header included from
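With the flavors consolidated, call_rcu() above is the single deferred-free interface in both PREEMPT and !PREEMPT builds. A minimal usage sketch, not from this commit; struct foo and foo_reclaim() are hypothetical:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *head)
{
	/* container_of() recovers the enclosing structure. */
	kfree(container_of(head, struct foo, rcu));
}

/* Defer freeing @fp until all pre-existing RCU readers have finished. */
static void foo_release(struct foo *fp)
{
	call_rcu(&fp->rcu, foo_reclaim);
}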
@@ -88,11 +79,6 @@ static inline void __rcu_read_unlock(void)
 	preempt_enable();
 }
 
-static inline void synchronize_rcu(void)
-{
-	synchronize_sched();
-}
-
 static inline int rcu_preempt_depth(void)
 {
 	return 0;
@@ -103,8 +89,6 @@ static inline int rcu_preempt_depth(void)
 /* Internal to kernel */
 void rcu_init(void);
 extern int rcu_scheduler_active __read_mostly;
-void rcu_sched_qs(void);
-void rcu_bh_qs(void);
 void rcu_check_callbacks(int user);
 void rcu_report_dead(unsigned int cpu);
 void rcutree_migrate_callbacks(int cpu);
@@ -135,11 +119,10 @@ static inline void rcu_init_nohz(void) { }
  * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
  * @a: Code that RCU needs to pay attention to.
  *
- * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
- * in the inner idle loop, that is, between the rcu_idle_enter() and
- * the rcu_idle_exit() -- RCU will happily ignore any such read-side
- * critical sections.  However, things like powertop need tracepoints
- * in the inner idle loop.
+ * RCU read-side critical sections are forbidden in the inner idle loop,
+ * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU
+ * will happily ignore any such read-side critical sections.  However,
+ * things like powertop need tracepoints in the inner idle loop.
  *
  * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
  * will tell RCU that it needs to pay attention, invoke its argument
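A usage sketch of the macro documented above; trace_my_idle_event() is a hypothetical stand-in for any statement containing RCU readers:

#include <linux/rcupdate.h>

/* Hypothetical tracepoint-like hook with RCU readers inside. */
void trace_my_idle_event(int cpu);

static void report_idle_event(int cpu)
{
	/*
	 * Between rcu_idle_enter() and rcu_idle_exit(), ordinary RCU
	 * readers are forbidden; RCU_NONIDLE() makes RCU pay attention
	 * for the duration of its argument only.
	 */
	RCU_NONIDLE(trace_my_idle_event(cpu));
}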
@@ -167,20 +150,16 @@ static inline void rcu_init_nohz(void) { }
 		if (READ_ONCE((t)->rcu_tasks_holdout)) \
 			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
 	} while (0)
-#define rcu_note_voluntary_context_switch(t) \
-	do { \
-		rcu_all_qs(); \
-		rcu_tasks_qs(t); \
-	} while (0)
+#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU */
 #define rcu_tasks_qs(t)	do { } while (0)
-#define rcu_note_voluntary_context_switch(t)	rcu_all_qs()
-#define call_rcu_tasks call_rcu_sched
-#define synchronize_rcu_tasks synchronize_sched
+#define rcu_note_voluntary_context_switch(t) do { } while (0)
+#define call_rcu_tasks call_rcu
+#define synchronize_rcu_tasks synchronize_rcu
 static inline void exit_tasks_rcu_start(void) { }
 static inline void exit_tasks_rcu_finish(void) { }
 #endif /* #else #ifdef CONFIG_TASKS_RCU */
@@ -325,9 +304,8 @@ static inline void rcu_preempt_sleep_check(void) { }
  * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
  * and rcu_assign_pointer().  Some of these could be folded into their
  * callers, but they are left separate in order to ease introduction of
- * multiple flavors of pointers to match the multiple flavors of RCU
- * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
- * the future.
+ * multiple pointers markings to match different RCU implementations
+ * (e.g., __srcu), should this make sense in the future.
  */
 
 #ifdef __CHECKER__
@@ -686,14 +664,9 @@ static inline void rcu_read_unlock(void)
 /**
  * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
  *
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
- * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
- * softirq handler to be a quiescent state, a process in RCU read-side
- * critical section must be protected by disabling softirqs. Read-side
- * critical sections in interrupt context can use just rcu_read_lock(),
- * though this should at least be commented to avoid confusing people
- * reading the code.
+ * This is equivalent of rcu_read_lock(), but also disables softirqs.
+ * Note that anything else that disables softirqs can also serve as
+ * an RCU read-side critical section.
  *
  * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
  * must occur in the same context, for example, it is illegal to invoke
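A short read-side sketch for the bh variant above, not from this commit; struct stats and net_stats are hypothetical:

#include <linux/rcupdate.h>

struct stats { int hits; };
static struct stats __rcu *net_stats;	/* hypothetical; updated via call_rcu() */

static int stats_peek_hits(void)
{
	struct stats *s;
	int hits = 0;

	rcu_read_lock_bh();		/* also disables softirqs */
	s = rcu_dereference_bh(net_stats);
	if (s)
		hits = s->hits;
	rcu_read_unlock_bh();
	return hits;
}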
@@ -726,10 +699,9 @@ static inline void rcu_read_unlock_bh(void)
 /**
  * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
  *
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_sched() or synchronize_rcu_sched().
- * Read-side critical sections can also be introduced by anything that
- * disables preemption, including local_irq_disable() and friends.
+ * This is equivalent of rcu_read_lock(), but disables preemption.
+ * Read-side critical sections can also be introduced by anything else
+ * that disables preemption, including local_irq_disable() and friends.
  *
  * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
  * must occur in the same context, for example, it is illegal to invoke
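And the corresponding sched-variant sketch, again hypothetical (struct cfg, cur_cfg):

#include <linux/rcupdate.h>

struct cfg { int val; };
static struct cfg __rcu *cur_cfg;	/* hypothetical RCU-protected pointer */

static int cfg_read_val(void)
{
	struct cfg *c;
	int val = 0;

	rcu_read_lock_sched();		/* disables preemption */
	c = rcu_dereference_sched(cur_cfg);
	if (c)
		val = c->val;
	rcu_read_unlock_sched();
	return val;
}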
@@ -885,4 +857,96 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 
 #endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
 
+
+/* Has the specified rcu_head structure been handed to call_rcu()? */
+
+/*
+ * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu()
+ * @rhp: The rcu_head structure to initialize.
+ *
+ * If you intend to invoke rcu_head_after_call_rcu() to test whether a
+ * given rcu_head structure has already been passed to call_rcu(), then
+ * you must also invoke this rcu_head_init() function on it just after
+ * allocating that structure.  Calls to this function must not race with
+ * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation.
+ */
+static inline void rcu_head_init(struct rcu_head *rhp)
+{
+	rhp->func = (rcu_callback_t)~0L;
+}
+
+/*
+ * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()?
+ * @rhp: The rcu_head structure to test.
+ * @func: The function passed to call_rcu() along with @rhp.
+ *
+ * Returns @true if the @rhp has been passed to call_rcu() with @func,
+ * and @false otherwise.  Emits a warning in any other case, including
+ * the case where @rhp has already been invoked after a grace period.
+ * Calls to this function must not race with callback invocation.  One way
+ * to avoid such races is to enclose the call to rcu_head_after_call_rcu()
+ * in an RCU read-side critical section that includes a read-side fetch
+ * of the pointer to the structure containing @rhp.
+ */
+static inline bool
+rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
+{
+	if (READ_ONCE(rhp->func) == f)
+		return true;
+	WARN_ON_ONCE(READ_ONCE(rhp->func) != (rcu_callback_t)~0L);
+	return false;
+}
+
+
+/* Transitional pre-consolidation compatibility definitions. */
+
+static inline void synchronize_rcu_bh(void)
+{
+	synchronize_rcu();
+}
+
+static inline void synchronize_rcu_bh_expedited(void)
+{
+	synchronize_rcu_expedited();
+}
+
+static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
+{
+	call_rcu(head, func);
+}
+
+static inline void rcu_barrier_bh(void)
+{
+	rcu_barrier();
+}
+
+static inline void synchronize_sched(void)
+{
+	synchronize_rcu();
+}
+
+static inline void synchronize_sched_expedited(void)
+{
+	synchronize_rcu_expedited();
+}
+
+static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
+{
+	call_rcu(head, func);
+}
+
+static inline void rcu_barrier_sched(void)
+{
+	rcu_barrier();
+}
+
+static inline unsigned long get_state_synchronize_sched(void)
+{
+	return get_state_synchronize_rcu();
+}
+
+static inline void cond_synchronize_sched(unsigned long oldstate)
+{
+	cond_synchronize_rcu(oldstate);
+}
+
 #endif /* __LINUX_RCUPDATE_H */
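A sketch of the debug pattern the two new helpers above enable; struct bar, bar_alloc(), and the caller's locking context are hypothetical:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct bar {
	int data;
	struct rcu_head rh;
};

static void bar_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct bar, rh));
}

static struct bar *bar_alloc(void)
{
	struct bar *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (bp)
		rcu_head_init(&bp->rh);	/* enable rcu_head_after_call_rcu() */
	return bp;
}

/* Caller holds rcu_read_lock() and obtained bp via rcu_dereference(). */
static bool bar_being_freed(struct bar *bp)
{
	return rcu_head_after_call_rcu(&bp->rh, bar_cb);
}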
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -33,17 +33,17 @@ do { \
 
 /**
  * synchronize_rcu_mult - Wait concurrently for multiple grace periods
- * @...: List of call_rcu() functions for the flavors to wait on.
+ * @...: List of call_rcu() functions for different grace periods to wait on
  *
- * This macro waits concurrently for multiple flavors of RCU grace periods.
- * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
- * on concurrent RCU and RCU-bh grace periods.  Waiting on a give SRCU
+ * This macro waits concurrently for multiple types of RCU grace periods.
+ * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait
+ * on concurrent RCU and RCU-tasks grace periods.  Waiting on a give SRCU
  * domain requires you to write a wrapper function for that SRCU domain's
  * call_srcu() function, supplying the corresponding srcu_struct.
  *
- * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
- * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
- * is automatically a grace period.
+ * If Tiny RCU, tell _wait_rcu_gp() does not bother waiting for RCU,
+ * given that anywhere synchronize_rcu_mult() can be called is automatically
+ * a grace period.
  */
 #define synchronize_rcu_mult(...) \
 	_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
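A sketch of the SRCU wrapper the comment above calls for; my_srcu is a hypothetical SRCU domain:

#include <linux/rcupdate_wait.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);

/* Wrapper with call_rcu()'s signature, bound to one SRCU domain. */
static void call_my_srcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(&my_srcu, head, func);
}

static void wait_for_both(void)
{
	/* Waits concurrently for an RCU and an SRCU grace period. */
	synchronize_rcu_mult(call_rcu, call_my_srcu);
}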
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,12 +27,6 @@
 
 #include <linux/ktime.h>
 
-struct rcu_dynticks;
-static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
-{
-	return 0;
-}
-
 /* Never flag non-existent other CPUs! */
 static inline bool rcu_eqs_special_set(int cpu) { return false; }
 
@@ -46,53 +40,28 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
 	might_sleep();
 }
 
-static inline unsigned long get_state_synchronize_sched(void)
-{
-	return 0;
-}
-
-static inline void cond_synchronize_sched(unsigned long oldstate)
-{
-	might_sleep();
-}
-
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
+extern void rcu_barrier(void);
 
 static inline void synchronize_rcu_expedited(void)
 {
-	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
-}
-
-static inline void rcu_barrier(void)
-{
-	rcu_barrier_sched();	/* Only one CPU, so only one list of callbacks! */
-}
-
-static inline void synchronize_rcu_bh(void)
-{
-	synchronize_sched();
-}
-
-static inline void synchronize_rcu_bh_expedited(void)
-{
-	synchronize_sched();
-}
-
-static inline void synchronize_sched_expedited(void)
-{
-	synchronize_sched();
+	synchronize_rcu();
 }
 
-static inline void kfree_call_rcu(struct rcu_head *head,
-				  rcu_callback_t func)
+static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
 	call_rcu(head, func);
 }
 
+void rcu_qs(void);
+
+static inline void rcu_softirq_qs(void)
+{
+	rcu_qs();
+}
+
 #define rcu_note_context_switch(preempt) \
 	do { \
-		rcu_sched_qs(); \
+		rcu_qs(); \
 		rcu_tasks_qs(current); \
 	} while (0)
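kfree_call_rcu() above is the Tiny-RCU backend for the public kfree_rcu() macro. A usage sketch, with a hypothetical struct blob:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct blob {
	int len;
	struct rcu_head rh;
};

static void blob_free(struct blob *bp)
{
	/* Queues bp for kfree() after a grace period; no callback needed. */
	kfree_rcu(bp, rh);
}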
@@ -108,6 +77,7 @@ static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
  */
 static inline void rcu_virt_note_context_switch(int cpu) { }
 static inline void rcu_cpu_stall_reset(void) { }
+static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
 static inline void rcu_idle_enter(void) { }
 static inline void rcu_idle_exit(void) { }
 static inline void rcu_irq_enter(void) { }
@@ -115,6 +85,11 @@ static inline void rcu_irq_exit_irqson(void) { }
 static inline void rcu_irq_enter_irqson(void) { }
 static inline void rcu_irq_exit(void) { }
 static inline void exit_rcu(void) { }
+static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
+{
+	return false;
+}
+static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
 #ifdef CONFIG_SRCU
 void rcu_scheduler_starting(void);
 #else /* #ifndef CONFIG_SRCU */
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,6 +30,7 @@
 #ifndef __LINUX_RCUTREE_H
 #define __LINUX_RCUTREE_H
 
+void rcu_softirq_qs(void);
 void rcu_note_context_switch(bool preempt);
 int rcu_needs_cpu(u64 basem, u64 *nextevt);
 void rcu_cpu_stall_reset(void);
@@ -44,41 +45,13 @@ static inline void rcu_virt_note_context_switch(int cpu)
 	rcu_note_context_switch(false);
 }
 
-void synchronize_rcu_bh(void);
-void synchronize_sched_expedited(void);
 void synchronize_rcu_expedited(void);
-
 void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
 
-/**
- * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
- *
- * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
- * approach to force the grace period to end quickly.  This consumes
- * significant time on all CPUs and is unfriendly to real-time workloads,
- * so is thus not recommended for any sort of common-case code.  In fact,
- * if you are using synchronize_rcu_bh_expedited() in a loop, please
- * restructure your code to batch your updates, and then use a single
- * synchronize_rcu_bh() instead.
- *
- * Note that it is illegal to call this function while holding any lock
- * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
- * to call this function from a CPU-hotplug notifier.  Failing to observe
- * these restriction will result in deadlock.
- */
-static inline void synchronize_rcu_bh_expedited(void)
-{
-	synchronize_sched_expedited();
-}
-
 void rcu_barrier(void);
-void rcu_barrier_bh(void);
-void rcu_barrier_sched(void);
 bool rcu_eqs_special_set(int cpu);
 unsigned long get_state_synchronize_rcu(void);
 void cond_synchronize_rcu(unsigned long oldstate);
-unsigned long get_state_synchronize_sched(void);
-void cond_synchronize_sched(unsigned long oldstate);
 
 void rcu_idle_enter(void);
 void rcu_idle_exit(void);
@@ -93,7 +66,9 @@ void rcu_scheduler_starting(void);
 extern int rcu_scheduler_active __read_mostly;
 void rcu_end_inkernel_boot(void);
 bool rcu_is_watching(void);
+#ifndef CONFIG_PREEMPT
 void rcu_all_qs(void);
+#endif
 
 /* RCUtree hotplug events */
 int rcutree_prepare_cpu(unsigned int cpu);
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -571,12 +571,8 @@ union rcu_special {
 	struct {
 		u8			blocked;
 		u8			need_qs;
-		u8			exp_need_qs;
-
-		/* Otherwise the compiler can store garbage here: */
-		u8			pad;
 	} b; /* Bits. */
-	u32 s; /* Set of bits. */
+	u16 s; /* Set of bits. */
 };
 
 enum perf_event_task_context {
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -105,12 +105,13 @@ struct srcu_struct {
 #define SRCU_STATE_SCAN2	2
 
 #define __SRCU_STRUCT_INIT(name, pcpu_name)				\
-{									\
-	.sda = &pcpu_name,						\
-	.lock = __SPIN_LOCK_UNLOCKED(name.lock),			\
-	.srcu_gp_seq_needed = 0 - 1,					\
-	__SRCU_DEP_MAP_INIT(name)					\
-}
+{									\
+	.sda = &pcpu_name,						\
+	.lock = __SPIN_LOCK_UNLOCKED(name.lock),			\
+	.srcu_gp_seq_needed = -1UL,					\
+	.work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0),	\
+	__SRCU_DEP_MAP_INIT(name)					\
+}
 
 /*
  * Define and initialize a srcu struct at build time.
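The added .work initializer is part of what lets a statically allocated srcu_struct accept callbacks before the workqueue and scheduler are fully up, per the changelog's "call_srcu() very early in the boot sequence" item. A hedged sketch under that assumption; early_srcu and early_srcu_test() are hypothetical:

#include <linux/init.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(early_srcu);		/* statically initialized domain */
static struct rcu_head early_rh;

static void early_cb(struct rcu_head *rhp)
{
	/* Invoked once an SRCU grace period has elapsed. */
}

static int __init early_srcu_test(void)
{
	/* Per the changelog, queuing SRCU callbacks this early is now legal. */
	call_srcu(&early_srcu, &early_rh, early_cb);
	return 0;
}
early_initcall(early_srcu_test);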
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -77,7 +77,7 @@ void torture_shutdown_absorb(const char *title);
 int torture_shutdown_init(int ssecs, void (*cleanup)(void));
 
 /* Task stuttering, which forces load/no-load transitions. */
-void stutter_wait(const char *title);
+bool stutter_wait(const char *title);
 int torture_stutter_init(int s);
 
 /* Initialization and cleanup. */