Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU updates from Thomas Gleixner:
"A large update to RCU:
Preparatory work for consolidating the RCU flavors:
- Introduce grace-period sequence numbers to the RCU-bh, RCU-preempt,
and RCU-sched flavors, replacing the old ->gpnum and ->completed
pair of fields.
This change allows lockless code to obtain the complete
grace-period state with a single READ_ONCE(), which is needed to
maintain tolerable lock contention during the upcoming
consolidation of the three RCU flavors (a sketch of the encoding
follows this message).
Note that grace-period sequence numbers are already used by
rcu_barrier(), expedited RCU grace periods, and SRCU, and are thus
already heavily used and well-tested. Joel Fernandes contributed a
number of excellent fixes and improvements.
- Clean up some grace-period-reporting loose ends, including
improving the handling of quiescent states from offline CPUs and
fixing some false-positive WARN_ON_ONCE() invocations.
(Strictly speaking, the WARN_ON_ONCE() invocations were quite
correct, but their invariants were (harmlessly) violated by the
earlier sloppy handling of quiescent states from offline CPUs.)
In addition, improve grace-period forward-progress guarantees so as
to allow removal of fail-safe checks that required otherwise
needless lock acquisitions. Finally, add more diagnostics to help
debug the upcoming consolidation of the RCU-bh, RCU-preempt, and
RCU-sched flavors.
The rest:
- SRCU updates
- Updates to rcutorture and associated scripting
- The usual pile of miscellaneous fixes"
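
As promised above, here is a minimal sketch of the grace-period
sequence-number encoding. It is an illustrative model, not the kernel
source: the my_seq_* names are hypothetical, while the real helpers
such as rcu_seq_snap() and rcu_seq_done() live in kernel/rcu/rcu.h.
The low-order bits record whether a grace period is in progress and
the remaining bits count grace periods, so a single READ_ONCE() yields
the complete state.

	#define MY_SEQ_CTR_SHIFT	2
	#define MY_SEQ_STATE_MASK	((1UL << MY_SEQ_CTR_SHIFT) - 1)

	/* One lockless load captures the complete grace-period state. */
	static inline unsigned long my_seq_read(unsigned long *sp)
	{
		return READ_ONCE(*sp);
	}

	/* Count of grace periods (upper bits of the snapshot). */
	static inline unsigned long my_seq_ctr(unsigned long s)
	{
		return s >> MY_SEQ_CTR_SHIFT;
	}

	/* Phase bits: zero when idle, nonzero while a grace period runs. */
	static inline unsigned long my_seq_state(unsigned long s)
	{
		return s & MY_SEQ_STATE_MASK;
	}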
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (118 commits)
rcutorture: Fix rcu_barrier successes counter
rcutorture: Add support to detect if boost kthread prio is too low
rcutorture: Use monotonic timestamp for stall detection
rcutorture: Make boost test more robust
rcutorture: Disable RT throttling for boost tests
rcutorture: Emphasize testing of single reader protection type
rcutorture: Handle extended read-side critical sections
rcutorture: Make rcu_torture_timer() use rcu_torture_one_read()
rcutorture: Use per-CPU random state for rcu_torture_timer()
rcutorture: Use atomic increment for n_rcu_torture_timers
rcutorture: Extract common code from rcu_torture_reader()
rcuperf: Remove unused torturing_tasks() function
rcu: Remove rcutorture test version and sequence number
rcutorture: Change units of onoff_interval to jiffies
rcu: Assign higher prio to RCU threads if rcutorture is built-in
rculist: Improve documentation for list_for_each_entry_from_rcu()
srcu: Add grace-period number to rcutorture statistics printout
rcu: Print stall-warning NMI dyntick state in hexadecimal
MAINTAINERS: Update RCU, SRCU, and TORTURE-TEST entries
rcu: Make rcu_seq_diff() more exact
...
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -396,7 +396,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * @member:	the name of the list_head within the struct.
  *
  * Continue to iterate over list of given type, continuing after
- * the current position.
+ * the current position which must have been in the list when the RCU read
+ * lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_from_rcu() except
+ * this starts after the given position and that one starts at the given
+ * position.
  */
 #define list_for_each_entry_continue_rcu(pos, head, member) \
 	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
@@ -411,6 +420,14 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  *
  * Iterate over the tail of a list starting from a given position,
  * which must have been in the list when the RCU read lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_continue_rcu() except
+ * this starts from the given position and that one starts from the position
+ * after the given position.
  */
 #define list_for_each_entry_from_rcu(pos, head, member) \
 	for (; &(pos)->member != (head); \
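The requirement in these comments is easiest to see in a short usage
sketch. The code below is hypothetical (struct foo, walk_tail_from(),
and consume() are invented for illustration) and is not part of the
patch:

	struct foo {
		int val;
		struct list_head list;
	};

	/* Caller holds rcu_read_lock() and found pos during a previous
	 * walk in this same RCU read-side critical section (or holds a
	 * non-RCU reference), so pos is known to still be in the list.
	 */
	static void walk_tail_from(struct list_head *head, struct foo *pos)
	{
		/* Visits pos itself, then the rest of the tail;
		 * list_for_each_entry_continue_rcu() would instead
		 * begin at the entry after pos.
		 */
		list_for_each_entry_from_rcu(pos, head, list)
			consume(pos->val);
	}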
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -64,7 +64,6 @@ void rcu_barrier_tasks(void);
 
 void __rcu_read_lock(void);
 void __rcu_read_unlock(void);
-void rcu_read_unlock_special(struct task_struct *t);
 void synchronize_rcu(void);
 
 /*
@@ -159,11 +158,11 @@ static inline void rcu_init_nohz(void) { }
 } while (0)
 
 /*
- * Note a voluntary context switch for RCU-tasks benefit.  This is a
- * macro rather than an inline function to avoid #include hell.
+ * Note a quasi-voluntary context switch for RCU-tasks's benefit.
+ * This is a macro rather than an inline function to avoid #include hell.
  */
 #ifdef CONFIG_TASKS_RCU
-#define rcu_note_voluntary_context_switch_lite(t) \
+#define rcu_tasks_qs(t) \
 	do { \
 		if (READ_ONCE((t)->rcu_tasks_holdout)) \
 			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
@@ -171,14 +170,14 @@ static inline void rcu_init_nohz(void) { }
 #define rcu_note_voluntary_context_switch(t) \
 	do { \
 		rcu_all_qs(); \
-		rcu_note_voluntary_context_switch_lite(t); \
+		rcu_tasks_qs(t); \
 	} while (0)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU */
-#define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
+#define rcu_tasks_qs(t) do { } while (0)
 #define rcu_note_voluntary_context_switch(t) rcu_all_qs()
 #define call_rcu_tasks call_rcu_sched
 #define synchronize_rcu_tasks synchronize_sched
@@ -195,8 +194,8 @@ static inline void exit_tasks_rcu_finish(void) { }
  */
 #define cond_resched_tasks_rcu_qs() \
 do { \
-	if (!cond_resched()) \
-		rcu_note_voluntary_context_switch_lite(current); \
+	rcu_tasks_qs(current); \
+	cond_resched(); \
 } while (0)
 
 /*
@@ -567,8 +566,8 @@ static inline void rcu_preempt_sleep_check(void) { }
  * This is simply an identity function, but it documents where a pointer
  * is handed off from RCU to some other synchronization mechanism, for
  * example, reference counting or locking.  In C11, it would map to
- * kill_dependency().  It could be used as follows:
- * ``
+ * kill_dependency().  It could be used as follows::
+ *
  *	rcu_read_lock();
  *	p = rcu_dereference(gp);
  *	long_lived = is_long_lived(p);
@@ -579,7 +578,6 @@ static inline void rcu_preempt_sleep_check(void) { }
  *		p = rcu_pointer_handoff(p);
  *	}
  *	rcu_read_unlock();
- *``
  */
 #define rcu_pointer_handoff(p) (p)
 
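The reworked cond_resched_tasks_rcu_qs() above now reports the
RCU-tasks quiescent state unconditionally rather than only when
cond_resched() fails to reschedule. A hypothetical call site
(my_kthread() and do_a_unit_of_work() are invented for illustration)
would be a long-running kthread loop:

	static int my_kthread(void *arg)
	{
		while (!kthread_should_stop()) {
			do_a_unit_of_work();
			/* Report an RCU-tasks quiescent state and give
			 * the scheduler a chance to run; after this
			 * change the QS is reported via rcu_tasks_qs()
			 * even if cond_resched() does nothing.
			 */
			cond_resched_tasks_rcu_qs();
		}
		return 0;
	}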
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -93,7 +93,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 #define rcu_note_context_switch(preempt) \
 	do { \
 		rcu_sched_qs(); \
-		rcu_note_voluntary_context_switch_lite(current); \
+		rcu_tasks_qs(current); \
 	} while (0)
 
 static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -195,6 +195,16 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
 	return retval;
 }
 
+/* Used by tracing, cannot be traced and cannot invoke lockdep. */
+static inline notrace int
+srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp)
+{
+	int retval;
+
+	retval = __srcu_read_lock(sp);
+	return retval;
+}
+
 /**
  * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
  * @sp: srcu_struct in which to unregister the old reader.
@@ -209,6 +219,13 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
 	__srcu_read_unlock(sp, idx);
 }
 
+/* Used by tracing, cannot be traced and cannot call lockdep. */
+static inline notrace void
+srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp)
+{
+	__srcu_read_unlock(sp, idx);
+}
+
 /**
  * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
  *
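The new _notrace variants let tracing code enter an SRCU read-side
critical section without recursing into the tracer or lockdep. A
minimal sketch, assuming a hypothetical srcu_struct and payload
(my_srcu, my_ptr, struct my_data, and do_something_notrace() are
invented for illustration):

	DEFINE_SRCU(my_srcu);
	static struct my_data __rcu *my_ptr;

	static notrace void trace_path_reader(void)
	{
		struct my_data *p;
		int idx;

		idx = srcu_read_lock_notrace(&my_srcu);
		p = srcu_dereference(my_ptr, &my_srcu);
		if (p)
			do_something_notrace(p);
		srcu_read_unlock_notrace(&my_srcu, idx);
	}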
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -64,6 +64,8 @@ struct torture_random_state {
 	long trs_count;
 };
 #define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 }
+#define DEFINE_TORTURE_RANDOM_PERCPU(name) \
+	DEFINE_PER_CPU(struct torture_random_state, name)
 unsigned long torture_random(struct torture_random_state *trsp);
 
 /* Task shuffler, which causes CPUs to occasionally go idle. */
@@ -79,7 +81,7 @@ void stutter_wait(const char *title);
 int torture_stutter_init(int s);
 
 /* Initialization and cleanup. */
-bool torture_init_begin(char *ttype, bool v);
+bool torture_init_begin(char *ttype, int v);
 void torture_init_end(void);
 bool torture_cleanup_begin(void);
 void torture_cleanup_end(void);
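The new DEFINE_TORTURE_RANDOM_PERCPU() macro gives each CPU its own
torture_random_state, avoiding cross-CPU contention on shared random
state; rcutorture uses this for its timer handler. A minimal sketch
with invented names (my_rand and my_timer_func are hypothetical):

	DEFINE_TORTURE_RANDOM_PERCPU(my_rand);

	static void my_timer_func(struct timer_list *unused)
	{
		struct torture_random_state *trsp;
		unsigned long r;

		/* Use this CPU's private random state. */
		trsp = this_cpu_ptr(&my_rand);
		r = torture_random(trsp);
		/* ... drive a randomized test action with r ... */
	}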