Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull RCU updates from Ingo Molnar:
 "This cycle's RCU changes were:

   - A few more RCU flavor consolidation cleanups.

   - Updates to RCU's list-traversal macros improving lockdep usability.

   - Forward-progress improvements for no-CBs CPUs: Avoid ignoring
     incoming callbacks during grace-period waits.

   - Forward-progress improvements for no-CBs CPUs: Use ->cblist
     structure to take advantage of others' grace periods.

   - Also added a small commit that avoids needlessly inflicting
     scheduler-clock ticks on callback-offloaded CPUs.

   - Forward-progress improvements for no-CBs CPUs: Reduce contention
     on ->nocb_lock guarding ->cblist.

   - Forward-progress improvements for no-CBs CPUs: Add ->nocb_bypass
     list to further reduce contention on ->nocb_lock guarding ->cblist.

   - Miscellaneous fixes.

   - Torture-test updates.

   - minor LKMM updates"

* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (86 commits)
  MAINTAINERS: Update from paulmck@linux.ibm.com to paulmck@kernel.org
  rcu: Don't include <linux/ktime.h> in rcutiny.h
  rcu: Allow rcu_do_batch() to dynamically adjust batch sizes
  rcu/nocb: Don't wake no-CBs GP kthread if timer posted under overload
  rcu/nocb: Reduce __call_rcu_nocb_wake() leaf rcu_node ->lock contention
  rcu/nocb: Reduce nocb_cb_wait() leaf rcu_node ->lock contention
  rcu/nocb: Advance CBs after merge in rcutree_migrate_callbacks()
  rcu/nocb: Avoid synchronous wakeup in __call_rcu_nocb_wake()
  rcu/nocb: Print no-CBs diagnostics when rcutorture writer unduly delayed
  rcu/nocb: EXP Check use and usefulness of ->nocb_lock_contended
  rcu/nocb: Add bypass callback queueing
  rcu/nocb: Atomic ->len field in rcu_segcblist structure
  rcu/nocb: Unconditionally advance and wake for excessive CBs
  rcu/nocb: Reduce ->nocb_lock contention with separate ->nocb_gp_lock
  rcu/nocb: Reduce contention at no-CBs invocation-done time
  rcu/nocb: Reduce contention at no-CBs registry-time CB advancement
  rcu/nocb: Round down for number of no-CBs grace-period kthreads
  rcu/nocb: Avoid ->nocb_lock capture by corresponding CPU
  rcu/nocb: Avoid needless wakeups of no-CBs grace-period kthread
  rcu/nocb: Make __call_rcu_nocb_wake() safe for many callbacks
  ...
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -14,6 +14,9 @@
 #ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H
 #define __INCLUDE_LINUX_RCU_SEGCBLIST_H
 
+#include <linux/types.h>
+#include <linux/atomic.h>
+
 /* Simple unsegmented callback lists. */
 struct rcu_cblist {
 	struct rcu_head *head;
@@ -65,8 +68,14 @@ struct rcu_segcblist {
 	struct rcu_head *head;
 	struct rcu_head **tails[RCU_CBLIST_NSEGS];
 	unsigned long gp_seq[RCU_CBLIST_NSEGS];
+#ifdef CONFIG_RCU_NOCB_CPU
+	atomic_long_t len;
+#else
 	long len;
+#endif
 	long len_lazy;
+	u8 enabled;
+	u8 offloaded;
 };
 
 #define RCU_SEGCBLIST_INITIALIZER(n) \
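The conditionally atomic ->len field is what lets the no-CBs bypass paths adjust the callback count without holding ->nocb_lock. Below is a minimal sketch of that access pattern under the same #ifdef; the helper name demo_segcblist_add_len is hypothetical, not one added by this series.

#include <linux/atomic.h>
#include <linux/rcu_segcblist.h>

/* Hypothetical helper: adjust ->len whether or not it is atomic. */
static void demo_segcblist_add_len(struct rcu_segcblist *rsclp, long v)
{
#ifdef CONFIG_RCU_NOCB_CPU
	smp_mb__before_atomic();	/* Order prior enqueues before the count update. */
	atomic_long_add(v, &rsclp->len);
	smp_mb__after_atomic();		/* Order the count update before later reads. */
#else
	/* Caller is assumed to hold whatever lock guards this list. */
	WRITE_ONCE(rsclp->len, rsclp->len + v);
#endif
}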
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -31,9 +31,7 @@ struct rcu_sync {
  */
 static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
 {
-	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
-			 !rcu_read_lock_bh_held() &&
-			 !rcu_read_lock_sched_held(),
+	RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
 			 "suspicious rcu_sync_is_idle() usage");
 	return !READ_ONCE(rsp->gp_state); /* GP_IDLE */
 }
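rcu_sync_is_idle() is meant to be called from inside some flavor of RCU reader, which is exactly what the consolidated rcu_read_lock_any_held() check now enforces. A hedged sketch of the usual fast-path/slow-path split follows; the function and variable names are hypothetical.

#include <linux/rcupdate.h>
#include <linux/rcu_sync.h>

/* Hypothetical reader: rely on RCU alone while the rcu_sync instance is idle. */
static void demo_read_side(struct rcu_sync *rss)
{
	rcu_read_lock();
	if (rcu_sync_is_idle(rss)) {
		/* Fast path: no writer-side grace period in progress. */
	} else {
		/* Slow path: fall back to heavier synchronization. */
	}
	rcu_read_unlock();
}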
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -40,6 +40,24 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
  */
 #define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))
 
+/*
+ * Check during list traversal that we are within an RCU reader
+ */
+
+#define check_arg_count_one(dummy)
+
+#ifdef CONFIG_PROVE_RCU_LIST
+#define __list_check_rcu(dummy, cond, extra...)				\
+	({								\
+	check_arg_count_one(extra);					\
+	RCU_LOCKDEP_WARN(!cond && !rcu_read_lock_any_held(),		\
+			 "RCU-list traversed in non-reader section!");	\
+	})
+#else
+#define __list_check_rcu(dummy, cond, extra...)				\
+	({ check_arg_count_one(extra); })
+#endif
+
 /*
  * Insert a new entry between two known consecutive entries.
  *
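Hand-expanding the new check may make the calling convention clearer (this is a sketch, not preprocessor output): with no optional argument the condition collapses to 0, so only an RCU reader silences lockdep; with one argument, holding the named lock is an acceptable alternative; a second extra argument makes check_arg_count_one() see two parameters and the build fails. The lock below is hypothetical.

#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock guarding a list */

static void demo_open_coded_checks(void)
{
	/* list_for_each_entry_rcu(pos, head, member) boils down to: */
	RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
			 "RCU-list traversed in non-reader section!");

	/* ...and with lockdep_is_held(&demo_lock) as the extra argument: */
	RCU_LOCKDEP_WARN(!lockdep_is_held(&demo_lock) &&
			 !rcu_read_lock_any_held(),
			 "RCU-list traversed in non-reader section!");
}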
@@ -343,14 +361,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * @pos:	the type * to use as a loop cursor.
  * @head:	the head for your list.
  * @member:	the name of the list_head within the struct.
+ * @cond:	optional lockdep expression if called from non-RCU protection.
  *
  * This list-traversal primitive may safely run concurrently with
  * the _rcu list-mutation primitives such as list_add_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
  */
-#define list_for_each_entry_rcu(pos, head, member) \
-	for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
-		&pos->member != (head); \
+#define list_for_each_entry_rcu(pos, head, member, cond...)		\
+	for (__list_check_rcu(dummy, ## cond, 0),			\
+	     pos = list_entry_rcu((head)->next, typeof(*pos), member);	\
+		&pos->member != (head);					\
 		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
 
 /**
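A hedged usage sketch of the new optional argument (the struct, list, and lock names below are hypothetical): readers keep calling the macro exactly as before, while update-side code that already holds the guarding lock can pass that fact as @cond instead of wrapping the walk in rcu_read_lock() purely to placate lockdep.

#include <linux/rculist.h>
#include <linux/spinlock.h>

struct demo_entry {
	int val;
	struct list_head node;
};

static LIST_HEAD(demo_head);
static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock serializing updaters */

/* Reader: unchanged three-argument form under rcu_read_lock(). */
static int demo_sum(void)
{
	struct demo_entry *e;
	int sum = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &demo_head, node)
		sum += e->val;
	rcu_read_unlock();
	return sum;
}

/* Updater: the lock, passed as @cond, satisfies the new lockdep check. */
static void demo_bump(void)
{
	struct demo_entry *e;

	spin_lock(&demo_lock);
	list_for_each_entry_rcu(e, &demo_head, node,
				lockdep_is_held(&demo_lock))
		e->val++;
	spin_unlock(&demo_lock);
}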
@@ -616,13 +636,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
  * @pos:	the type * to use as a loop cursor.
  * @head:	the head for your list.
  * @member:	the name of the hlist_node within the struct.
+ * @cond:	optional lockdep expression if called from non-RCU protection.
  *
  * This list-traversal primitive may safely run concurrently with
  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
  */
-#define hlist_for_each_entry_rcu(pos, head, member)			\
-	for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
+#define hlist_for_each_entry_rcu(pos, head, member, cond...)		\
+	for (__list_check_rcu(dummy, ## cond, 0),			\
+	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
 			typeof(*(pos)), member);			\
 		pos;							\
 		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
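The hlist variant gains the same optional argument; here is a short hedged sketch of an update-side bucket lookup. The bucket, mutex, and struct names are hypothetical.

#include <linux/mutex.h>
#include <linux/rculist.h>

struct demo_obj {
	int key;
	struct hlist_node hnode;
};

static HLIST_HEAD(demo_bucket);		/* hypothetical single hash bucket */
static DEFINE_MUTEX(demo_mutex);	/* hypothetical update-side mutex */

/* Update-side lookup: holding demo_mutex, not rcu_read_lock(), justifies the walk. */
static struct demo_obj *demo_lookup_locked(int key)
{
	struct demo_obj *obj;

	lockdep_assert_held(&demo_mutex);
	hlist_for_each_entry_rcu(obj, &demo_bucket, hnode,
				 lockdep_is_held(&demo_mutex))
		if (obj->key == key)
			return obj;
	return NULL;
}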
@@ -642,10 +664,10 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
  * not do any RCU debugging or tracing.
  */
 #define hlist_for_each_entry_rcu_notrace(pos, head, member)		\
-	for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
+	for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\
 			typeof(*(pos)), member);			\
 		pos;							\
-		pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
+		pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\
 			&(pos)->member)), typeof(*(pos)), member))
 
 /**
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -221,6 +221,7 @@ int debug_lockdep_rcu_enabled(void);
 int rcu_read_lock_held(void);
 int rcu_read_lock_bh_held(void);
 int rcu_read_lock_sched_held(void);
+int rcu_read_lock_any_held(void);
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
@@ -241,6 +242,12 @@ static inline int rcu_read_lock_sched_held(void)
 {
 	return !preemptible();
 }
+
+static inline int rcu_read_lock_any_held(void)
+{
+	return !preemptible();
+}
+
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 #ifdef CONFIG_PROVE_RCU
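This hunk only shows the !CONFIG_DEBUG_LOCK_ALLOC stub. The lockdep-aware rcu_read_lock_any_held() added by the series lives in kernel/rcu/update.c and, roughly, reports success if any of the flavor-specific checks passes or preemption is disabled. The following is a from-memory sketch of that behavior, not a verbatim copy.

#include <linux/preempt.h>
#include <linux/rcupdate.h>

/* Rough sketch of the CONFIG_DEBUG_LOCK_ALLOC=y version, not a verbatim copy. */
int rcu_read_lock_any_held(void)
{
	if (rcu_read_lock_held())
		return 1;
	if (rcu_read_lock_bh_held())
		return 1;
	if (rcu_read_lock_sched_held())
		return 1;
	return !preemptible();
}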
@@ -476,7 +483,7 @@ do { \
  * The no-tracing version of rcu_dereference_raw() must not call
  * rcu_read_lock_held().
  */
-#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
+#define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu)
 
 /**
  * rcu_dereference_protected() - fetch RCU pointer when updates prevented
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -12,7 +12,7 @@
 #ifndef __LINUX_TINY_H
 #define __LINUX_TINY_H
 
-#include <linux/ktime.h>
+#include <asm/param.h> /* for HZ */
 
 /* Never flag non-existent other CPUs! */
 static inline bool rcu_eqs_special_set(int cpu) { return false; }
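One practical consequence of dropping the <linux/ktime.h> include (a hedged note, not part of the diff): any code that had been picking up ktime definitions transitively through rcutiny.h now needs to include that header itself, for example:

#include <linux/ktime.h>	/* formerly pulled in transitively via rcutiny.h */
#include <linux/rcupdate.h>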