Merge branch 'rcu/next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU changes from Paul E. McKenney:

 "The major changes for this series are:

  1.	Simplify RCU's grace-period and callback processing based on
	the new numbering for callbacks.  These were posted to LKML at
	https://lkml.org/lkml/2013/5/20/330.

  2.	Documentation updates.  These were posted to LKML at
	https://lkml.org/lkml/2013/5/20/348.

  3.	Miscellaneous fixes, including converting a few remaining
	printk() calls to pr_*().  These were posted to LKML at
	https://lkml.org/lkml/2013/5/20/324.

  4.	SRCU-related changes and fixes.  These were posted to LKML at
	https://lkml.org/lkml/2013/5/20/425.

  5.	Removal of TINY_PREEMPT_RCU in favor of TREE_PREEMPT_RCU for
	single-CPU low-latency systems.  These were posted to LKML at
	https://lkml.org/lkml/2013/5/20/427."

Signed-off-by: Ingo Molnar <mingo@kernel.org>
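As background for item 3: the pr_*() helpers from <linux/printk.h> are one-for-one replacements for printk() calls that pass an explicit KERN_* level, so the conversions below change no output. A minimal sketch (the function name here is illustrative, not from this series):

    #include <linux/printk.h>

    static void stall_banner(const char *name)
    {
    	/* Before: explicit log level passed to printk(). */
    	printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks:", name);

    	/* After: pr_err(fmt, ...) expands to
    	 * printk(KERN_ERR pr_fmt(fmt), ...), so output is unchanged. */
    	pr_err("INFO: %s detected stalls on CPUs/tasks:", name);
    }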
kernel/rcupdate.c

@@ -104,31 +104,7 @@ void __rcu_read_unlock(void)
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
 
-/*
- * Check for a task exiting while in a preemptible-RCU read-side
- * critical section, clean up if so.  No need to issue warnings,
- * as debug_check_no_locks_held() already does this if lockdep
- * is enabled.
- */
-void exit_rcu(void)
-{
-	struct task_struct *t = current;
-
-	if (likely(list_empty(&current->rcu_node_entry)))
-		return;
-	t->rcu_read_lock_nesting = 1;
-	barrier();
-	t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
-	__rcu_read_unlock();
-}
-
-#else /* #ifdef CONFIG_PREEMPT_RCU */
-
-void exit_rcu(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
@@ -145,9 +121,6 @@ static struct lock_class_key rcu_sched_lock_key;
 struct lockdep_map rcu_sched_lock_map =
 	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
-#endif
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 int debug_lockdep_rcu_enabled(void)
 {
kernel/rcutiny.c

@@ -44,7 +44,6 @@
 
 /* Forward declarations for rcutiny_plugin.h. */
 struct rcu_ctrlblk;
-static void invoke_rcu_callbacks(void);
 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
 static void rcu_process_callbacks(struct softirq_action *unused);
 static void __call_rcu(struct rcu_head *head,
@@ -205,7 +204,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
  */
 static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
 {
-	reset_cpu_stall_ticks(rcp);
+	RCU_TRACE(reset_cpu_stall_ticks(rcp));
 	if (rcp->rcucblist != NULL &&
 	    rcp->donetail != rcp->curtail) {
 		rcp->donetail = rcp->curtail;
@@ -227,7 +226,7 @@ void rcu_sched_qs(int cpu)
 	local_irq_save(flags);
 	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
 	    rcu_qsctr_help(&rcu_bh_ctrlblk))
-		invoke_rcu_callbacks();
+		raise_softirq(RCU_SOFTIRQ);
 	local_irq_restore(flags);
 }
 
@@ -240,7 +239,7 @@ void rcu_bh_qs(int cpu)
 
 	local_irq_save(flags);
 	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
-		invoke_rcu_callbacks();
+		raise_softirq(RCU_SOFTIRQ);
 	local_irq_restore(flags);
 }
 
@@ -252,12 +251,11 @@ void rcu_bh_qs(int cpu)
  */
 void rcu_check_callbacks(int cpu, int user)
 {
-	check_cpu_stalls();
+	RCU_TRACE(check_cpu_stalls());
 	if (user || rcu_is_cpu_rrupt_from_idle())
 		rcu_sched_qs(cpu);
 	else if (!in_softirq())
 		rcu_bh_qs(cpu);
-	rcu_preempt_check_callbacks();
 }
 
 /*
@@ -278,7 +276,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 					      ACCESS_ONCE(rcp->rcucblist),
 					      need_resched(),
 					      is_idle_task(current),
-					      rcu_is_callbacks_kthread()));
+					      false));
 		return;
 	}
 
@@ -290,7 +288,6 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 	*rcp->donetail = NULL;
 	if (rcp->curtail == rcp->donetail)
 		rcp->curtail = &rcp->rcucblist;
-	rcu_preempt_remove_callbacks(rcp);
 	rcp->donetail = &rcp->rcucblist;
 	local_irq_restore(flags);
 
@@ -309,14 +306,13 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
 	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
 				      is_idle_task(current),
-				      rcu_is_callbacks_kthread()));
+				      false));
 }
 
 static void rcu_process_callbacks(struct softirq_action *unused)
 {
 	__rcu_process_callbacks(&rcu_sched_ctrlblk);
 	__rcu_process_callbacks(&rcu_bh_ctrlblk);
-	rcu_preempt_process_callbacks();
 }
 
 /*
@@ -382,3 +378,8 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 	__call_rcu(head, func, &rcu_bh_ctrlblk);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
+
+void rcu_init(void)
+{
+	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+}
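With invoke_rcu_callbacks() gone, a quiescent state now raises RCU_SOFTIRQ directly and rcu_process_callbacks() invokes whatever callbacks are ready. The caller-visible API is unchanged; a minimal sketch of queueing a callback that this path later runs (struct foo and its helpers are illustrative, not from this series):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
    	int data;
    	struct rcu_head rcu;
    };

    static void foo_reclaim(struct rcu_head *head)
    {
    	/* Runs from rcu_process_callbacks() in softirq context once
    	 * a bh grace period has elapsed. */
    	kfree(container_of(head, struct foo, rcu));
    }

    static void foo_release(struct foo *fp)
    {
    	call_rcu_bh(&fp->rcu, foo_reclaim);
    }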
File diff suppressed because it is too large
kernel/rcutorture.c

@@ -695,44 +695,6 @@ static struct rcu_torture_ops srcu_sync_ops = {
 	.name		= "srcu_sync"
 };
 
-static int srcu_torture_read_lock_raw(void) __acquires(&srcu_ctl)
-{
-	return srcu_read_lock_raw(&srcu_ctl);
-}
-
-static void srcu_torture_read_unlock_raw(int idx) __releases(&srcu_ctl)
-{
-	srcu_read_unlock_raw(&srcu_ctl, idx);
-}
-
-static struct rcu_torture_ops srcu_raw_ops = {
-	.init		= rcu_sync_torture_init,
-	.readlock	= srcu_torture_read_lock_raw,
-	.read_delay	= srcu_read_delay,
-	.readunlock	= srcu_torture_read_unlock_raw,
-	.completed	= srcu_torture_completed,
-	.deferred_free	= srcu_torture_deferred_free,
-	.sync		= srcu_torture_synchronize,
-	.call		= NULL,
-	.cb_barrier	= NULL,
-	.stats		= srcu_torture_stats,
-	.name		= "srcu_raw"
-};
-
-static struct rcu_torture_ops srcu_raw_sync_ops = {
-	.init		= rcu_sync_torture_init,
-	.readlock	= srcu_torture_read_lock_raw,
-	.read_delay	= srcu_read_delay,
-	.readunlock	= srcu_torture_read_unlock_raw,
-	.completed	= srcu_torture_completed,
-	.deferred_free	= rcu_sync_torture_deferred_free,
-	.sync		= srcu_torture_synchronize,
-	.call		= NULL,
-	.cb_barrier	= NULL,
-	.stats		= srcu_torture_stats,
-	.name		= "srcu_raw_sync"
-};
-
 static void srcu_torture_synchronize_expedited(void)
 {
 	synchronize_srcu_expedited(&srcu_ctl);
@@ -1983,7 +1945,6 @@ rcu_torture_init(void)
 	{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
 	  &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
 	  &srcu_ops, &srcu_sync_ops, &srcu_expedited_ops,
-	  &srcu_raw_ops, &srcu_raw_sync_ops,
 	  &sched_ops, &sched_sync_ops, &sched_expedited_ops, };
 
 	mutex_lock(&fullstop_mutex);
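Removing srcu_raw_ops and srcu_raw_sync_ops shrinks the torture_ops table that rcu_torture_init() scans when matching the torture_type module parameter. A simplified sketch of that selection loop (details elided):

    for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
    	cur_ops = torture_ops[i];
    	if (strcmp(torture_type, cur_ops->name) == 0)
    		break;	/* e.g. torture_type="srcu_raw" no longer matches */
    }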
kernel/rcutree.c
@@ -218,8 +218,8 @@ module_param(blimit, long, 0444);
 module_param(qhimark, long, 0444);
 module_param(qlowmark, long, 0444);
 
-static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS;
-static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS;
+static ulong jiffies_till_first_fqs = ULONG_MAX;
+static ulong jiffies_till_next_fqs = ULONG_MAX;
 
 module_param(jiffies_till_first_fqs, ulong, 0644);
 module_param(jiffies_till_next_fqs, ulong, 0644);
@@ -866,7 +866,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
 	 * RCU CPU stall warnings.
 	 */
-	printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks:",
+	pr_err("INFO: %s detected stalls on CPUs/tasks:",
 	       rsp->name);
 	print_cpu_stall_info_begin();
 	rcu_for_each_leaf_node(rsp, rnp) {
@@ -899,7 +899,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
 	       rsp->gpnum, rsp->completed, totqlen);
 	if (ndetected == 0)
-		printk(KERN_ERR "INFO: Stall ended before state dump start\n");
+		pr_err("INFO: Stall ended before state dump start\n");
 	else if (!trigger_all_cpu_backtrace())
 		rcu_dump_cpu_stacks(rsp);
 
@@ -922,7 +922,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
 	 * RCU CPU stall warnings.
 	 */
-	printk(KERN_ERR "INFO: %s self-detected stall on CPU", rsp->name);
+	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
 	print_cpu_stall_info_begin();
 	print_cpu_stall_info(rsp, smp_processor_id());
 	print_cpu_stall_info_end();
@@ -984,65 +984,6 @@ void rcu_cpu_stall_reset(void)
 		rsp->jiffies_stall = jiffies + ULONG_MAX / 2;
 }
 
-/*
- * Update CPU-local rcu_data state to record the newly noticed grace period.
- * This is used both when we started the grace period and when we notice
- * that someone else started the grace period.  The caller must hold the
- * ->lock of the leaf rcu_node structure corresponding to the current CPU,
- * and must have irqs disabled.
- */
-static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
-{
-	if (rdp->gpnum != rnp->gpnum) {
-		/*
-		 * If the current grace period is waiting for this CPU,
-		 * set up to detect a quiescent state, otherwise don't
-		 * go looking for one.
-		 */
-		rdp->gpnum = rnp->gpnum;
-		trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
-		rdp->passed_quiesce = 0;
-		rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
-		zero_cpu_stall_ticks(rdp);
-	}
-}
-
-static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
-{
-	unsigned long flags;
-	struct rcu_node *rnp;
-
-	local_irq_save(flags);
-	rnp = rdp->mynode;
-	if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
-	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
-		local_irq_restore(flags);
-		return;
-	}
-	__note_new_gpnum(rsp, rnp, rdp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-}
-
-/*
- * Did someone else start a new RCU grace period start since we last
- * checked?  Update local state appropriately if so.  Must be called
- * on the CPU corresponding to rdp.
- */
-static int
-check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	local_irq_save(flags);
-	if (rdp->gpnum != rsp->gpnum) {
-		note_new_gpnum(rsp, rdp);
-		ret = 1;
-	}
-	local_irq_restore(flags);
-	return ret;
-}
-
 /*
  * Initialize the specified rcu_data structure's callback list to empty.
  */
@@ -1313,18 +1254,16 @@ static void rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 }
 
 /*
- * Advance this CPU's callbacks, but only if the current grace period
- * has ended.  This may be called only from the CPU to whom the rdp
- * belongs.  In addition, the corresponding leaf rcu_node structure's
- * ->lock must be held by the caller, with irqs disabled.
+ * Update CPU-local rcu_data state to record the beginnings and ends of
+ * grace periods.  The caller must hold the ->lock of the leaf rcu_node
+ * structure corresponding to the current CPU, and must have irqs disabled.
 */
-static void
-__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
+static void __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
 {
-	/* Did another grace period end? */
+	/* Handle the ends of any preceding grace periods first. */
 	if (rdp->completed == rnp->completed) {
 
-		/* No, so just accelerate recent callbacks. */
+		/* No grace period end, so just accelerate recent callbacks. */
 		rcu_accelerate_cbs(rsp, rnp, rdp);
 
 	} else {
@@ -1335,67 +1274,39 @@ __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_dat
 		/* Remember that we saw this grace-period completion. */
 		rdp->completed = rnp->completed;
 		trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuend");
+	}
 
+	if (rdp->gpnum != rnp->gpnum) {
 		/*
-		 * If we were in an extended quiescent state, we may have
-		 * missed some grace periods that others CPUs handled on
-		 * our behalf. Catch up with this state to avoid noting
-		 * spurious new grace periods.  If another grace period
-		 * has started, then rnp->gpnum will have advanced, so
-		 * we will detect this later on.  Of course, any quiescent
-		 * states we found for the old GP are now invalid.
+		 * If the current grace period is waiting for this CPU,
+		 * set up to detect a quiescent state, otherwise don't
+		 * go looking for one.
 		 */
-		if (ULONG_CMP_LT(rdp->gpnum, rdp->completed)) {
-			rdp->gpnum = rdp->completed;
-			rdp->passed_quiesce = 0;
-		}
-
-		/*
-		 * If RCU does not need a quiescent state from this CPU,
-		 * then make sure that this CPU doesn't go looking for one.
-		 */
-		if ((rnp->qsmask & rdp->grpmask) == 0)
-			rdp->qs_pending = 0;
+		rdp->gpnum = rnp->gpnum;
+		trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
+		rdp->passed_quiesce = 0;
+		rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
+		zero_cpu_stall_ticks(rdp);
 	}
 }
 
-/*
- * Advance this CPU's callbacks, but only if the current grace period
- * has ended.  This may be called only from the CPU to whom the rdp
- * belongs.
- */
-static void
-rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
+static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 	struct rcu_node *rnp;
 
 	local_irq_save(flags);
 	rnp = rdp->mynode;
-	if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
+	if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
+	     rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */
 	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
 		local_irq_restore(flags);
 		return;
 	}
-	__rcu_process_gp_end(rsp, rnp, rdp);
+	__note_gp_changes(rsp, rnp, rdp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
-/*
- * Do per-CPU grace-period initialization for running CPU.  The caller
- * must hold the lock of the leaf rcu_node structure corresponding to
- * this CPU.
- */
-static void
-rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
-{
-	/* Prior grace period ended, so advance callbacks for current CPU. */
-	__rcu_process_gp_end(rsp, rnp, rdp);
-
-	/* Set state so that this CPU will detect the next quiescent state. */
-	__note_new_gpnum(rsp, rnp, rdp);
-}
-
 /*
  * Initialize a new grace period.
 */
@@ -1444,7 +1355,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 		WARN_ON_ONCE(rnp->completed != rsp->completed);
 		ACCESS_ONCE(rnp->completed) = rsp->completed;
 		if (rnp == rdp->mynode)
-			rcu_start_gp_per_cpu(rsp, rnp, rdp);
+			__note_gp_changes(rsp, rnp, rdp);
 		rcu_preempt_boost_start_gp(rnp);
 		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
 					    rnp->level, rnp->grplo,
@@ -1527,7 +1438,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		ACCESS_ONCE(rnp->completed) = rsp->gpnum;
 		rdp = this_cpu_ptr(rsp->rda);
 		if (rnp == rdp->mynode)
-			__rcu_process_gp_end(rsp, rnp, rdp);
+			__note_gp_changes(rsp, rnp, rdp);
 		nocb += rcu_future_gp_cleanup(rsp, rnp);
 		raw_spin_unlock_irq(&rnp->lock);
 		cond_resched();
@@ -1805,9 +1716,8 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 static void
 rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	/* If there is now a new grace period, record and return. */
-	if (check_for_new_grace_period(rsp, rdp))
-		return;
+	/* Check for grace-period ends and beginnings. */
+	note_gp_changes(rsp, rdp);
 
 	/*
 	 * Does this CPU still need to do its part for current grace period?
@@ -2271,9 +2181,6 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 
 	WARN_ON_ONCE(rdp->beenonline == 0);
 
-	/* Handle the end of a grace period that some other CPU ended. */
-	rcu_process_gp_end(rsp, rdp);
-
 	/* Update RCU state based on any recent quiescent states. */
 	rcu_check_quiescent_state(rsp, rdp);
 
@@ -2358,8 +2265,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 		if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
 
 			/* Are we ignoring a completed grace period? */
-			rcu_process_gp_end(rsp, rdp);
-			check_for_new_grace_period(rsp, rdp);
+			note_gp_changes(rsp, rdp);
 
 			/* Start a new grace period if one not already started. */
 			if (!rcu_gp_in_progress(rsp)) {
@@ -3265,11 +3171,25 @@ static void __init rcu_init_one(struct rcu_state *rsp,
  */
 static void __init rcu_init_geometry(void)
 {
+	ulong d;
 	int i;
 	int j;
 	int n = nr_cpu_ids;
 	int rcu_capacity[MAX_RCU_LVLS + 1];
 
+	/*
+	 * Initialize any unspecified boot parameters.
+	 * The default values of jiffies_till_first_fqs and
+	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
+	 * value, which is a function of HZ, then adding one for each
+	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
+	 */
+	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
+	if (jiffies_till_first_fqs == ULONG_MAX)
+		jiffies_till_first_fqs = d;
+	if (jiffies_till_next_fqs == ULONG_MAX)
+		jiffies_till_next_fqs = d;
+
 	/* If the compile-time values are accurate, just leave. */
 	if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
 	    nr_cpu_ids == NR_CPUS)
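A worked example of the new boot-time default computed above (HZ and the CPU count are assumed values for illustration); this compiles as plain userspace C:

    #include <stdio.h>

    #define HZ 1000				/* assumed kernel config */
    #define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
    #define RCU_JIFFIES_FQS_DIV 256

    int main(void)
    {
    	unsigned long nr_cpu_ids = 4096;	/* assumed CPU count */
    	unsigned long d = RCU_JIFFIES_TILL_FORCE_QS +
    			  nr_cpu_ids / RCU_JIFFIES_FQS_DIV;

    	/* 3 + 4096/256 = 19 jiffies between quiescent-state forcings */
    	printf("default jiffies_till_first_fqs = %lu\n", d);
    	return 0;
    }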
kernel/rcutree.h

@@ -343,12 +343,17 @@ struct rcu_data {
 #define RCU_FORCE_QS		3	/* Need to force quiescent state. */
 #define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK
 
-#define RCU_JIFFIES_TILL_FORCE_QS	 3	/* for rsp->jiffies_force_qs */
+#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
+					/* For jiffies_till_first_fqs and */
+					/*  and jiffies_till_next_fqs. */
 
-#define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */
-						/*  to take at least one */
-						/*  scheduling clock irq */
-						/*  before ratting on them. */
+#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
+					/*  delay between bouts of */
+					/*  quiescent-state forcing. */
+
+#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
+					/*  at least one scheduling clock */
+					/*  irq before ratting on them. */
 
 #define rcu_wait(cond)							\
 do {									\
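The replacement macro scales with the tick rate, allowing more jiffies at higher HZ so the wall-clock delay before quiescent-state forcing does not shrink proportionally with shorter ticks. A quick check of its value at common HZ settings:

    #include <assert.h>

    #define TILL_FORCE_QS(hz) (1 + ((hz) > 250) + ((hz) > 500))

    int main(void)
    {
    	assert(TILL_FORCE_QS(100)  == 1);	/* 10 ms   */
    	assert(TILL_FORCE_QS(250)  == 1);	/*  4 ms   */
    	assert(TILL_FORCE_QS(300)  == 2);	/* ~6.7 ms */
    	assert(TILL_FORCE_QS(1000) == 3);	/*  3 ms   */
    	return 0;
    }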
kernel/rcutree_plugin.h

@@ -53,38 +53,37 @@ static char __initdata nocb_buf[NR_CPUS * 5];
 static void __init rcu_bootup_announce_oddness(void)
 {
 #ifdef CONFIG_RCU_TRACE
-	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
+	pr_info("\tRCU debugfs-based tracing is enabled.\n");
 #endif
 #if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
-	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
+	pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
 	       CONFIG_RCU_FANOUT);
 #endif
 #ifdef CONFIG_RCU_FANOUT_EXACT
-	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
+	pr_info("\tHierarchical RCU autobalancing is disabled.\n");
 #endif
 #ifdef CONFIG_RCU_FAST_NO_HZ
-	printk(KERN_INFO
-	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
+	pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
 #endif
 #ifdef CONFIG_PROVE_RCU
-	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
+	pr_info("\tRCU lockdep checking is enabled.\n");
 #endif
 #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
-	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
+	pr_info("\tRCU torture testing starts during boot.\n");
 #endif
 #if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
-	printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
+	pr_info("\tDump stacks of tasks blocking RCU-preempt GP.\n");
 #endif
 #if defined(CONFIG_RCU_CPU_STALL_INFO)
-	printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
+	pr_info("\tAdditional per-CPU info printed with stalls.\n");
 #endif
 #if NUM_RCU_LVL_4 != 0
-	printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
+	pr_info("\tFour-level hierarchy is enabled.\n");
 #endif
 	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
-		printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
+		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
 	if (nr_cpu_ids != NR_CPUS)
-		printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
+		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
 #ifdef CONFIG_RCU_NOCB_CPU
 #ifndef CONFIG_RCU_NOCB_CPU_NONE
 	if (!have_rcu_nocb_mask) {
@@ -92,19 +91,19 @@ static void __init rcu_bootup_announce_oddness(void)
 		have_rcu_nocb_mask = true;
 	}
 #ifdef CONFIG_RCU_NOCB_CPU_ZERO
-	pr_info("\tExperimental no-CBs CPU 0\n");
+	pr_info("\tOffload RCU callbacks from CPU 0\n");
 	cpumask_set_cpu(0, rcu_nocb_mask);
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
 #ifdef CONFIG_RCU_NOCB_CPU_ALL
-	pr_info("\tExperimental no-CBs for all CPUs\n");
+	pr_info("\tOffload RCU callbacks from all CPUs\n");
 	cpumask_setall(rcu_nocb_mask);
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
 	if (have_rcu_nocb_mask) {
 		cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
-		pr_info("\tExperimental no-CBs CPUs: %s.\n", nocb_buf);
+		pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
 		if (rcu_nocb_poll)
-			pr_info("\tExperimental polled no-CBs CPUs.\n");
+			pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
 	}
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 }
@@ -123,7 +122,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 */
 static void __init rcu_bootup_announce(void)
 {
-	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
+	pr_info("Preemptible hierarchical RCU implementation.\n");
 	rcu_bootup_announce_oddness();
 }
 
@@ -490,13 +489,13 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
 
 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
 {
-	printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
+	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
 	       rnp->level, rnp->grplo, rnp->grphi);
 }
 
 static void rcu_print_task_stall_end(void)
 {
-	printk(KERN_CONT "\n");
+	pr_cont("\n");
 }
 
 #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
@@ -526,7 +525,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 	t = list_entry(rnp->gp_tasks,
 		       struct task_struct, rcu_node_entry);
 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
-		printk(KERN_CONT " P%d", t->pid);
+		pr_cont(" P%d", t->pid);
 		ndetected++;
 	}
 	rcu_print_task_stall_end();
@@ -933,6 +932,24 @@ static void __init __rcu_init_preempt(void)
 	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
 }
 
+/*
+ * Check for a task exiting while in a preemptible-RCU read-side
+ * critical section, clean up if so.  No need to issue warnings,
+ * as debug_check_no_locks_held() already does this if lockdep
+ * is enabled.
+ */
+void exit_rcu(void)
+{
+	struct task_struct *t = current;
+
+	if (likely(list_empty(&current->rcu_node_entry)))
+		return;
+	t->rcu_read_lock_nesting = 1;
+	barrier();
+	t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
+	__rcu_read_unlock();
+}
+
 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 static struct rcu_state *rcu_state = &rcu_sched_state;
@@ -942,7 +959,7 @@ static struct rcu_state *rcu_state = &rcu_sched_state;
 */
 static void __init rcu_bootup_announce(void)
 {
-	printk(KERN_INFO "Hierarchical RCU implementation.\n");
+	pr_info("Hierarchical RCU implementation.\n");
 	rcu_bootup_announce_oddness();
 }
 
@@ -1101,6 +1118,14 @@ static void __init __rcu_init_preempt(void)
 {
 }
 
+/*
+ * Because preemptible RCU does not exist, tasks cannot possibly exit
+ * while in preemptible RCU read-side critical sections.
+ */
+void exit_rcu(void)
+{
+}
+
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 #ifdef CONFIG_RCU_BOOST
@@ -1629,7 +1654,7 @@ static bool rcu_try_advance_all_cbs(void)
 	 */
 	if (rdp->completed != rnp->completed &&
 	    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
-		rcu_process_gp_end(rsp, rdp);
+		note_gp_changes(rsp, rdp);
 
 	if (cpu_has_callbacks_ready_to_invoke(rdp))
 		cbs_ready = true;
@@ -1883,7 +1908,7 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
 /* Initiate the stall-info list. */
 static void print_cpu_stall_info_begin(void)
 {
-	printk(KERN_CONT "\n");
+	pr_cont("\n");
 }
 
 /*
@@ -1914,7 +1939,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 		ticks_value = rsp->gpnum - rdp->gpnum;
 	}
 	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
-	printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
+	pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
 	       cpu, ticks_value, ticks_title,
 	       atomic_read(&rdtp->dynticks) & 0xfff,
 	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
@@ -1925,7 +1950,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 /* Terminate the stall-info list. */
 static void print_cpu_stall_info_end(void)
 {
-	printk(KERN_ERR "\t");
+	pr_err("\t");
 }
 
 /* Zero ->ticks_this_gp for all flavors of RCU. */
@@ -1948,17 +1973,17 @@ static void increment_cpu_stall_ticks(void)
 
 static void print_cpu_stall_info_begin(void)
 {
-	printk(KERN_CONT " {");
+	pr_cont(" {");
 }
 
 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 {
-	printk(KERN_CONT " %d", cpu);
+	pr_cont(" %d", cpu);
}
 
 static void print_cpu_stall_info_end(void)
 {
-	printk(KERN_CONT "} ");
+	pr_cont("} ");
 }
 
 static void zero_cpu_stall_ticks(struct rcu_data *rdp)