Merge branches 'doc.2013.08.19a', 'fixes.2013.08.20a', 'sysidle.2013.08.31a' and 'torture.2013.08.20a' into HEAD
doc.2013.08.19a: Documentation updates.
fixes.2013.08.20a: Miscellaneous fixes.
sysidle.2013.08.31a: Detect system-wide idle state.
torture.2013.08.20a: rcutorture updates.
kernel/rcutree.c (150 changed lines)
@@ -54,6 +54,7 @@
 #include <linux/stop_machine.h>
 #include <linux/random.h>
 #include <linux/ftrace_event.h>
+#include <linux/suspend.h>
 
 #include "rcutree.h"
 #include <trace/events/rcu.h>
@@ -224,6 +225,10 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
 	.dynticks = ATOMIC_INIT(1),
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+	.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
+	.dynticks_idle = ATOMIC_INIT(1),
+#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 };
 
 static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
@@ -242,7 +247,10 @@ module_param(jiffies_till_next_fqs, ulong, 0644);
 
 static void rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 				  struct rcu_data *rdp);
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *));
+static void force_qs_rnp(struct rcu_state *rsp,
+			 int (*f)(struct rcu_data *rsp, bool *isidle,
+				  unsigned long *maxj),
+			 bool *isidle, unsigned long *maxj);
 static void force_quiescent_state(struct rcu_state *rsp);
 static int rcu_pending(int cpu);
 
@@ -427,6 +435,7 @@ void rcu_idle_enter(void)
 
 	local_irq_save(flags);
 	rcu_eqs_enter(false);
+	rcu_sysidle_enter(&__get_cpu_var(rcu_dynticks), 0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -444,27 +453,6 @@ void rcu_user_enter(void)
 {
 	rcu_eqs_enter(1);
 }
-
-/**
- * rcu_user_enter_after_irq - inform RCU that we are going to resume userspace
- * after the current irq returns.
- *
- * This is similar to rcu_user_enter() but in the context of a non-nesting
- * irq. After this call, RCU enters into idle mode when the interrupt
- * returns.
- */
-void rcu_user_enter_after_irq(void)
-{
-	unsigned long flags;
-	struct rcu_dynticks *rdtp;
-
-	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
-	/* Ensure this irq is interrupting a non-idle RCU state. */
-	WARN_ON_ONCE(!(rdtp->dynticks_nesting & DYNTICK_TASK_MASK));
-	rdtp->dynticks_nesting = 1;
-	local_irq_restore(flags);
-}
 #endif /* CONFIG_RCU_USER_QS */
 
 /**
@@ -498,6 +486,7 @@ void rcu_irq_exit(void)
 		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
 	else
 		rcu_eqs_enter_common(rdtp, oldval, true);
+	rcu_sysidle_enter(rdtp, 1);
 	local_irq_restore(flags);
 }
 
@@ -566,6 +555,7 @@ void rcu_idle_exit(void)
 
 	local_irq_save(flags);
 	rcu_eqs_exit(false);
+	rcu_sysidle_exit(&__get_cpu_var(rcu_dynticks), 0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -581,28 +571,6 @@ void rcu_user_exit(void)
 {
 	rcu_eqs_exit(1);
 }
-
-/**
- * rcu_user_exit_after_irq - inform RCU that we won't resume to userspace
- * idle mode after the current non-nesting irq returns.
- *
- * This is similar to rcu_user_exit() but in the context of an irq.
- * This is called when the irq has interrupted a userspace RCU idle mode
- * context. When the current non-nesting interrupt returns after this call,
- * the CPU won't restore the RCU idle mode.
- */
-void rcu_user_exit_after_irq(void)
-{
-	unsigned long flags;
-	struct rcu_dynticks *rdtp;
-
-	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
-	/* Ensure we are interrupting an RCU idle mode. */
-	WARN_ON_ONCE(rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK);
-	rdtp->dynticks_nesting += DYNTICK_TASK_EXIT_IDLE;
-	local_irq_restore(flags);
-}
 #endif /* CONFIG_RCU_USER_QS */
 
 /**
@@ -639,6 +607,7 @@ void rcu_irq_enter(void)
 		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
 	else
 		rcu_eqs_exit_common(rdtp, oldval, true);
+	rcu_sysidle_exit(rdtp, 1);
 	local_irq_restore(flags);
 }
 
@@ -762,9 +731,11 @@ static int rcu_is_cpu_rrupt_from_idle(void)
  * credit them with an implicit quiescent state.  Return 1 if this CPU
  * is in dynticks idle mode, which is an extended quiescent state.
  */
-static int dyntick_save_progress_counter(struct rcu_data *rdp)
+static int dyntick_save_progress_counter(struct rcu_data *rdp,
+					 bool *isidle, unsigned long *maxj)
 {
 	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
+	rcu_sysidle_check_cpu(rdp, isidle, maxj);
 	return (rdp->dynticks_snap & 0x1) == 0;
 }
 
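The snapshot logic above relies on the dynticks counter convention: the counter is incremented on every transition into or out of dyntick-idle, so an even value means the CPU is currently in an extended quiescent state. A minimal stand-alone sketch of the same snapshot-and-recheck idea (illustrative names only, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative sketch: the even/odd dynticks convention, outside the kernel. */
struct cpu_state {
	atomic_ulong dynticks;		/* even: idle, odd: not idle */
	unsigned long snap;		/* snapshot taken when forcing quiescent states */
};

/* Take a snapshot; report an immediate quiescent state if the count is even. */
static bool save_progress_counter(struct cpu_state *cs)
{
	cs->snap = atomic_load(&cs->dynticks);
	return (cs->snap & 0x1) == 0;
}

/* Later pass: any change from the snapshot means the CPU went idle meanwhile. */
static bool passed_quiescent_state(struct cpu_state *cs)
{
	unsigned long curr = atomic_load(&cs->dynticks);

	return (curr & 0x1) == 0 || curr != cs->snap;
}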
@@ -774,7 +745,8 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
  * idle state since the last call to dyntick_save_progress_counter()
  * for this same CPU, or by virtue of having been offline.
  */
-static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
+				    bool *isidle, unsigned long *maxj)
 {
 	unsigned int curr;
 	unsigned int snap;
@@ -1332,6 +1304,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
+	rcu_bind_gp_kthread();
 	raw_spin_lock_irq(&rnp->lock);
 	rsp->gp_flags = 0; /* Clear all flags: New grace period. */
 
@@ -1396,16 +1369,25 @@ static int rcu_gp_init(struct rcu_state *rsp)
 int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
 {
 	int fqs_state = fqs_state_in;
+	bool isidle = false;
+	unsigned long maxj;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
 	rsp->n_force_qs++;
 	if (fqs_state == RCU_SAVE_DYNTICK) {
 		/* Collect dyntick-idle snapshots. */
-		force_qs_rnp(rsp, dyntick_save_progress_counter);
+		if (is_sysidle_rcu_state(rsp)) {
+			isidle = 1;
+			maxj = jiffies - ULONG_MAX / 4;
+		}
+		force_qs_rnp(rsp, dyntick_save_progress_counter,
+			     &isidle, &maxj);
+		rcu_sysidle_report_gp(rsp, isidle, maxj);
 		fqs_state = RCU_FORCE_QS;
 	} else {
 		/* Handle dyntick-idle and offline CPUs. */
-		force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
+		isidle = 0;
+		force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
 	}
 	/* Clear flag to prevent immediate re-entry. */
 	if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
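The rcu_gp_fqs() hunk above is the caller side of the new interface: isidle starts out true, maxj starts at a value far in the past, the scan clears isidle as soon as it finds a non-idle CPU and advances maxj to the latest idle-entry time it sees, and rcu_sysidle_report_gp() consumes the result. A self-contained sketch of that accumulate-over-a-scan pattern (hypothetical types and names; the kernel additionally uses a wrap-safe "far in the past" starting value, jiffies - ULONG_MAX / 4):

#include <stdbool.h>

/* Illustrative sketch: accumulate a "system-wide idle" verdict over a scan. */
struct cpu_sample {
	bool idle;			/* is this CPU idle right now? */
	unsigned long idle_since;	/* when it last entered idle */
};

/* Check one CPU, folding the result into the out-parameters. */
static void check_cpu(const struct cpu_sample *cs, bool *isidle,
		      unsigned long *maxj)
{
	if (!cs->idle) {
		*isidle = false;	/* one busy CPU spoils system-wide idle */
		return;
	}
	if (cs->idle_since > *maxj)
		*maxj = cs->idle_since;	/* remember the most recent idle entry */
}

static bool system_is_idle(const struct cpu_sample *cpus, int n,
			   unsigned long *maxj)
{
	bool isidle = true;
	int i;

	*maxj = 0;			/* simplified "very old" starting point */
	for (i = 0; i < n; i++)
		check_cpu(&cpus[i], &isidle, maxj);
	return isidle;
}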
@@ -1575,10 +1557,12 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 
 	/*
 	 * We can't do wakeups while holding the rnp->lock, as that
-	 * could cause possible deadlocks with the rq->lock. Deter
-	 * the wakeup to interrupt context.
+	 * could cause possible deadlocks with the rq->lock. Defer
+	 * the wakeup to interrupt context. And don't bother waking
+	 * up the running kthread.
 	 */
-	irq_work_queue(&rsp->wakeup_work);
+	if (current != rsp->gp_kthread)
+		irq_work_queue(&rsp->wakeup_work);
 }
 
 /*
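The corrected comment above also spells out the rule behind the new check: a direct wake_up() here runs with rnp->lock held and could deadlock against the scheduler's rq->lock, so the wakeup is deferred through irq_work, and skipped when the grace-period kthread is the one asking. A hedged sketch of that defer-a-wakeup pattern in general (demo names, not the RCU code itself):

#include <linux/irq_work.h>
#include <linux/wait.h>

/* Illustrative sketch: defer a wakeup out of a lock-protected region. */
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static struct irq_work demo_irq_work;

static void demo_deferred_wakeup(struct irq_work *work)
{
	wake_up(&demo_wq);	/* runs from irq_work context, no caller locks held */
}

static void demo_init(void)
{
	init_irq_work(&demo_irq_work, demo_deferred_wakeup);
}

static void demo_poke(void)
{
	/*
	 * The caller may hold a lock that must not nest inside rq->lock,
	 * so queue the wakeup instead of calling wake_up() directly.
	 */
	irq_work_queue(&demo_irq_work);
}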
@@ -2104,7 +2088,10 @@ void rcu_check_callbacks(int cpu, int user)
  *
  * The caller must have suppressed start of new grace periods.
  */
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
+static void force_qs_rnp(struct rcu_state *rsp,
+			 int (*f)(struct rcu_data *rsp, bool *isidle,
+				  unsigned long *maxj),
+			 bool *isidle, unsigned long *maxj)
 {
 	unsigned long bit;
 	int cpu;
@@ -2127,9 +2114,12 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
 		cpu = rnp->grplo;
 		bit = 1;
 		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
-			if ((rnp->qsmask & bit) != 0 &&
-			    f(per_cpu_ptr(rsp->rda, cpu)))
-				mask |= bit;
+			if ((rnp->qsmask & bit) != 0) {
+				if ((rnp->qsmaskinit & bit) != 0)
+					*isidle = 0;
+				if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
+					mask |= bit;
+			}
 		}
 		if (mask != 0) {
 
@@ -2303,6 +2293,13 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 	}
 }
 
+/*
+ * RCU callback function to leak a callback.
+ */
+static void rcu_leak_callback(struct rcu_head *rhp)
+{
+}
+
 /*
  * Helper function for call_rcu() and friends.  The cpu argument will
  * normally be -1, indicating "currently running CPU".  It may specify
@@ -2317,7 +2314,12 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	struct rcu_data *rdp;
 
 	WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
-	debug_rcu_head_queue(head);
+	if (debug_rcu_head_queue(head)) {
+		/* Probable double call_rcu(), so leak the callback. */
+		ACCESS_ONCE(head->func) = rcu_leak_callback;
+		WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
+		return;
+	}
 	head->func = func;
 	head->next = NULL;
 
@@ -2802,9 +2804,20 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	 * transition.  The "if" expression below therefore rounds the old
 	 * value up to the next even number and adds two before comparing.
 	 */
-	snap_done = ACCESS_ONCE(rsp->n_barrier_done);
+	snap_done = rsp->n_barrier_done;
 	_rcu_barrier_trace(rsp, "Check", -1, snap_done);
-	if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) {
+
+	/*
+	 * If the value in snap is odd, we needed to wait for the current
+	 * rcu_barrier() to complete, then wait for the next one, in other
+	 * words, we need the value of snap_done to be three larger than
+	 * the value of snap.  On the other hand, if the value in snap is
+	 * even, we only had to wait for the next rcu_barrier() to complete,
+	 * in other words, we need the value of snap_done to be only two
+	 * greater than the value of snap.  The "(snap + 3) & ~0x1" computes
+	 * this for us (thank you, Linus!).
+	 */
+	if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
 		_rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
 		smp_mb(); /* caller's subsequent code after above check. */
 		mutex_unlock(&rsp->barrier_mutex);
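The new comment block above carries the reasoning; plugging in concrete values makes the "(snap + 3) & ~0x1" rounding easy to verify (BARRIER_TARGET is a made-up helper for illustration only):

#include <assert.h>

/* Illustrative check of the rounding used in the _rcu_barrier() early exit. */
#define BARRIER_TARGET(snap)	(((snap) + 3) & ~0x1UL)

int main(void)
{
	/* snap even (4): only the next rcu_barrier() must finish, so need >= 6. */
	assert(BARRIER_TARGET(4UL) == 6);
	/* snap odd (5): the in-flight one and the next must finish, so need >= 8. */
	assert(BARRIER_TARGET(5UL) == 8);
	return 0;
}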
@@ -2947,6 +2960,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 	rdp->blimit = blimit;
 	init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
 	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+	rcu_sysidle_init_percpu_data(rdp->dynticks);
 	atomic_set(&rdp->dynticks->dynticks,
 		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
 	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
@@ -3032,6 +3046,25 @@ static int rcu_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
+static int rcu_pm_notify(struct notifier_block *self,
+			 unsigned long action, void *hcpu)
+{
+	switch (action) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
+			rcu_expedited = 1;
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		rcu_expedited = 0;
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
 /*
  * Spawn the kthread that handles this RCU flavor's grace periods.
  */
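rcu_pm_notify() above is an ordinary PM notifier: it receives the PM_* action and returns NOTIFY_OK, and the next hunk wires it up with the pm_notifier() convenience macro. For comparison, a minimal sketch of the same registration done explicitly through register_pm_notifier() (demo names only, not the RCU code):

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

/* Illustrative sketch: a hand-registered PM notifier. */
static int demo_pm_notify(struct notifier_block *self,
			  unsigned long action, void *unused)
{
	switch (action) {
	case PM_SUSPEND_PREPARE:	/* system is about to suspend */
	case PM_POST_SUSPEND:		/* system has resumed */
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_pm_nb = {
	.notifier_call = demo_pm_notify,
};

static int __init demo_pm_init(void)
{
	return register_pm_notifier(&demo_pm_nb);
}
core_initcall(demo_pm_init);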
@@ -3273,6 +3306,7 @@ void __init rcu_init(void)
 	 * or the scheduler are operational.
 	 */
 	cpu_notifier(rcu_cpu_notify, 0);
+	pm_notifier(rcu_pm_notify, 0);
 	for_each_online_cpu(cpu)
 		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
 }