Merge branch 'linus' into perf/urgent, to pick up the upstream merged bits
Signed-off-by: Ingo Molnar <mingo@kernel.org>

kernel/cpu.c
@@ -86,6 +86,16 @@ static struct {
#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)

static void apply_puts_pending(int max)
{
int delta;

if (atomic_read(&cpu_hotplug.puts_pending) >= max) {
delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
cpu_hotplug.refcount -= delta;
}
}

void get_online_cpus(void)
{
might_sleep();
@@ -93,6 +103,7 @@ void get_online_cpus(void)
return;
cpuhp_lock_acquire_read();
mutex_lock(&cpu_hotplug.lock);
apply_puts_pending(65536);
cpu_hotplug.refcount++;
mutex_unlock(&cpu_hotplug.lock);
}
@@ -105,6 +116,7 @@ bool try_get_online_cpus(void)
if (!mutex_trylock(&cpu_hotplug.lock))
return false;
cpuhp_lock_acquire_tryread();
apply_puts_pending(65536);
cpu_hotplug.refcount++;
mutex_unlock(&cpu_hotplug.lock);
return true;
@@ -161,12 +173,7 @@ void cpu_hotplug_begin(void)
cpuhp_lock_acquire();
for (;;) {
mutex_lock(&cpu_hotplug.lock);
if (atomic_read(&cpu_hotplug.puts_pending)) {
int delta;

delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
cpu_hotplug.refcount -= delta;
}
apply_puts_pending(1);
if (likely(!cpu_hotplug.refcount))
break;
__set_current_state(TASK_UNINTERRUPTIBLE);

@@ -4460,7 +4460,7 @@ perf_output_sample_regs(struct perf_output_handle *handle,
}
}

static void perf_sample_regs_user(struct perf_regs_user *regs_user,
static void perf_sample_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs)
{
if (!user_mode(regs)) {
@@ -4471,11 +4471,22 @@ static void perf_sample_regs_user(struct perf_regs_user *regs_user,
}

if (regs) {
regs_user->regs = regs;
regs_user->abi = perf_reg_abi(current);
regs_user->regs = regs;
} else {
regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
regs_user->regs = NULL;
}
}

static void perf_sample_regs_intr(struct perf_regs *regs_intr,
struct pt_regs *regs)
{
regs_intr->regs = regs;
regs_intr->abi = perf_reg_abi(current);
}


/*
* Get remaining task size from user stack pointer.
*
@@ -4857,6 +4868,23 @@ void perf_output_sample(struct perf_output_handle *handle,
if (sample_type & PERF_SAMPLE_TRANSACTION)
perf_output_put(handle, data->txn);

if (sample_type & PERF_SAMPLE_REGS_INTR) {
u64 abi = data->regs_intr.abi;
/*
* If there are no regs to dump, notice it through
* first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
*/
perf_output_put(handle, abi);

if (abi) {
u64 mask = event->attr.sample_regs_intr;

perf_output_sample_regs(handle,
data->regs_intr.regs,
mask);
}
}

if (!event->attr.watermark) {
int wakeup_events = event->attr.wakeup_events;

@@ -4922,12 +4950,13 @@ void perf_prepare_sample(struct perf_event_header *header,
header->size += size;
}

if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
perf_sample_regs_user(&data->regs_user, regs);

if (sample_type & PERF_SAMPLE_REGS_USER) {
/* regs dump ABI info */
int size = sizeof(u64);

perf_sample_regs_user(&data->regs_user, regs);

if (data->regs_user.regs) {
u64 mask = event->attr.sample_regs_user;
size += hweight64(mask) * sizeof(u64);
@@ -4943,15 +4972,11 @@ void perf_prepare_sample(struct perf_event_header *header,
* in case new sample type is added, because we could eat
* up the rest of the sample size.
*/
struct perf_regs_user *uregs = &data->regs_user;
u16 stack_size = event->attr.sample_stack_user;
u16 size = sizeof(u64);

if (!uregs->abi)
perf_sample_regs_user(uregs, regs);

stack_size = perf_sample_ustack_size(stack_size, header->size,
uregs->regs);
data->regs_user.regs);

/*
* If there is something to dump, add space for the dump
@@ -4964,6 +4989,21 @@ void perf_prepare_sample(struct perf_event_header *header,
data->stack_user_size = stack_size;
header->size += size;
}

if (sample_type & PERF_SAMPLE_REGS_INTR) {
/* regs dump ABI info */
int size = sizeof(u64);

perf_sample_regs_intr(&data->regs_intr, regs);

if (data->regs_intr.regs) {
u64 mask = event->attr.sample_regs_intr;

size += hweight64(mask) * sizeof(u64);
}

header->size += size;
}
}

static void perf_event_output(struct perf_event *event,
@@ -7151,6 +7191,8 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
ret = -EINVAL;
}

if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
ret = perf_reg_validate(attr->sample_regs_intr);
out:
return ret;

@@ -1022,11 +1022,14 @@ void __cleanup_sighand(struct sighand_struct *sighand)
{
if (atomic_dec_and_test(&sighand->count)) {
signalfd_cleanup(sighand);
/*
* sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it
* without an RCU grace period, see __lock_task_sighand().
*/
kmem_cache_free(sighand_cachep, sighand);
}
}


/*
* Initialize POSIX timer handling for a thread group.
*/

@@ -1,6 +1,6 @@
obj-y += update.o srcu.o
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
obj-$(CONFIG_TREE_RCU) += tree.o
obj-$(CONFIG_TREE_PREEMPT_RCU) += tree.o
obj-$(CONFIG_PREEMPT_RCU) += tree.o
obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o
obj-$(CONFIG_TINY_RCU) += tiny.o

@@ -135,4 +135,6 @@ int rcu_jiffies_till_stall_check(void);
*/
#define TPS(x) tracepoint_string(x)

void rcu_early_boot_tests(void);

#endif /* __LINUX_RCU_H */

@@ -812,6 +812,7 @@ rcu_torture_cbflood(void *arg)
cur_ops->cb_barrier();
stutter_wait("rcu_torture_cbflood");
} while (!torture_must_stop());
vfree(rhp);
torture_kthread_stopping("rcu_torture_cbflood");
return 0;
}

@@ -247,7 +247,7 @@ void rcu_bh_qs(void)
* be called from hardirq context. It is normally called from the
* scheduling-clock interrupt.
*/
void rcu_check_callbacks(int cpu, int user)
void rcu_check_callbacks(int user)
{
RCU_TRACE(check_cpu_stalls());
if (user || rcu_is_cpu_rrupt_from_idle())
@@ -380,7 +380,9 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

void rcu_init(void)
void __init rcu_init(void)
{
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);

rcu_early_boot_tests();
}

@@ -105,7 +105,7 @@ struct rcu_state sname##_state = { \
.name = RCU_STATE_NAME(sname), \
.abbr = sabbr, \
}; \
DEFINE_PER_CPU(struct rcu_data, sname##_data)
DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data)

RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
@@ -152,19 +152,6 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
*/
static int rcu_scheduler_fully_active __read_mostly;

#ifdef CONFIG_RCU_BOOST

/*
* Control variables for per-CPU and per-rcu_node kthreads. These
* handle all flavors of RCU.
*/
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);

#endif /* #ifdef CONFIG_RCU_BOOST */

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
@@ -286,11 +273,11 @@ static void rcu_momentary_dyntick_idle(void)
* and requires special handling for preemptible RCU.
* The caller must have disabled preemption.
*/
void rcu_note_context_switch(int cpu)
void rcu_note_context_switch(void)
{
trace_rcu_utilization(TPS("Start context switch"));
rcu_sched_qs();
rcu_preempt_note_context_switch(cpu);
rcu_preempt_note_context_switch();
if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
rcu_momentary_dyntick_idle();
trace_rcu_utilization(TPS("End context switch"));
@@ -325,7 +312,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
unsigned long *maxj),
bool *isidle, unsigned long *maxj);
static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(int cpu);
static int rcu_pending(void);

/*
* Return the number of RCU-sched batches processed thus far for debug & stats.
@@ -510,11 +497,11 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
* we really have entered idle, and must do the appropriate accounting.
* The caller must have disabled interrupts.
*/
static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
bool user)
static void rcu_eqs_enter_common(long long oldval, bool user)
{
struct rcu_state *rsp;
struct rcu_data *rdp;
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
if (!user && !is_idle_task(current)) {
@@ -531,7 +518,7 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
rdp = this_cpu_ptr(rsp->rda);
do_nocb_deferred_wakeup(rdp);
}
rcu_prepare_for_idle(smp_processor_id());
rcu_prepare_for_idle();
/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
smp_mb__before_atomic(); /* See above. */
atomic_inc(&rdtp->dynticks);
@@ -565,7 +552,7 @@ static void rcu_eqs_enter(bool user)
WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
rdtp->dynticks_nesting = 0;
rcu_eqs_enter_common(rdtp, oldval, user);
rcu_eqs_enter_common(oldval, user);
} else {
rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
}
@@ -589,7 +576,7 @@ void rcu_idle_enter(void)

local_irq_save(flags);
rcu_eqs_enter(false);
rcu_sysidle_enter(this_cpu_ptr(&rcu_dynticks), 0);
rcu_sysidle_enter(0);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -639,8 +626,8 @@ void rcu_irq_exit(void)
if (rdtp->dynticks_nesting)
trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
else
rcu_eqs_enter_common(rdtp, oldval, true);
rcu_sysidle_enter(rdtp, 1);
rcu_eqs_enter_common(oldval, true);
rcu_sysidle_enter(1);
local_irq_restore(flags);
}

@@ -651,16 +638,17 @@ void rcu_irq_exit(void)
* we really have exited idle, and must do the appropriate accounting.
* The caller must have disabled interrupts.
*/
static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
int user)
static void rcu_eqs_exit_common(long long oldval, int user)
{
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

rcu_dynticks_task_exit();
smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
atomic_inc(&rdtp->dynticks);
/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
smp_mb__after_atomic(); /* See above. */
WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
rcu_cleanup_after_idle(smp_processor_id());
rcu_cleanup_after_idle();
trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
if (!user && !is_idle_task(current)) {
struct task_struct *idle __maybe_unused =
@@ -691,7 +679,7 @@ static void rcu_eqs_exit(bool user)
rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
} else {
rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
rcu_eqs_exit_common(rdtp, oldval, user);
rcu_eqs_exit_common(oldval, user);
}
}

@@ -712,7 +700,7 @@ void rcu_idle_exit(void)

local_irq_save(flags);
rcu_eqs_exit(false);
rcu_sysidle_exit(this_cpu_ptr(&rcu_dynticks), 0);
rcu_sysidle_exit(0);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -763,8 +751,8 @@ void rcu_irq_enter(void)
if (oldval)
trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
else
rcu_eqs_exit_common(rdtp, oldval, true);
rcu_sysidle_exit(rdtp, 1);
rcu_eqs_exit_common(oldval, true);
rcu_sysidle_exit(1);
local_irq_restore(flags);
}

@@ -2387,7 +2375,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
* invoked from the scheduling-clock interrupt. If rcu_pending returns
* false, there is no point in invoking rcu_check_callbacks().
*/
void rcu_check_callbacks(int cpu, int user)
void rcu_check_callbacks(int user)
{
trace_rcu_utilization(TPS("Start scheduler-tick"));
increment_cpu_stall_ticks();
@@ -2419,8 +2407,8 @@ void rcu_check_callbacks(int cpu, int user)

rcu_bh_qs();
}
rcu_preempt_check_callbacks(cpu);
if (rcu_pending(cpu))
rcu_preempt_check_callbacks();
if (rcu_pending())
invoke_rcu_core();
if (user)
rcu_note_voluntary_context_switch(current);
@@ -2963,6 +2951,9 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
*/
void synchronize_sched_expedited(void)
{
cpumask_var_t cm;
bool cma = false;
int cpu;
long firstsnap, s, snap;
int trycount = 0;
struct rcu_state *rsp = &rcu_sched_state;
@@ -2997,11 +2988,26 @@ void synchronize_sched_expedited(void)
}
WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));

/* Offline CPUs, idle CPUs, and any CPU we run on are quiescent. */
cma = zalloc_cpumask_var(&cm, GFP_KERNEL);
if (cma) {
cpumask_copy(cm, cpu_online_mask);
cpumask_clear_cpu(raw_smp_processor_id(), cm);
for_each_cpu(cpu, cm) {
struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
cpumask_clear_cpu(cpu, cm);
}
if (cpumask_weight(cm) == 0)
goto all_cpus_idle;
}

/*
* Each pass through the following loop attempts to force a
* context switch on each CPU.
*/
while (try_stop_cpus(cpu_online_mask,
while (try_stop_cpus(cma ? cm : cpu_online_mask,
synchronize_sched_expedited_cpu_stop,
NULL) == -EAGAIN) {
put_online_cpus();
@@ -3013,6 +3019,7 @@ void synchronize_sched_expedited(void)
/* ensure test happens before caller kfree */
smp_mb__before_atomic(); /* ^^^ */
atomic_long_inc(&rsp->expedited_workdone1);
free_cpumask_var(cm);
return;
}

@@ -3022,6 +3029,7 @@ void synchronize_sched_expedited(void)
} else {
wait_rcu_gp(call_rcu_sched);
atomic_long_inc(&rsp->expedited_normal);
free_cpumask_var(cm);
return;
}

@@ -3031,6 +3039,7 @@ void synchronize_sched_expedited(void)
/* ensure test happens before caller kfree */
smp_mb__before_atomic(); /* ^^^ */
atomic_long_inc(&rsp->expedited_workdone2);
free_cpumask_var(cm);
return;
}

@@ -3045,6 +3054,7 @@ void synchronize_sched_expedited(void)
/* CPU hotplug operation in flight, use normal GP. */
wait_rcu_gp(call_rcu_sched);
atomic_long_inc(&rsp->expedited_normal);
free_cpumask_var(cm);
return;
}
snap = atomic_long_read(&rsp->expedited_start);
@@ -3052,6 +3062,9 @@ void synchronize_sched_expedited(void)
}
atomic_long_inc(&rsp->expedited_stoppedcpus);

all_cpus_idle:
free_cpumask_var(cm);

/*
* Everyone up to our most recent fetch is covered by our grace
* period. Update the counter, but only if our work is still
@@ -3143,12 +3156,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
* by the current CPU, returning 1 if so. This function is part of the
* RCU implementation; it is -not- an exported member of the RCU API.
*/
static int rcu_pending(int cpu)
static int rcu_pending(void)
{
struct rcu_state *rsp;

for_each_rcu_flavor(rsp)
if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu)))
if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
return 1;
return 0;
}
@@ -3158,7 +3171,7 @@ static int rcu_pending(int cpu)
* non-NULL, store an indication of whether all callbacks are lazy.
* (If there are no callbacks, all of them are deemed to be lazy.)
*/
static int __maybe_unused rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
static int __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
{
bool al = true;
bool hc = false;
@@ -3166,7 +3179,7 @@ static int __maybe_unused rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
struct rcu_state *rsp;

for_each_rcu_flavor(rsp) {
rdp = per_cpu_ptr(rsp->rda, cpu);
rdp = this_cpu_ptr(rsp->rda);
if (!rdp->nxtlist)
continue;
hc = true;
@@ -3485,8 +3498,10 @@ static int rcu_cpu_notify(struct notifier_block *self,
case CPU_DEAD_FROZEN:
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
for_each_rcu_flavor(rsp)
for_each_rcu_flavor(rsp) {
rcu_cleanup_dead_cpu(cpu, rsp);
do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
}
break;
default:
break;
@@ -3766,6 +3781,8 @@ void __init rcu_init(void)
pm_notifier(rcu_pm_notify, 0);
for_each_online_cpu(cpu)
rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);

rcu_early_boot_tests();
}

#include "tree_plugin.h"

@@ -139,7 +139,7 @@ struct rcu_node {
unsigned long expmask; /* Groups that have ->blkd_tasks */
/* elements that need to drain to allow the */
/* current expedited grace period to */
/* complete (only for TREE_PREEMPT_RCU). */
/* complete (only for PREEMPT_RCU). */
unsigned long qsmaskinit;
/* Per-GP initial value for qsmask & expmask. */
unsigned long grpmask; /* Mask to apply to parent qsmask. */
@@ -530,10 +530,10 @@ DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);
extern struct rcu_state rcu_bh_state;
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);

#ifdef CONFIG_TREE_PREEMPT_RCU
#ifdef CONFIG_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
@@ -547,7 +547,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
long rcu_batches_completed(void);
static void rcu_preempt_note_context_switch(int cpu);
static void rcu_preempt_note_context_switch(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
@@ -561,12 +561,12 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
struct rcu_node *rnp,
struct rcu_data *rdp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_preempt_check_callbacks(int cpu);
static void rcu_preempt_check_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU)
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
bool wake);
#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU) */
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -579,8 +579,8 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
#endif /* #ifdef CONFIG_RCU_BOOST */
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(int cpu);
static void rcu_prepare_for_idle(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static void rcu_idle_count_callbacks_posted(void);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
@@ -606,8 +606,8 @@ static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq);
static void rcu_sysidle_enter(int irq);
static void rcu_sysidle_exit(int irq);
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
unsigned long *maxj);
static bool is_sysidle_rcu_state(struct rcu_state *rsp);

@@ -30,14 +30,24 @@
#include <linux/smpboot.h>
#include "../time/tick-internal.h"

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST

#include "../locking/rtmutex_common.h"
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
module_param(kthread_prio, int, 0644);

/*
* Control variables for per-CPU and per-rcu_node kthreads. These
* handle all flavors of RCU.
*/
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);

#endif /* #ifdef CONFIG_RCU_BOOST */

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
@@ -72,9 +82,6 @@ static void __init rcu_bootup_announce_oddness(void)
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
pr_info("\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
pr_info("\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
pr_info("\tAdditional per-CPU info printed with stalls.\n");
#endif
@@ -85,9 +92,12 @@ static void __init rcu_bootup_announce_oddness(void)
pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
if (nr_cpu_ids != NR_CPUS)
pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_BOOST
pr_info("\tRCU kthread priority: %d.\n", kthread_prio);
#endif
}

#ifdef CONFIG_TREE_PREEMPT_RCU
#ifdef CONFIG_PREEMPT_RCU

RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
static struct rcu_state *rcu_state_p = &rcu_preempt_state;
@@ -156,7 +166,7 @@ static void rcu_preempt_qs(void)
*
* Caller must disable preemption.
*/
static void rcu_preempt_note_context_switch(int cpu)
static void rcu_preempt_note_context_switch(void)
{
struct task_struct *t = current;
unsigned long flags;
@@ -167,7 +177,7 @@ static void rcu_preempt_note_context_switch(int cpu)
!t->rcu_read_unlock_special.b.blocked) {

/* Possibly blocking in an RCU read-side critical section. */
rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
rdp = this_cpu_ptr(rcu_preempt_state.rda);
rnp = rdp->mynode;
raw_spin_lock_irqsave(&rnp->lock, flags);
smp_mb__after_unlock_lock();
@@ -415,8 +425,6 @@ void rcu_read_unlock_special(struct task_struct *t)
}
}

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
* Dump detailed information for all tasks blocking the current RCU
* grace period on the specified rcu_node structure.
@@ -451,14 +459,6 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
@@ -621,7 +621,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
*
* Caller must disable hard irqs.
*/
static void rcu_preempt_check_callbacks(int cpu)
static void rcu_preempt_check_callbacks(void)
{
struct task_struct *t = current;

@@ -630,8 +630,8 @@ static void rcu_preempt_check_callbacks(int cpu)
return;
}
if (t->rcu_read_lock_nesting > 0 &&
per_cpu(rcu_preempt_data, cpu).qs_pending &&
!per_cpu(rcu_preempt_data, cpu).passed_quiesce)
__this_cpu_read(rcu_preempt_data.qs_pending) &&
!__this_cpu_read(rcu_preempt_data.passed_quiesce))
t->rcu_read_unlock_special.b.need_qs = true;
}

@@ -919,7 +919,7 @@ void exit_rcu(void)
__rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#else /* #ifdef CONFIG_PREEMPT_RCU */

static struct rcu_state *rcu_state_p = &rcu_sched_state;

@@ -945,7 +945,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
* Because preemptible RCU does not exist, we never have to check for
* CPUs being in quiescent states.
*/
static void rcu_preempt_note_context_switch(int cpu)
static void rcu_preempt_note_context_switch(void)
{
}

@@ -1017,7 +1017,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
* Because preemptible RCU does not exist, it never has any callbacks
* to check.
*/
static void rcu_preempt_check_callbacks(int cpu)
static void rcu_preempt_check_callbacks(void)
{
}

@@ -1070,7 +1070,7 @@ void exit_rcu(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

@@ -1326,7 +1326,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
smp_mb__after_unlock_lock();
rnp->boost_kthread_task = t;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
sp.sched_priority = RCU_BOOST_PRIO;
sp.sched_priority = kthread_prio;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
return 0;
@@ -1343,7 +1343,7 @@ static void rcu_cpu_kthread_setup(unsigned int cpu)
{
struct sched_param sp;

sp.sched_priority = RCU_KTHREAD_PRIO;
sp.sched_priority = kthread_prio;
sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
}

@@ -1512,10 +1512,10 @@ static void rcu_prepare_kthreads(int cpu)
* any flavor of RCU.
*/
#ifndef CONFIG_RCU_NOCB_CPU_ALL
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
int rcu_needs_cpu(unsigned long *delta_jiffies)
{
*delta_jiffies = ULONG_MAX;
return rcu_cpu_has_callbacks(cpu, NULL);
return rcu_cpu_has_callbacks(NULL);
}
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */

@@ -1523,7 +1523,7 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
* Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
* after it.
*/
static void rcu_cleanup_after_idle(int cpu)
static void rcu_cleanup_after_idle(void)
{
}

@@ -1531,7 +1531,7 @@ static void rcu_cleanup_after_idle(int cpu)
* Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
* is nothing.
*/
static void rcu_prepare_for_idle(int cpu)
static void rcu_prepare_for_idle(void)
{
}

@@ -1624,15 +1624,15 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
* The caller must have disabled interrupts.
*/
#ifndef CONFIG_RCU_NOCB_CPU_ALL
int rcu_needs_cpu(int cpu, unsigned long *dj)
int rcu_needs_cpu(unsigned long *dj)
{
struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

/* Snapshot to detect later posting of non-lazy callback. */
rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;

/* If no callbacks, RCU doesn't need the CPU. */
if (!rcu_cpu_has_callbacks(cpu, &rdtp->all_lazy)) {
if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
*dj = ULONG_MAX;
return 0;
}
@@ -1666,12 +1666,12 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
*
* The caller must have disabled interrupts.
*/
static void rcu_prepare_for_idle(int cpu)
static void rcu_prepare_for_idle(void)
{
#ifndef CONFIG_RCU_NOCB_CPU_ALL
bool needwake;
struct rcu_data *rdp;
struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
struct rcu_node *rnp;
struct rcu_state *rsp;
int tne;
@@ -1679,7 +1679,7 @@ static void rcu_prepare_for_idle(int cpu)
/* Handle nohz enablement switches conservatively. */
tne = ACCESS_ONCE(tick_nohz_active);
if (tne != rdtp->tick_nohz_enabled_snap) {
if (rcu_cpu_has_callbacks(cpu, NULL))
if (rcu_cpu_has_callbacks(NULL))
invoke_rcu_core(); /* force nohz to see update. */
rdtp->tick_nohz_enabled_snap = tne;
return;
@@ -1688,7 +1688,7 @@ static void rcu_prepare_for_idle(int cpu)
return;

/* If this is a no-CBs CPU, no callbacks, just return. */
if (rcu_is_nocb_cpu(cpu))
if (rcu_is_nocb_cpu(smp_processor_id()))
return;

/*
@@ -1712,7 +1712,7 @@ static void rcu_prepare_for_idle(int cpu)
return;
rdtp->last_accelerate = jiffies;
for_each_rcu_flavor(rsp) {
rdp = per_cpu_ptr(rsp->rda, cpu);
rdp = this_cpu_ptr(rsp->rda);
if (!*rdp->nxttail[RCU_DONE_TAIL])
continue;
rnp = rdp->mynode;
@@ -1731,10 +1731,10 @@ static void rcu_prepare_for_idle(int cpu)
* any grace periods that elapsed while the CPU was idle, and if any
* callbacks are now ready to invoke, initiate invocation.
*/
static void rcu_cleanup_after_idle(int cpu)
static void rcu_cleanup_after_idle(void)
{
#ifndef CONFIG_RCU_NOCB_CPU_ALL
if (rcu_is_nocb_cpu(cpu))
if (rcu_is_nocb_cpu(smp_processor_id()))
return;
if (rcu_try_advance_all_cbs())
invoke_rcu_core();
@@ -2573,9 +2573,13 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
rdp->nocb_leader = rdp_spawn;
if (rdp_last && rdp != rdp_spawn)
rdp_last->nocb_next_follower = rdp;
rdp_last = rdp;
rdp = rdp->nocb_next_follower;
rdp_last->nocb_next_follower = NULL;
if (rdp == rdp_spawn) {
rdp = rdp->nocb_next_follower;
} else {
rdp_last = rdp;
rdp = rdp->nocb_next_follower;
rdp_last->nocb_next_follower = NULL;
}
} while (rdp);
rdp_spawn->nocb_next_follower = rdp_old_leader;
}
@@ -2761,9 +2765,10 @@ static int full_sysidle_state; /* Current system-idle state. */
* to detect full-system idle states, not RCU quiescent states and grace
* periods. The caller must have disabled interrupts.
*/
static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
static void rcu_sysidle_enter(int irq)
{
unsigned long j;
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

/* If there are no nohz_full= CPUs, no need to track this. */
if (!tick_nohz_full_enabled())
@@ -2832,8 +2837,10 @@ void rcu_sysidle_force_exit(void)
* usermode execution does -not- count as idle here! The caller must
* have disabled interrupts.
*/
static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
static void rcu_sysidle_exit(int irq)
{
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

/* If there are no nohz_full= CPUs, no need to track this. */
if (!tick_nohz_full_enabled())
return;
@@ -3127,11 +3134,11 @@ static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)

#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */

static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
static void rcu_sysidle_enter(int irq)
{
}

static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
static void rcu_sysidle_exit(int irq)
{
}

@@ -306,7 +306,7 @@ struct debug_obj_descr rcuhead_debug_descr = {
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
unsigned long secs,
unsigned long c_old, unsigned long c)
@@ -531,7 +531,8 @@ static int __noreturn rcu_tasks_kthread(void *arg)
struct rcu_head *next;
LIST_HEAD(rcu_tasks_holdouts);

/* FIXME: Add housekeeping affinity. */
/* Run on housekeeping CPUs by default. Sysadm can move if desired. */
housekeeping_affine(current);

/*
* Each pass through the following loop makes one check for
@@ -690,3 +691,87 @@ static void rcu_spawn_tasks_kthread(void)
}

#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_PROVE_RCU

/*
* Early boot self test parameters, one for each flavor
*/
static bool rcu_self_test;
static bool rcu_self_test_bh;
static bool rcu_self_test_sched;

module_param(rcu_self_test, bool, 0444);
module_param(rcu_self_test_bh, bool, 0444);
module_param(rcu_self_test_sched, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
rcu_self_test_counter++;
pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

static void early_boot_test_call_rcu(void)
{
static struct rcu_head head;

call_rcu(&head, test_callback);
}

static void early_boot_test_call_rcu_bh(void)
{
static struct rcu_head head;

call_rcu_bh(&head, test_callback);
}

static void early_boot_test_call_rcu_sched(void)
{
static struct rcu_head head;

call_rcu_sched(&head, test_callback);
}

void rcu_early_boot_tests(void)
{
pr_info("Running RCU self tests\n");

if (rcu_self_test)
early_boot_test_call_rcu();
if (rcu_self_test_bh)
early_boot_test_call_rcu_bh();
if (rcu_self_test_sched)
early_boot_test_call_rcu_sched();
}

static int rcu_verify_early_boot_tests(void)
{
int ret = 0;
int early_boot_test_counter = 0;

if (rcu_self_test) {
early_boot_test_counter++;
rcu_barrier();
}
if (rcu_self_test_bh) {
early_boot_test_counter++;
rcu_barrier_bh();
}
if (rcu_self_test_sched) {
early_boot_test_counter++;
rcu_barrier_sched();
}

if (rcu_self_test_counter != early_boot_test_counter) {
WARN_ON(1);
ret = -1;
}

return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */

@@ -2773,7 +2773,7 @@ need_resched:
preempt_disable();
cpu = smp_processor_id();
rq = cpu_rq(cpu);
rcu_note_context_switch(cpu);
rcu_note_context_switch();
prev = rq->curr;

schedule_debug(prev);
@@ -2874,10 +2874,14 @@ asmlinkage __visible void __sched schedule_user(void)
* or we have been woken up remotely but the IPI has not yet arrived,
* we haven't yet exited the RCU idle mode. Do it here manually until
* we find a better solution.
*
* NB: There are buggy callers of this function. Ideally we
* should warn if prev_state != IN_USER, but that will trigger
* too frequently to make sense yet.
*/
user_exit();
enum ctx_state prev_state = exception_enter();
schedule();
user_enter();
exception_exit(prev_state);
}
#endif

@@ -1275,7 +1275,17 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
local_irq_restore(*flags);
break;
}

/*
* This sighand can be already freed and even reused, but
* we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
* initializes ->siglock: this slab can't go away, it has
* the same object type, ->siglock can't be reinitialized.
*
* We need to ensure that tsk->sighand is still the same
* after we take the lock, we can race with de_thread() or
* __exit_signal(). In the latter case the next iteration
* must see ->sighand == NULL.
*/
spin_lock(&sighand->siglock);
if (likely(sighand == tsk->sighand)) {
rcu_read_unlock();
@@ -1331,23 +1341,21 @@ int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
int error = -ESRCH;
struct task_struct *p;

rcu_read_lock();
retry:
p = pid_task(pid, PIDTYPE_PID);
if (p) {
error = group_send_sig_info(sig, info, p);
if (unlikely(error == -ESRCH))
/*
* The task was unhashed in between, try again.
* If it is dead, pid_task() will return NULL,
* if we race with de_thread() it will find the
* new leader.
*/
goto retry;
}
rcu_read_unlock();
for (;;) {
rcu_read_lock();
p = pid_task(pid, PIDTYPE_PID);
if (p)
error = group_send_sig_info(sig, info, p);
rcu_read_unlock();
if (likely(!p || error != -ESRCH))
return error;

return error;
/*
* The task was unhashed in between, try again. If it
* is dead, pid_task() will return NULL, if we race with
* de_thread() it will find the new leader.
*/
}
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)

@@ -656,7 +656,7 @@ static void run_ksoftirqd(unsigned int cpu)
* in the task stack here.
*/
__do_softirq();
rcu_note_context_switch(cpu);
rcu_note_context_switch();
local_irq_enable();
cond_resched();
return;

@@ -585,7 +585,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
last_jiffies = jiffies;
} while (read_seqretry(&jiffies_lock, seq));

if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
if (rcu_needs_cpu(&rcu_delta_jiffies) ||
arch_needs_cpu() || irq_work_needs_cpu()) {
next_jiffies = last_jiffies + 1;
delta_jiffies = 1;

@@ -1377,12 +1377,11 @@ unsigned long get_next_timer_interrupt(unsigned long now)
void update_process_times(int user_tick)
{
struct task_struct *p = current;
int cpu = smp_processor_id();

/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
run_local_timers();
rcu_check_callbacks(cpu, user_tick);
rcu_check_callbacks(user_tick);
#ifdef CONFIG_IRQ_WORK
if (in_irq())
irq_work_tick();