Merge branch 'rcu-next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull the latest RCU tree from Paul E. McKenney:

 - Additional cleanups after RCU flavor consolidation
 - Grace-period forward-progress cleanups and improvements
 - Documentation updates
 - Miscellaneous fixes
 - spin_is_locked() conversions to lockdep
 - SPDX changes to RCU source and header files
 - SRCU updates
 - Torture-test updates, including nolibc updates and moving nolibc to
   tools/include

Signed-off-by: Ingo Molnar <mingo@kernel.org>
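One of the items above, the spin_is_locked() conversions, follows a common kernel pattern: an open-coded "is this lock held?" check is replaced by a lockdep assertion. The sketch below is illustrative only and is not taken from this series; struct foo and the foo_update_*() helpers are hypothetical names.

	/* Illustrative sketch only -- not a call site from this series. */
	#include <linux/spinlock.h>
	#include <linux/lockdep.h>
	#include <linux/bug.h>

	struct foo {
		spinlock_t lock;
		int counter;
	};

	/* Before: spin_is_locked() only reports that *somebody* holds the
	 * lock, not that the current context does, and it is meaningless
	 * on uniprocessor builds where spinlocks compile away.
	 */
	static void foo_update_old(struct foo *f)
	{
		WARN_ON_ONCE(!spin_is_locked(&f->lock));
		f->counter++;
	}

	/* After: lockdep checks that the current context actually holds
	 * the lock, and the assertion compiles to nothing when lockdep
	 * is disabled.
	 */
	static void foo_update_new(struct foo *f)
	{
		lockdep_assert_held(&f->lock);
		f->counter++;
	}

The lockdep form is stricter (it checks ownership rather than raw lock state) and costs nothing in production configurations, which is the motivation for conversions of this kind.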
@@ -1,23 +1,10 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */
@@ -45,7 +32,7 @@
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
@@ -970,7 +957,7 @@ static int __init lock_torture_init(void)
	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
					      onoff_interval * HZ, NULL);
		if (firsterr)
			goto unwind;
	}

@@ -1,23 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
@@ -30,7 +17,7 @@
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/* Offset to allow for unmatched rcu_irq_{enter,exit}(). */
/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)


@@ -462,8 +449,6 @@ void rcu_request_urgent_qs_task(struct task_struct *t);

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_BH_FLAVOR,
	RCU_SCHED_FLAVOR,
	RCU_TASKS_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR

@@ -1,23 +1,10 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU segmented callback lists, function definitions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/types.h>

@@ -1,23 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU segmented callback lists, internal-to-rcu header file
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/rcu_segcblist.h>

@@ -1,23 +1,10 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based performance-test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt
@@ -54,7 +41,7 @@
#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define PERF_FLAG "-perf:"
#define PERFOUT_STRING(s) \
@@ -83,13 +70,19 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");
 * Various other use cases may of course be specified.
 */

#ifdef MODULE
# define RCUPERF_SHUTDOWN 0
#else
# define RCUPERF_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, !IS_ENABLED(MODULE),
torture_param(bool, shutdown, RCUPERF_SHUTDOWN,
	      "Shutdown at end of performance tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");

@@ -1,23 +1,10 @@
|
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
/*
|
||||
* Read-Copy Update module-based torture test facility
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright (C) IBM Corporation, 2005, 2006
|
||||
*
|
||||
* Authors: Paul E. McKenney <paulmck@us.ibm.com>
|
||||
* Authors: Paul E. McKenney <paulmck@linux.ibm.com>
|
||||
* Josh Triplett <josh@joshtriplett.org>
|
||||
*
|
||||
* See also: Documentation/RCU/torture.txt
|
||||
@@ -61,7 +48,7 @@
|
||||
#include "rcu.h"
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
|
||||
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
|
||||
|
||||
|
||||
/* Bits for ->extendables field, extendables param, and related definitions. */
|
||||
@@ -1630,21 +1617,34 @@ static bool rcu_fwd_emergency_stop;
|
||||
#define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
|
||||
#define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
|
||||
#define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */
|
||||
static long n_launders_hist[2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)];
|
||||
struct rcu_launder_hist {
|
||||
long n_launders;
|
||||
unsigned long launder_gp_seq;
|
||||
};
|
||||
#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
|
||||
static struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
|
||||
static unsigned long rcu_launder_gp_seq_start;
|
||||
|
||||
static void rcu_torture_fwd_cb_hist(void)
|
||||
{
|
||||
unsigned long gps;
|
||||
unsigned long gps_old;
|
||||
int i;
|
||||
int j;
|
||||
|
||||
for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
|
||||
if (n_launders_hist[i] > 0)
|
||||
if (n_launders_hist[i].n_launders > 0)
|
||||
break;
|
||||
pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
|
||||
__func__, jiffies - rcu_fwd_startat);
|
||||
for (j = 0; j <= i; j++)
|
||||
pr_cont(" %ds/%d: %ld",
|
||||
j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j]);
|
||||
gps_old = rcu_launder_gp_seq_start;
|
||||
for (j = 0; j <= i; j++) {
|
||||
gps = n_launders_hist[j].launder_gp_seq;
|
||||
pr_cont(" %ds/%d: %ld:%ld",
|
||||
j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j].n_launders,
|
||||
rcutorture_seq_diff(gps, gps_old));
|
||||
gps_old = gps;
|
||||
}
|
||||
pr_cont("\n");
|
||||
}
|
||||
|
||||
@@ -1666,7 +1666,8 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
|
||||
i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
|
||||
if (i >= ARRAY_SIZE(n_launders_hist))
|
||||
i = ARRAY_SIZE(n_launders_hist) - 1;
|
||||
n_launders_hist[i]++;
|
||||
n_launders_hist[i].n_launders++;
|
||||
n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
|
||||
spin_unlock_irqrestore(&rcu_fwd_lock, flags);
|
||||
}
|
||||
|
||||
@@ -1786,9 +1787,10 @@ static void rcu_torture_fwd_prog_cr(void)
|
||||
n_max_cbs = 0;
|
||||
n_max_gps = 0;
|
||||
for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
|
||||
n_launders_hist[i] = 0;
|
||||
n_launders_hist[i].n_launders = 0;
|
||||
cver = READ_ONCE(rcu_torture_current_version);
|
||||
gps = cur_ops->get_gp_seq();
|
||||
rcu_launder_gp_seq_start = gps;
|
||||
while (time_before(jiffies, stopat) &&
|
||||
!READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
|
||||
rfcp = READ_ONCE(rcu_fwd_cb_head);
|
||||
@@ -2228,6 +2230,14 @@ static void rcu_test_debug_objects(void)
|
||||
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
|
||||
}
|
||||
|
||||
static void rcutorture_sync(void)
|
||||
{
|
||||
static unsigned long n;
|
||||
|
||||
if (cur_ops->sync && !(++n & 0xfff))
|
||||
cur_ops->sync();
|
||||
}
|
||||
|
||||
static int __init
|
||||
rcu_torture_init(void)
|
||||
{
|
||||
@@ -2389,7 +2399,8 @@ rcu_torture_init(void)
|
||||
firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
|
||||
if (firsterr)
|
||||
goto unwind;
|
||||
firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
|
||||
firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
|
||||
rcutorture_sync);
|
||||
if (firsterr)
|
||||
goto unwind;
|
||||
firsterr = rcu_torture_stall_init();
|
||||
|
@@ -1,24 +1,11 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 *	tiny version for non-preemptible single-CPU use.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 * Author: Paul McKenney <paulmck@linux.ibm.com>
 */

#include <linux/export.h>

@@ -1,24 +1,11 @@
|
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
/*
|
||||
* Sleepable Read-Copy Update mechanism for mutual exclusion.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright (C) IBM Corporation, 2006
|
||||
* Copyright (C) Fujitsu, 2012
|
||||
*
|
||||
* Author: Paul McKenney <paulmck@us.ibm.com>
|
||||
* Author: Paul McKenney <paulmck@linux.ibm.com>
|
||||
* Lai Jiangshan <laijs@cn.fujitsu.com>
|
||||
*
|
||||
* For detailed explanation of Read-Copy Update mechanism see -
|
||||
@@ -58,6 +45,7 @@ static bool __read_mostly srcu_init_done;
|
||||
static void srcu_invoke_callbacks(struct work_struct *work);
|
||||
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
|
||||
static void process_srcu(struct work_struct *work);
|
||||
static void srcu_delay_timer(struct timer_list *t);
|
||||
|
||||
/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
|
||||
#define spin_lock_rcu_node(p) \
|
||||
@@ -156,7 +144,8 @@ static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
|
||||
snp->grphi = cpu;
|
||||
}
|
||||
sdp->cpu = cpu;
|
||||
INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
|
||||
INIT_WORK(&sdp->work, srcu_invoke_callbacks);
|
||||
timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
|
||||
sdp->ssp = ssp;
|
||||
sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
|
||||
if (is_static)
|
||||
@@ -386,13 +375,19 @@ void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
|
||||
} else {
|
||||
flush_delayed_work(&ssp->work);
|
||||
}
|
||||
for_each_possible_cpu(cpu)
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
|
||||
|
||||
if (quiesced) {
|
||||
if (WARN_ON(delayed_work_pending(&per_cpu_ptr(ssp->sda, cpu)->work)))
|
||||
if (WARN_ON(timer_pending(&sdp->delay_work)))
|
||||
return; /* Just leak it! */
|
||||
if (WARN_ON(work_pending(&sdp->work)))
|
||||
return; /* Just leak it! */
|
||||
} else {
|
||||
flush_delayed_work(&per_cpu_ptr(ssp->sda, cpu)->work);
|
||||
del_timer_sync(&sdp->delay_work);
|
||||
flush_work(&sdp->work);
|
||||
}
|
||||
}
|
||||
if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
|
||||
WARN_ON(srcu_readers_active(ssp))) {
|
||||
pr_info("%s: Active srcu_struct %p state: %d\n",
|
||||
@@ -463,39 +458,23 @@ static void srcu_gp_start(struct srcu_struct *ssp)
|
||||
WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Track online CPUs to guide callback workqueue placement.
|
||||
*/
|
||||
DEFINE_PER_CPU(bool, srcu_online);
|
||||
|
||||
void srcu_online_cpu(unsigned int cpu)
|
||||
static void srcu_delay_timer(struct timer_list *t)
|
||||
{
|
||||
WRITE_ONCE(per_cpu(srcu_online, cpu), true);
|
||||
struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
|
||||
|
||||
queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
|
||||
}
|
||||
|
||||
void srcu_offline_cpu(unsigned int cpu)
|
||||
{
|
||||
WRITE_ONCE(per_cpu(srcu_online, cpu), false);
|
||||
}
|
||||
|
||||
/*
|
||||
* Place the workqueue handler on the specified CPU if online, otherwise
|
||||
* just run it wherever. This is useful for placing workqueue handlers
|
||||
* that are to invoke the specified CPU's callbacks.
|
||||
*/
|
||||
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
|
||||
struct delayed_work *dwork,
|
||||
static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
|
||||
unsigned long delay)
|
||||
{
|
||||
bool ret;
|
||||
if (!delay) {
|
||||
queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
|
||||
return;
|
||||
}
|
||||
|
||||
preempt_disable();
|
||||
if (READ_ONCE(per_cpu(srcu_online, cpu)))
|
||||
ret = queue_delayed_work_on(cpu, wq, dwork, delay);
|
||||
else
|
||||
ret = queue_delayed_work(wq, dwork, delay);
|
||||
preempt_enable();
|
||||
return ret;
|
||||
timer_reduce(&sdp->delay_work, jiffies + delay);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -504,7 +483,7 @@ static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
|
||||
*/
|
||||
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
|
||||
{
|
||||
srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay);
|
||||
srcu_queue_delayed_work_on(sdp, delay);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1186,7 +1165,8 @@ static void srcu_invoke_callbacks(struct work_struct *work)
|
||||
struct srcu_data *sdp;
|
||||
struct srcu_struct *ssp;
|
||||
|
||||
sdp = container_of(work, struct srcu_data, work.work);
|
||||
sdp = container_of(work, struct srcu_data, work);
|
||||
|
||||
ssp = sdp->ssp;
|
||||
rcu_cblist_init(&ready_cbs);
|
||||
spin_lock_irq_rcu_node(sdp);
|
||||
|
@@ -1,20 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>

@@ -1,23 +1,10 @@
|
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
/*
|
||||
* Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright IBM Corporation, 2008
|
||||
*
|
||||
* Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
|
||||
* Author: Paul E. McKenney <paulmck@linux.ibm.com>
|
||||
*
|
||||
* For detailed explanation of Read-Copy Update mechanism see -
|
||||
* Documentation/RCU
|
||||
@@ -76,7 +63,7 @@ void rcu_qs(void)
|
||||
* be called from hardirq context. It is normally called from the
|
||||
* scheduling-clock interrupt.
|
||||
*/
|
||||
void rcu_check_callbacks(int user)
|
||||
void rcu_sched_clock_irq(int user)
|
||||
{
|
||||
if (user) {
|
||||
rcu_qs();
|
||||
|
@@ -1,27 +1,14 @@
|
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
/*
|
||||
* Read-Copy Update mechanism for mutual exclusion
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright IBM Corporation, 2008
|
||||
*
|
||||
* Authors: Dipankar Sarma <dipankar@in.ibm.com>
|
||||
* Manfred Spraul <manfred@colorfullife.com>
|
||||
* Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
|
||||
* Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical version
|
||||
*
|
||||
* Based on the original work by Paul McKenney <paulmck@us.ibm.com>
|
||||
* Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
|
||||
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
|
||||
*
|
||||
* For detailed explanation of Read-Copy Update mechanism see -
|
||||
@@ -62,6 +49,7 @@
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/ftrace.h>
|
||||
#include <linux/tick.h>
|
||||
#include <linux/sysrq.h>
|
||||
|
||||
#include "tree.h"
|
||||
#include "rcu.h"
|
||||
@@ -115,6 +103,9 @@ int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
|
||||
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
|
||||
/* panic() on RCU Stall sysctl. */
|
||||
int sysctl_panic_on_rcu_stall __read_mostly;
|
||||
/* Commandeer a sysrq key to dump RCU's tree. */
|
||||
static bool sysrq_rcu;
|
||||
module_param(sysrq_rcu, bool, 0444);
|
||||
|
||||
/*
|
||||
* The rcu_scheduler_active variable is initialized to the value
|
||||
@@ -479,7 +470,6 @@ module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next
|
||||
module_param(rcu_kick_kthreads, bool, 0644);
|
||||
|
||||
static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
|
||||
static void force_quiescent_state(void);
|
||||
static int rcu_pending(void);
|
||||
|
||||
/*
|
||||
@@ -504,13 +494,12 @@ unsigned long rcu_exp_batches_completed(void)
|
||||
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
|
||||
|
||||
/*
|
||||
* Force a quiescent state.
|
||||
* Return the root node of the rcu_state structure.
|
||||
*/
|
||||
void rcu_force_quiescent_state(void)
|
||||
static struct rcu_node *rcu_get_root(void)
|
||||
{
|
||||
force_quiescent_state();
|
||||
return &rcu_state.node[0];
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
|
||||
|
||||
/*
|
||||
* Convert a ->gp_state value to a character string.
|
||||
@@ -529,19 +518,30 @@ void show_rcu_gp_kthreads(void)
|
||||
{
|
||||
int cpu;
|
||||
unsigned long j;
|
||||
unsigned long ja;
|
||||
unsigned long jr;
|
||||
unsigned long jw;
|
||||
struct rcu_data *rdp;
|
||||
struct rcu_node *rnp;
|
||||
|
||||
j = jiffies - READ_ONCE(rcu_state.gp_activity);
|
||||
pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %ld\n",
|
||||
j = jiffies;
|
||||
ja = j - READ_ONCE(rcu_state.gp_activity);
|
||||
jr = j - READ_ONCE(rcu_state.gp_req_activity);
|
||||
jw = j - READ_ONCE(rcu_state.gp_wake_time);
|
||||
pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
|
||||
rcu_state.name, gp_state_getname(rcu_state.gp_state),
|
||||
rcu_state.gp_state, rcu_state.gp_kthread->state, j);
|
||||
rcu_state.gp_state,
|
||||
rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL,
|
||||
ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
|
||||
(long)READ_ONCE(rcu_state.gp_seq),
|
||||
(long)READ_ONCE(rcu_get_root()->gp_seq_needed),
|
||||
READ_ONCE(rcu_state.gp_flags));
|
||||
rcu_for_each_node_breadth_first(rnp) {
|
||||
if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
|
||||
continue;
|
||||
pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
|
||||
rnp->grplo, rnp->grphi, rnp->gp_seq,
|
||||
rnp->gp_seq_needed);
|
||||
pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
|
||||
rnp->grplo, rnp->grphi, (long)rnp->gp_seq,
|
||||
(long)rnp->gp_seq_needed);
|
||||
if (!rcu_is_leaf_node(rnp))
|
||||
continue;
|
||||
for_each_leaf_node_possible_cpu(rnp, cpu) {
|
||||
@@ -550,14 +550,35 @@ void show_rcu_gp_kthreads(void)
|
||||
ULONG_CMP_GE(rcu_state.gp_seq,
|
||||
rdp->gp_seq_needed))
|
||||
continue;
|
||||
pr_info("\tcpu %d ->gp_seq_needed %lu\n",
|
||||
cpu, rdp->gp_seq_needed);
|
||||
pr_info("\tcpu %d ->gp_seq_needed %ld\n",
|
||||
cpu, (long)rdp->gp_seq_needed);
|
||||
}
|
||||
}
|
||||
/* sched_show_task(rcu_state.gp_kthread); */
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
|
||||
|
||||
/* Dump grace-period-request information due to commandeered sysrq. */
|
||||
static void sysrq_show_rcu(int key)
|
||||
{
|
||||
show_rcu_gp_kthreads();
|
||||
}
|
||||
|
||||
static struct sysrq_key_op sysrq_rcudump_op = {
|
||||
.handler = sysrq_show_rcu,
|
||||
.help_msg = "show-rcu(y)",
|
||||
.action_msg = "Show RCU tree",
|
||||
.enable_mask = SYSRQ_ENABLE_DUMP,
|
||||
};
|
||||
|
||||
static int __init rcu_sysrq_init(void)
|
||||
{
|
||||
if (sysrq_rcu)
|
||||
return register_sysrq_key('y', &sysrq_rcudump_op);
|
||||
return 0;
|
||||
}
|
||||
early_initcall(rcu_sysrq_init);
|
||||
|
||||
/*
|
||||
* Send along grace-period-related data for rcutorture diagnostics.
|
||||
*/
|
||||
@@ -566,8 +587,6 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
|
||||
{
|
||||
switch (test_type) {
|
||||
case RCU_FLAVOR:
|
||||
case RCU_BH_FLAVOR:
|
||||
case RCU_SCHED_FLAVOR:
|
||||
*flags = READ_ONCE(rcu_state.gp_flags);
|
||||
*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
|
||||
break;
|
||||
@@ -577,14 +596,6 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
|
||||
|
||||
/*
|
||||
* Return the root node of the rcu_state structure.
|
||||
*/
|
||||
static struct rcu_node *rcu_get_root(void)
|
||||
{
|
||||
return &rcu_state.node[0];
|
||||
}
|
||||
|
||||
/*
|
||||
* Enter an RCU extended quiescent state, which can be either the
|
||||
* idle loop or adaptive-tickless usermode execution.
|
||||
@@ -701,7 +712,6 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
|
||||
|
||||
/**
|
||||
* rcu_nmi_exit - inform RCU of exit from NMI context
|
||||
* @irq: Is this call from rcu_irq_exit?
|
||||
*
|
||||
* If you add or remove a call to rcu_nmi_exit(), be sure to test
|
||||
* with CONFIG_RCU_EQS_DEBUG=y.
|
||||
@@ -1115,7 +1125,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
|
||||
}
|
||||
|
||||
/*
|
||||
* NO_HZ_FULL CPUs can run in-kernel without rcu_check_callbacks!
|
||||
* NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
|
||||
* The above code handles this, but only for straight cond_resched().
|
||||
* And some in-kernel loops check need_resched() before calling
|
||||
* cond_resched(), which defeats the above code for CPUs that are
|
||||
@@ -1181,7 +1191,7 @@ static void rcu_check_gp_kthread_starvation(void)
|
||||
pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
|
||||
rcu_state.name, j,
|
||||
(long)rcu_seq_current(&rcu_state.gp_seq),
|
||||
rcu_state.gp_flags,
|
||||
READ_ONCE(rcu_state.gp_flags),
|
||||
gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
|
||||
gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
|
||||
if (gpk) {
|
||||
@@ -1310,7 +1320,7 @@ static void print_other_cpu_stall(unsigned long gp_seq)
|
||||
|
||||
panic_on_rcu_stall();
|
||||
|
||||
force_quiescent_state(); /* Kick them all. */
|
||||
rcu_force_quiescent_state(); /* Kick them all. */
|
||||
}
|
||||
|
||||
static void print_cpu_stall(void)
|
||||
@@ -1557,17 +1567,28 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
|
||||
}
|
||||
|
||||
/*
|
||||
* Awaken the grace-period kthread. Don't do a self-awaken, and don't
|
||||
* bother awakening when there is nothing for the grace-period kthread
|
||||
* to do (as in several CPUs raced to awaken, and we lost), and finally
|
||||
* don't try to awaken a kthread that has not yet been created.
|
||||
* Awaken the grace-period kthread. Don't do a self-awaken (unless in
|
||||
* an interrupt or softirq handler), and don't bother awakening when there
|
||||
* is nothing for the grace-period kthread to do (as in several CPUs raced
|
||||
* to awaken, and we lost), and finally don't try to awaken a kthread that
|
||||
* has not yet been created. If all those checks are passed, track some
|
||||
* debug information and awaken.
|
||||
*
|
||||
* So why do the self-wakeup when in an interrupt or softirq handler
|
||||
* in the grace-period kthread's context? Because the kthread might have
|
||||
* been interrupted just as it was going to sleep, and just after the final
|
||||
* pre-sleep check of the awaken condition. In this case, a wakeup really
|
||||
* is required, and is therefore supplied.
|
||||
*/
|
||||
static void rcu_gp_kthread_wake(void)
|
||||
{
|
||||
if (current == rcu_state.gp_kthread ||
|
||||
if ((current == rcu_state.gp_kthread &&
|
||||
!in_interrupt() && !in_serving_softirq()) ||
|
||||
!READ_ONCE(rcu_state.gp_flags) ||
|
||||
!rcu_state.gp_kthread)
|
||||
return;
|
||||
WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
|
||||
WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
|
||||
swake_up_one(&rcu_state.gp_wq);
|
||||
}
|
||||
|
||||
@@ -1711,7 +1732,7 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
|
||||
zero_cpu_stall_ticks(rdp);
|
||||
}
|
||||
rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
|
||||
if (ULONG_CMP_GE(rnp->gp_seq_needed, rdp->gp_seq_needed) || rdp->gpwrap)
|
||||
if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
|
||||
rdp->gp_seq_needed = rnp->gp_seq_needed;
|
||||
WRITE_ONCE(rdp->gpwrap, false);
|
||||
rcu_gpnum_ovf(rnp, rdp);
|
||||
@@ -1939,7 +1960,7 @@ static void rcu_gp_fqs_loop(void)
|
||||
if (!ret) {
|
||||
rcu_state.jiffies_force_qs = jiffies + j;
|
||||
WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
|
||||
jiffies + 3 * j);
|
||||
jiffies + (j ? 3 * j : 2));
|
||||
}
|
||||
trace_rcu_grace_period(rcu_state.name,
|
||||
READ_ONCE(rcu_state.gp_seq),
|
||||
@@ -2497,14 +2518,14 @@ static void rcu_do_batch(struct rcu_data *rdp)
|
||||
}
|
||||
|
||||
/*
|
||||
* Check to see if this CPU is in a non-context-switch quiescent state
|
||||
* (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
|
||||
* Also schedule RCU core processing.
|
||||
*
|
||||
* This function must be called from hardirq context. It is normally
|
||||
* invoked from the scheduling-clock interrupt.
|
||||
* This function is invoked from each scheduling-clock interrupt,
|
||||
* and checks to see if this CPU is in a non-context-switch quiescent
|
||||
* state, for example, user mode or idle loop. It also schedules RCU
|
||||
* core processing. If the current grace period has gone on too long,
|
||||
* it will ask the scheduler to manufacture a context switch for the sole
|
||||
* purpose of providing the needed quiescent state.
|
||||
*/
|
||||
void rcu_check_callbacks(int user)
|
||||
void rcu_sched_clock_irq(int user)
|
||||
{
|
||||
trace_rcu_utilization(TPS("Start scheduler-tick"));
|
||||
raw_cpu_inc(rcu_data.ticks_this_gp);
|
||||
@@ -2517,7 +2538,7 @@ void rcu_check_callbacks(int user)
|
||||
}
|
||||
__this_cpu_write(rcu_data.rcu_urgent_qs, false);
|
||||
}
|
||||
rcu_flavor_check_callbacks(user);
|
||||
rcu_flavor_sched_clock_irq(user);
|
||||
if (rcu_pending())
|
||||
invoke_rcu_core();
|
||||
|
||||
@@ -2578,7 +2599,7 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
|
||||
* Force quiescent states on reluctant CPUs, and also detect which
|
||||
* CPUs are in dyntick-idle mode.
|
||||
*/
|
||||
static void force_quiescent_state(void)
|
||||
void rcu_force_quiescent_state(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
bool ret;
|
||||
@@ -2610,6 +2631,7 @@ static void force_quiescent_state(void)
|
||||
raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
|
||||
rcu_gp_kthread_wake();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
|
||||
|
||||
/*
|
||||
* This function checks for grace-period requests that fail to motivate
|
||||
@@ -2657,16 +2679,11 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
|
||||
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
||||
return;
|
||||
}
|
||||
pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x gs:%d %s->state:%#lx\n",
|
||||
__func__, (long)READ_ONCE(rcu_state.gp_seq),
|
||||
(long)READ_ONCE(rnp_root->gp_seq_needed),
|
||||
j - rcu_state.gp_req_activity, j - rcu_state.gp_activity,
|
||||
rcu_state.gp_flags, rcu_state.gp_state, rcu_state.name,
|
||||
rcu_state.gp_kthread ? rcu_state.gp_kthread->state : 0x1ffffL);
|
||||
WARN_ON(1);
|
||||
if (rnp_root != rnp)
|
||||
raw_spin_unlock_rcu_node(rnp_root);
|
||||
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
||||
show_rcu_gp_kthreads();
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -2711,12 +2728,8 @@ void rcu_fwd_progress_check(unsigned long j)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
|
||||
|
||||
/*
|
||||
* This does the RCU core processing work for the specified rcu_data
|
||||
* structures. This may be called only from the CPU to whom the rdp
|
||||
* belongs.
|
||||
*/
|
||||
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
|
||||
/* Perform RCU core processing work for the current CPU. */
|
||||
static __latent_entropy void rcu_core(struct softirq_action *unused)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
|
||||
@@ -2801,9 +2814,9 @@ static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
|
||||
|
||||
/*
|
||||
* Force the grace period if too many callbacks or too long waiting.
|
||||
* Enforce hysteresis, and don't invoke force_quiescent_state()
|
||||
* Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
|
||||
* if some other CPU has recently done so. Also, don't bother
|
||||
* invoking force_quiescent_state() if the newly enqueued callback
|
||||
* invoking rcu_force_quiescent_state() if the newly enqueued callback
|
||||
* is the only one waiting for a grace period to complete.
|
||||
*/
|
||||
if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
|
||||
@@ -2820,7 +2833,7 @@ static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
|
||||
rdp->blimit = LONG_MAX;
|
||||
if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
|
||||
rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
|
||||
force_quiescent_state();
|
||||
rcu_force_quiescent_state();
|
||||
rdp->n_force_qs_snap = rcu_state.n_force_qs;
|
||||
rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
|
||||
}
|
||||
@@ -2889,9 +2902,6 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
|
||||
rcu_segcblist_init(&rdp->cblist);
|
||||
}
|
||||
rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
|
||||
if (!lazy)
|
||||
rcu_idle_count_callbacks_posted();
|
||||
|
||||
if (__is_kfree_rcu_offset((unsigned long)func))
|
||||
trace_rcu_kfree_callback(rcu_state.name, head,
|
||||
(unsigned long)func,
|
||||
@@ -2961,6 +2971,79 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kfree_call_rcu);
|
||||
|
||||
/*
|
||||
* During early boot, any blocking grace-period wait automatically
|
||||
* implies a grace period. Later on, this is never the case for PREEMPT.
|
||||
*
|
||||
* However, because a context switch is a grace period for !PREEMPT, any
|
||||
* blocking grace-period wait automatically implies a grace period if
|
||||
* there is only one CPU online at any point in time during execution of
|
||||
* either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
|
||||
* occasionally incorrectly indicate that there are multiple CPUs online
|
||||
* when there was in fact only one the whole time, as this just adds some
|
||||
* overhead: RCU still operates correctly.
|
||||
*/
|
||||
static int rcu_blocking_is_gp(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (IS_ENABLED(CONFIG_PREEMPT))
|
||||
return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
|
||||
might_sleep(); /* Check for RCU read-side critical section. */
|
||||
preempt_disable();
|
||||
ret = num_online_cpus() <= 1;
|
||||
preempt_enable();
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* synchronize_rcu - wait until a grace period has elapsed.
|
||||
*
|
||||
* Control will return to the caller some time after a full grace
|
||||
* period has elapsed, in other words after all currently executing RCU
|
||||
* read-side critical sections have completed. Note, however, that
|
||||
* upon return from synchronize_rcu(), the caller might well be executing
|
||||
* concurrently with new RCU read-side critical sections that began while
|
||||
* synchronize_rcu() was waiting. RCU read-side critical sections are
|
||||
* delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
|
||||
* In addition, regions of code across which interrupts, preemption, or
|
||||
* softirqs have been disabled also serve as RCU read-side critical
|
||||
* sections. This includes hardware interrupt handlers, softirq handlers,
|
||||
* and NMI handlers.
|
||||
*
|
||||
* Note that this guarantee implies further memory-ordering guarantees.
|
||||
* On systems with more than one CPU, when synchronize_rcu() returns,
|
||||
* each CPU is guaranteed to have executed a full memory barrier since
|
||||
* the end of its last RCU read-side critical section whose beginning
|
||||
* preceded the call to synchronize_rcu(). In addition, each CPU having
|
||||
* an RCU read-side critical section that extends beyond the return from
|
||||
* synchronize_rcu() is guaranteed to have executed a full memory barrier
|
||||
* after the beginning of synchronize_rcu() and before the beginning of
|
||||
* that RCU read-side critical section. Note that these guarantees include
|
||||
* CPUs that are offline, idle, or executing in user mode, as well as CPUs
|
||||
* that are executing in the kernel.
|
||||
*
|
||||
* Furthermore, if CPU A invoked synchronize_rcu(), which returned
|
||||
* to its caller on CPU B, then both CPU A and CPU B are guaranteed
|
||||
* to have executed a full memory barrier during the execution of
|
||||
* synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
|
||||
* again only if the system has more than one CPU).
|
||||
*/
|
||||
void synchronize_rcu(void)
|
||||
{
|
||||
RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
|
||||
lock_is_held(&rcu_lock_map) ||
|
||||
lock_is_held(&rcu_sched_lock_map),
|
||||
"Illegal synchronize_rcu() in RCU read-side critical section");
|
||||
if (rcu_blocking_is_gp())
|
||||
return;
|
||||
if (rcu_gp_is_expedited())
|
||||
synchronize_rcu_expedited();
|
||||
else
|
||||
wait_rcu_gp(call_rcu);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(synchronize_rcu);
|
||||
|
||||
/**
|
||||
* get_state_synchronize_rcu - Snapshot current RCU state
|
||||
*
|
||||
@@ -3048,28 +3131,6 @@ static int rcu_pending(void)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return true if the specified CPU has any callback. If all_lazy is
|
||||
* non-NULL, store an indication of whether all callbacks are lazy.
|
||||
* (If there are no callbacks, all of them are deemed to be lazy.)
|
||||
*/
|
||||
static bool rcu_cpu_has_callbacks(bool *all_lazy)
|
||||
{
|
||||
bool al = true;
|
||||
bool hc = false;
|
||||
struct rcu_data *rdp;
|
||||
|
||||
rdp = this_cpu_ptr(&rcu_data);
|
||||
if (!rcu_segcblist_empty(&rdp->cblist)) {
|
||||
hc = true;
|
||||
if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist))
|
||||
al = false;
|
||||
}
|
||||
if (all_lazy)
|
||||
*all_lazy = al;
|
||||
return hc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Helper function for rcu_barrier() tracing. If tracing is disabled,
|
||||
* the compiler is expected to optimize this away.
|
||||
@@ -3299,7 +3360,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
|
||||
trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
|
||||
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
||||
rcu_prepare_kthreads(cpu);
|
||||
rcu_spawn_all_nocb_kthreads(cpu);
|
||||
rcu_spawn_cpu_nocb_kthread(cpu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -3329,8 +3390,6 @@ int rcutree_online_cpu(unsigned int cpu)
|
||||
raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
||||
rnp->ffmask |= rdp->grpmask;
|
||||
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
||||
if (IS_ENABLED(CONFIG_TREE_SRCU))
|
||||
srcu_online_cpu(cpu);
|
||||
if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
|
||||
return 0; /* Too early in boot for scheduler work. */
|
||||
sync_sched_exp_online_cleanup(cpu);
|
||||
@@ -3355,8 +3414,6 @@ int rcutree_offline_cpu(unsigned int cpu)
|
||||
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
|
||||
|
||||
rcutree_affinity_setting(cpu, cpu);
|
||||
if (IS_ENABLED(CONFIG_TREE_SRCU))
|
||||
srcu_offline_cpu(cpu);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -3777,7 +3834,7 @@ void __init rcu_init(void)
|
||||
rcu_init_one();
|
||||
if (dump_tree)
|
||||
rcu_dump_rcu_node_tree();
|
||||
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
|
||||
open_softirq(RCU_SOFTIRQ, rcu_core);
|
||||
|
||||
/*
|
||||
* We don't need protection against CPU-hotplug here because
|
||||
|
@@ -1,25 +1,12 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0+ */
|
||||
/*
|
||||
* Read-Copy Update mechanism for mutual exclusion (tree-based version)
|
||||
* Internal non-public definitions.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright IBM Corporation, 2008
|
||||
*
|
||||
* Author: Ingo Molnar <mingo@elte.hu>
|
||||
* Paul E. McKenney <paulmck@linux.vnet.ibm.com>
|
||||
* Paul E. McKenney <paulmck@linux.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/cache.h>
|
||||
@@ -36,7 +23,6 @@
|
||||
|
||||
/* Communicate arguments to a workqueue handler. */
|
||||
struct rcu_exp_work {
|
||||
smp_call_func_t rew_func;
|
||||
unsigned long rew_s;
|
||||
struct work_struct rew_work;
|
||||
};
|
||||
@@ -194,10 +180,7 @@ struct rcu_data {
|
||||
bool rcu_need_heavy_qs; /* GP old, so heavy quiescent state! */
|
||||
bool rcu_urgent_qs; /* GP old need light quiescent state. */
|
||||
#ifdef CONFIG_RCU_FAST_NO_HZ
|
||||
bool all_lazy; /* Are all CPU's CBs lazy? */
|
||||
unsigned long nonlazy_posted; /* # times non-lazy CB posted to CPU. */
|
||||
unsigned long nonlazy_posted_snap;
|
||||
/* Nonlazy_posted snapshot. */
|
||||
bool all_lazy; /* All CPU's CBs lazy at idle start? */
|
||||
unsigned long last_accelerate; /* Last jiffy CBs were accelerated. */
|
||||
unsigned long last_advance_all; /* Last jiffy CBs were all advanced. */
|
||||
int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
|
||||
@@ -234,7 +217,13 @@ struct rcu_data {
|
||||
/* Leader CPU takes GP-end wakeups. */
|
||||
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
|
||||
|
||||
/* 6) Diagnostic data, including RCU CPU stall warnings. */
|
||||
/* 6) RCU priority boosting. */
|
||||
struct task_struct *rcu_cpu_kthread_task;
|
||||
/* rcuc per-CPU kthread or NULL. */
|
||||
unsigned int rcu_cpu_kthread_status;
|
||||
char rcu_cpu_has_work;
|
||||
|
||||
/* 7) Diagnostic data, including RCU CPU stall warnings. */
|
||||
unsigned int softirq_snap; /* Snapshot of softirq activity. */
|
||||
/* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
|
||||
struct irq_work rcu_iw; /* Check for non-irq activity. */
|
||||
@@ -303,6 +292,8 @@ struct rcu_state {
|
||||
struct swait_queue_head gp_wq; /* Where GP task waits. */
|
||||
short gp_flags; /* Commands for GP task. */
|
||||
short gp_state; /* GP kthread sleep state. */
|
||||
unsigned long gp_wake_time; /* Last GP kthread wake. */
|
||||
unsigned long gp_wake_seq; /* ->gp_seq at ^^^. */
|
||||
|
||||
/* End of fields guarded by root rcu_node's lock. */
|
||||
|
||||
@@ -402,13 +393,6 @@ static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
|
||||
|
||||
int rcu_dynticks_snap(struct rcu_data *rdp);
|
||||
|
||||
#ifdef CONFIG_RCU_BOOST
|
||||
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
|
||||
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
|
||||
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
|
||||
DECLARE_PER_CPU(char, rcu_cpu_has_work);
|
||||
#endif /* #ifdef CONFIG_RCU_BOOST */
|
||||
|
||||
/* Forward declarations for rcutree_plugin.h */
|
||||
static void rcu_bootup_announce(void);
|
||||
static void rcu_qs(void);
|
||||
@@ -420,7 +404,7 @@ static void rcu_print_detail_task_stall(void);
|
||||
static int rcu_print_task_stall(struct rcu_node *rnp);
|
||||
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
|
||||
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
|
||||
static void rcu_flavor_check_callbacks(int user);
|
||||
static void rcu_flavor_sched_clock_irq(int user);
|
||||
void call_rcu(struct rcu_head *head, rcu_callback_t func);
|
||||
static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
|
||||
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
|
||||
@@ -431,7 +415,6 @@ static void __init rcu_spawn_boost_kthreads(void);
|
||||
static void rcu_prepare_kthreads(int cpu);
|
||||
static void rcu_cleanup_after_idle(void);
|
||||
static void rcu_prepare_for_idle(void);
|
||||
static void rcu_idle_count_callbacks_posted(void);
|
||||
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
|
||||
static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
|
||||
static void rcu_preempt_deferred_qs(struct task_struct *t);
|
||||
@@ -451,7 +434,7 @@ static bool rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
|
||||
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
|
||||
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
|
||||
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
|
||||
static void rcu_spawn_all_nocb_kthreads(int cpu);
|
||||
static void rcu_spawn_cpu_nocb_kthread(int cpu);
|
||||
static void __init rcu_spawn_nocb_kthreads(void);
|
||||
#ifdef CONFIG_RCU_NOCB_CPU
|
||||
static void __init rcu_organize_nocb_kthreads(void);
|
||||
@@ -462,11 +445,3 @@ static void rcu_bind_gp_kthread(void);
|
||||
static bool rcu_nohz_full_cpu(void);
|
||||
static void rcu_dynticks_task_enter(void);
|
||||
static void rcu_dynticks_task_exit(void);
|
||||
|
||||
#ifdef CONFIG_SRCU
|
||||
void srcu_online_cpu(unsigned int cpu);
|
||||
void srcu_offline_cpu(unsigned int cpu);
|
||||
#else /* #ifdef CONFIG_SRCU */
|
||||
void srcu_online_cpu(unsigned int cpu) { }
|
||||
void srcu_offline_cpu(unsigned int cpu) { }
|
||||
#endif /* #else #ifdef CONFIG_SRCU */
|
||||
|
@@ -1,27 +1,16 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0+ */
|
||||
/*
|
||||
* RCU expedited grace periods
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, you can access it online at
|
||||
* http://www.gnu.org/licenses/gpl-2.0.html.
|
||||
*
|
||||
* Copyright IBM Corporation, 2016
|
||||
*
|
||||
* Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
|
||||
* Authors: Paul E. McKenney <paulmck@linux.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/lockdep.h>
|
||||
|
||||
static void rcu_exp_handler(void *unused);
|
||||
|
||||
/*
|
||||
* Record the start of an expedited grace period.
|
||||
*/
|
||||
@@ -344,7 +333,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
|
||||
{
|
||||
int cpu;
|
||||
unsigned long flags;
|
||||
smp_call_func_t func;
|
||||
unsigned long mask_ofl_test;
|
||||
unsigned long mask_ofl_ipi;
|
||||
int ret;
|
||||
@@ -352,7 +340,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
|
||||
container_of(wp, struct rcu_exp_work, rew_work);
|
||||
struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
|
||||
|
||||
func = rewp->rew_func;
|
||||
raw_spin_lock_irqsave_rcu_node(rnp, flags);
|
||||
|
||||
/* Each pass checks a CPU for identity, offline, and idle. */
|
||||
@@ -396,7 +383,7 @@ retry_ipi:
|
||||
mask_ofl_test |= mask;
|
||||
continue;
|
||||
}
|
||||
ret = smp_call_function_single(cpu, func, NULL, 0);
|
||||
ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
|
||||
if (!ret) {
|
||||
mask_ofl_ipi &= ~mask;
|
||||
continue;
|
||||
@@ -426,7 +413,7 @@ retry_ipi:
|
||||
* Select the nodes that the upcoming expedited grace period needs
|
||||
* to wait for.
|
||||
*/
|
||||
static void sync_rcu_exp_select_cpus(smp_call_func_t func)
|
||||
static void sync_rcu_exp_select_cpus(void)
|
||||
{
|
||||
int cpu;
|
||||
struct rcu_node *rnp;
|
||||
@@ -440,7 +427,6 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
|
||||
rnp->exp_need_flush = false;
|
||||
if (!READ_ONCE(rnp->expmask))
|
||||
continue; /* Avoid early boot non-existent wq. */
|
||||
rnp->rew.rew_func = func;
|
||||
if (!READ_ONCE(rcu_par_gp_wq) ||
|
||||
rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
|
||||
rcu_is_last_leaf_node(rnp)) {
|
||||
@@ -449,7 +435,6 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
|
||||
continue;
|
||||
}
|
||||
INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
|
||||
preempt_disable();
|
||||
cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
|
||||
/* If all offline, queue the work on an unbound CPU. */
|
||||
if (unlikely(cpu > rnp->grphi - rnp->grplo))
|
||||
@@ -457,7 +442,6 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
|
||||
else
|
||||
cpu += rnp->grplo;
|
||||
queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
|
||||
preempt_enable();
|
||||
rnp->exp_need_flush = true;
|
||||
}

@@ -580,10 +564,10 @@ static void rcu_exp_wait_wake(unsigned long s)
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(smp_call_func_t func, unsigned long s)
static void rcu_exp_sel_wait_wake(unsigned long s)
{
        /* Initialize the rcu_node tree in preparation for the wait. */
        sync_rcu_exp_select_cpus(func);
        sync_rcu_exp_select_cpus();

        /* Wait and clean up, including waking everyone. */
        rcu_exp_wait_wake(s);
@@ -597,52 +581,7 @@ static void wait_rcu_exp_gp(struct work_struct *wp)
        struct rcu_exp_work *rewp;

        rewp = container_of(wp, struct rcu_exp_work, rew_work);
        rcu_exp_sel_wait_wake(rewp->rew_func, rewp->rew_s);
}

/*
 * Given a smp_call_function() handler, kick off the specified
 * implementation of expedited grace period.
 */
static void _synchronize_rcu_expedited(smp_call_func_t func)
{
        struct rcu_data *rdp;
        struct rcu_exp_work rew;
        struct rcu_node *rnp;
        unsigned long s;

        /* If expedited grace periods are prohibited, fall back to normal. */
        if (rcu_gp_is_normal()) {
                wait_rcu_gp(call_rcu);
                return;
        }

        /* Take a snapshot of the sequence number. */
        s = rcu_exp_gp_seq_snap();
        if (exp_funnel_lock(s))
                return; /* Someone else did our work for us. */

        /* Ensure that load happens before action based on it. */
        if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
                /* Direct call during scheduler init and early_initcalls(). */
                rcu_exp_sel_wait_wake(func, s);
        } else {
                /* Marshall arguments & schedule the expedited grace period. */
                rew.rew_func = func;
                rew.rew_s = s;
                INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
                queue_work(rcu_gp_wq, &rew.rew_work);
        }

        /* Wait for expedited grace period to complete. */
        rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
        rnp = rcu_get_root();
        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                   sync_exp_work_done(s));
        smp_mb(); /* Workqueue actions happen before return. */

        /* Let the next expedited grace period start. */
        mutex_unlock(&rcu_state.exp_mutex);
        rcu_exp_sel_wait_wake(rewp->rew_s);
}

#ifdef CONFIG_PREEMPT_RCU
@@ -654,7 +593,7 @@ static void _synchronize_rcu_expedited(smp_call_func_t func)
 * ->expmask fields in the rcu_node tree. Otherwise, immediately
 * report the quiescent state.
 */
static void sync_rcu_exp_handler(void *unused)
static void rcu_exp_handler(void *unused)
{
        unsigned long flags;
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
@@ -697,6 +636,7 @@ static void sync_rcu_exp_handler(void *unused)
                        WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
                }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }

        /*
@@ -730,43 +670,10 @@ static void sync_sched_exp_online_cleanup(int cpu)
{
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it. The basic
 * idea is to IPI all non-idle non-nohz online CPUs. The IPI handler
 * checks whether the CPU is in an RCU-preempt critical section, and
 * if so, it sets a flag that causes the outermost rcu_read_unlock()
 * to report the quiescent state. On the other hand, if the CPU is
 * not in an RCU read-side critical section, the IPI handler reports
 * the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code. In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

        if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
                return;
        _synchronize_rcu_expedited(sync_rcu_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
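The kernel-doc above asks callers not to invoke synchronize_rcu_expedited() once per update but to batch the updates and wait once. A hedged sketch of that batching pattern, with hypothetical myobj/mylist/mylock names (an RCU-protected list whose readers use list_for_each_entry_rcu()):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define MY_MAX_BATCH 16

struct myobj {
        struct list_head node;
};

static LIST_HEAD(mylist);               /* readers traverse this under rcu_read_lock() */
static DEFINE_SPINLOCK(mylock);         /* serializes updaters */

static void retire_some_objects(void)
{
        struct myobj *victims[MY_MAX_BATCH];
        struct myobj *p, *n;
        int i, nr = 0;

        spin_lock(&mylock);
        list_for_each_entry_safe(p, n, &mylist, node) {
                if (nr == MY_MAX_BATCH)
                        break;
                list_del_rcu(&p->node); /* unpublish; readers may still hold p */
                victims[nr++] = p;
        }
        spin_unlock(&mylock);

        synchronize_rcu();              /* one grace period covers the whole batch */

        for (i = 0; i < nr; i++)
                kfree(victims[i]);
}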

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void sync_sched_exp_handler(void *unused)
static void rcu_exp_handler(void *unused)
{
        struct rcu_data *rdp;
        struct rcu_node *rnp;
@@ -798,44 +705,78 @@ static void sync_sched_exp_online_cleanup(int cpu)
        rnp = rdp->mynode;
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
                return;
        ret = smp_call_function_single(cpu, sync_sched_exp_handler, NULL, 0);
        ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
        WARN_ON_ONCE(ret);
}

/*
 * Because a context switch is a grace period for !PREEMPT, any
 * blocking grace-period wait automatically implies a grace period if
 * there is only one CPU online at any point in time during execution of
 * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
 * occasionally incorrectly indicate that there are multiple CPUs online
 * when there was in fact only one the whole time, as this just adds some
 * overhead: RCU still operates correctly.
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it. The basic idea is to
 * IPI all non-idle non-nohz online CPUs. The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched. On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code. In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
static int rcu_blocking_is_gp(void)
{
        int ret;

        might_sleep(); /* Check for RCU read-side critical section. */
        preempt_disable();
        ret = num_online_cpus() <= 1;
        preempt_enable();
        return ret;
}

/* PREEMPT=n implementation of synchronize_rcu_expedited(). */
void synchronize_rcu_expedited(void)
{
        struct rcu_data *rdp;
        struct rcu_exp_work rew;
        struct rcu_node *rnp;
        unsigned long s;

        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

        /* If only one CPU, this is automatically a grace period. */
        /* Is the state such that the call is a grace period? */
        if (rcu_blocking_is_gp())
                return;

        _synchronize_rcu_expedited(sync_sched_exp_handler);
        /* If expedited grace periods are prohibited, fall back to normal. */
        if (rcu_gp_is_normal()) {
                wait_rcu_gp(call_rcu);
                return;
        }

        /* Take a snapshot of the sequence number. */
        s = rcu_exp_gp_seq_snap();
        if (exp_funnel_lock(s))
                return; /* Someone else did our work for us. */

        /* Ensure that load happens before action based on it. */
        if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
                /* Direct call during scheduler init and early_initcalls(). */
                rcu_exp_sel_wait_wake(s);
        } else {
                /* Marshall arguments & schedule the expedited grace period. */
                rew.rew_s = s;
                INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
                queue_work(rcu_gp_wq, &rew.rew_work);
        }

        /* Wait for expedited grace period to complete. */
        rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
        rnp = rcu_get_root();
        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                   sync_exp_work_done(s));
        smp_mb(); /* Workqueue actions happen before return. */

        /* Let the next expedited grace period start. */
        mutex_unlock(&rcu_state.exp_mutex);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

@@ -1,27 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *         Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *         Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/delay.h>
@@ -34,17 +21,7 @@
#include "../time/tick-internal.h"

#ifdef CONFIG_RCU_BOOST

#include "../locking/rtmutex_common.h"

/*
 * Control variables for per-CPU and per-rcu_node kthreads.
 */
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);

#else /* #ifdef CONFIG_RCU_BOOST */

/*
@@ -307,7 +284,7 @@ static void rcu_qs(void)
                                       __this_cpu_read(rcu_data.gp_seq),
                                       TPS("cpuqs"));
                __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
                barrier(); /* Coordinate with rcu_flavor_check_callbacks(). */
                barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
                current->rcu_read_unlock_special.b.need_qs = false;
        }
}
@@ -788,13 +765,13 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
}

/*
 * Check for a quiescent state from the current CPU. When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 * Check for a quiescent state from the current CPU, including voluntary
 * context switches for Tasks RCU. When a task blocks, the task is
 * recorded in the corresponding CPU's rcu_node structure, which is checked
 * elsewhere, hence this function need only check for quiescent states
 * related to the current CPU, not to those related to tasks.
 */
static void rcu_flavor_check_callbacks(int user)
static void rcu_flavor_sched_clock_irq(int user)
{
        struct task_struct *t = current;

@@ -825,54 +802,6 @@ static void rcu_flavor_check_callbacks(int user)
                t->rcu_read_unlock_special.b.need_qs = true;
}

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed. Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting. RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 * In addition, regions of code across which interrupts, preemption, or
 * softirqs have been disabled also serve as RCU read-side critical
 * sections. This includes hardware interrupt handlers, softirq handlers,
 * and NMI handlers.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last RCU read-side critical section whose beginning
 * preceded the call to synchronize_rcu(). In addition, each CPU having
 * an RCU read-side critical section that extends beyond the return from
 * synchronize_rcu() is guaranteed to have executed a full memory barrier
 * after the beginning of synchronize_rcu() and before the beginning of
 * that RCU read-side critical section. Note that these guarantees include
 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
 * that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 */
void synchronize_rcu(void)
{
        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu() in RCU read-side critical section");
        if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
                return;
        if (rcu_gp_is_expedited())
                synchronize_rcu_expedited();
        else
                wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
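The memory-ordering guarantee documented above is what makes the usual publish/retire idiom safe: publish with rcu_assign_pointer(), let readers dereference under rcu_read_lock(), and free the old version only after synchronize_rcu(). A minimal sketch, assuming a single updater and hypothetical struct foo/gp names (not from this patch):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int a;
};

static struct foo __rcu *gp;

static int reader(void)
{
        struct foo *p;
        int val = -1;

        rcu_read_lock();
        p = rcu_dereference(gp);        /* may run concurrently with updater() */
        if (p)
                val = p->a;
        rcu_read_unlock();
        return val;
}

static void updater(int a)
{
        struct foo *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
        struct foo *oldp;

        if (!newp)
                return;
        newp->a = a;
        oldp = rcu_dereference_protected(gp, 1);        /* single updater assumed */
        rcu_assign_pointer(gp, newp);                   /* publish the new version */
        synchronize_rcu();      /* all readers that could see oldp have finished */
        kfree(oldp);
}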

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so. No need to issue warnings,
@@ -1088,14 +1017,10 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
}

/*
 * Check to see if this CPU is in a non-context-switch quiescent state
 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
 * Also schedule RCU core processing.
 *
 * This function must be called from hardirq context. It is normally
 * invoked from the scheduling-clock interrupt.
 * Check to see if this CPU is in a non-context-switch quiescent state,
 * namely user mode and idle loop.
 */
static void rcu_flavor_check_callbacks(int user)
static void rcu_flavor_sched_clock_irq(int user)
{
        if (user || rcu_is_cpu_rrupt_from_idle()) {

@@ -1115,22 +1040,6 @@ static void rcu_flavor_check_callbacks(int user)
        }
}

/* PREEMPT=n implementation of synchronize_rcu(). */
void synchronize_rcu(void)
{
        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu() in RCU read-side critical section");
        if (rcu_blocking_is_gp())
                return;
        if (rcu_gp_is_expedited())
                synchronize_rcu_expedited();
        else
                wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
@@ -1307,11 +1216,11 @@ static void invoke_rcu_callbacks_kthread(void)
        unsigned long flags;

        local_irq_save(flags);
        __this_cpu_write(rcu_cpu_has_work, 1);
        if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
            current != __this_cpu_read(rcu_cpu_kthread_task)) {
                rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
                              __this_cpu_read(rcu_cpu_kthread_status));
        __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
        if (__this_cpu_read(rcu_data.rcu_cpu_kthread_task) != NULL &&
            current != __this_cpu_read(rcu_data.rcu_cpu_kthread_task)) {
                rcu_wake_cond(__this_cpu_read(rcu_data.rcu_cpu_kthread_task),
                              __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
        }
        local_irq_restore(flags);
}
@@ -1322,7 +1231,7 @@ static void invoke_rcu_callbacks_kthread(void)
 */
static bool rcu_is_callbacks_kthread(void)
{
        return __this_cpu_read(rcu_cpu_kthread_task) == current;
        return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
@@ -1369,11 +1278,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
        return 0;
}

static void rcu_kthread_do_work(void)
{
        rcu_do_batch(this_cpu_ptr(&rcu_data));
}

static void rcu_cpu_kthread_setup(unsigned int cpu)
{
        struct sched_param sp;
@@ -1384,12 +1288,12 @@ static void rcu_cpu_kthread_setup(unsigned int cpu)

static void rcu_cpu_kthread_park(unsigned int cpu)
{
        per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
        per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
}

static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
        return __this_cpu_read(rcu_cpu_has_work);
        return __this_cpu_read(rcu_data.rcu_cpu_has_work);
}

/*
@@ -1399,21 +1303,20 @@ static int rcu_cpu_kthread_should_run(unsigned int cpu)
 */
static void rcu_cpu_kthread(unsigned int cpu)
{
        unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
        char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
        unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
        char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
        int spincnt;

        for (spincnt = 0; spincnt < 10; spincnt++) {
                trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
                local_bh_disable();
                *statusp = RCU_KTHREAD_RUNNING;
                this_cpu_inc(rcu_cpu_kthread_loops);
                local_irq_disable();
                work = *workp;
                *workp = 0;
                local_irq_enable();
                if (work)
                        rcu_kthread_do_work();
                        rcu_do_batch(this_cpu_ptr(&rcu_data));
                local_bh_enable();
                if (*workp == 0) {
                        trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
@@ -1459,7 +1362,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
}

static struct smp_hotplug_thread rcu_cpu_thread_spec = {
        .store                  = &rcu_cpu_kthread_task,
        .store                  = &rcu_data.rcu_cpu_kthread_task,
        .thread_should_run      = rcu_cpu_kthread_should_run,
        .thread_fn              = rcu_cpu_kthread,
        .thread_comm            = "rcuc/%u",
@@ -1476,7 +1379,7 @@ static void __init rcu_spawn_boost_kthreads(void)
        int cpu;

        for_each_possible_cpu(cpu)
                per_cpu(rcu_cpu_has_work, cpu) = 0;
                per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
        if (WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), "%s: Could not start rcub kthread, OOM is now expected behavior\n", __func__))
                return;
rcu_for_each_leaf_node(rnp)
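This hunk moves the rcuc kthreads' bookkeeping into rcu_data but keeps the smpboot registration pattern. For reference, a hedged, minimal sketch of how a per-CPU hotplug thread is described and registered via smpboot_register_percpu_thread(); every my_* name is hypothetical:

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, my_kthread_task);

static int my_thread_should_run(unsigned int cpu)
{
        return 0;       /* return nonzero when this CPU has pending work */
}

static void my_thread_fn(unsigned int cpu)
{
        /* Invoked in the kthread whenever my_thread_should_run() says yes. */
}

static struct smp_hotplug_thread my_thread_spec = {
        .store                  = &my_kthread_task,
        .thread_should_run      = my_thread_should_run,
        .thread_fn              = my_thread_fn,
        .thread_comm            = "myk/%u",
};

static int __init my_spawn_threads(void)
{
        return smpboot_register_percpu_thread(&my_thread_spec);
}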

@@ -1543,7 +1446,7 @@ static void rcu_prepare_kthreads(int cpu)
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
        *nextevt = KTIME_MAX;
        return rcu_cpu_has_callbacks(NULL);
        return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist);
}

/*
@@ -1562,14 +1465,6 @@ static void rcu_prepare_for_idle(void)
{
}

/*
 * Don't bother keeping a running count of the number of RCU callbacks
 * posted because CONFIG_RCU_FAST_NO_HZ=n.
 */
static void rcu_idle_count_callbacks_posted(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
@@ -1652,11 +1547,8 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)

        lockdep_assert_irqs_disabled();

        /* Snapshot to detect later posting of non-lazy callback. */
        rdp->nonlazy_posted_snap = rdp->nonlazy_posted;

        /* If no callbacks, RCU doesn't need the CPU. */
        if (!rcu_cpu_has_callbacks(&rdp->all_lazy)) {
        if (rcu_segcblist_empty(&rdp->cblist)) {
                *nextevt = KTIME_MAX;
                return 0;
        }
@@ -1670,11 +1562,12 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt)
        rdp->last_accelerate = jiffies;

        /* Request timer delay depending on laziness, and round. */
        if (!rdp->all_lazy) {
        rdp->all_lazy = !rcu_segcblist_n_nonlazy_cbs(&rdp->cblist);
        if (rdp->all_lazy) {
                dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
        } else {
                dj = round_up(rcu_idle_gp_delay + jiffies,
                              rcu_idle_gp_delay) - jiffies;
        } else {
                dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
        }
        *nextevt = basemono + dj * TICK_NSEC;
        return 0;
@@ -1704,7 +1597,7 @@ static void rcu_prepare_for_idle(void)
        /* Handle nohz enablement switches conservatively. */
        tne = READ_ONCE(tick_nohz_active);
        if (tne != rdp->tick_nohz_enabled_snap) {
                if (rcu_cpu_has_callbacks(NULL))
                if (!rcu_segcblist_empty(&rdp->cblist))
                        invoke_rcu_core(); /* force nohz to see update. */
                rdp->tick_nohz_enabled_snap = tne;
                return;
@@ -1717,10 +1610,8 @@ static void rcu_prepare_for_idle(void)
         * callbacks, invoke RCU core for the side-effect of recalculating
         * idle duration on re-entry to idle.
         */
        if (rdp->all_lazy &&
            rdp->nonlazy_posted != rdp->nonlazy_posted_snap) {
        if (rdp->all_lazy && rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)) {
                rdp->all_lazy = false;
                rdp->nonlazy_posted_snap = rdp->nonlazy_posted;
                invoke_rcu_core();
                return;
        }
@@ -1756,19 +1647,6 @@ static void rcu_cleanup_after_idle(void)
                invoke_rcu_core();
}

/*
 * Keep a running count of the number of non-lazy callbacks posted
 * on this CPU. This running counter (which is never decremented) allows
 * rcu_prepare_for_idle() to detect when something out of the idle loop
 * posts a callback, even if an equal number of callbacks are invoked.
 * Of course, callbacks should only be posted from within a trace event
 * designed to be called from idle or from within RCU_NONIDLE().
 */
static void rcu_idle_count_callbacks_posted(void)
{
        __this_cpu_add(rcu_data.nonlazy_posted, 1);
}

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#ifdef CONFIG_RCU_FAST_NO_HZ
@@ -1776,13 +1654,12 @@ static void rcu_idle_count_callbacks_posted(void)
static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        unsigned long nlpd = rdp->nonlazy_posted - rdp->nonlazy_posted_snap;

        sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
        sprintf(cp, "last_accelerate: %04lx/%04lx, Nonlazy posted: %c%c%c",
                rdp->last_accelerate & 0xffff, jiffies & 0xffff,
                ulong2long(nlpd),
                rdp->all_lazy ? 'L' : '.',
                rdp->tick_nohz_enabled_snap ? '.' : 'D');
                ".l"[rdp->all_lazy],
                ".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)],
                ".D"[!rdp->tick_nohz_enabled_snap]);
}
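The new format string leans on the ".x"[cond] idiom: indexing a two-character string literal with a 0-or-1 condition yields '.' when the flag is clear and the letter when it is set. A tiny standalone illustration (not from the patch):

#include <stdio.h>

int main(void)
{
        int all_lazy = 1;
        int nohz_enabled = 0;

        /* ".l"[0] is '.', ".l"[1] is 'l'; !! forces the index to 0 or 1. */
        printf("%c%c\n", ".l"[!!all_lazy], ".D"[!nohz_enabled]);        /* prints "lD" */
        return 0;
}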

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
@@ -1868,22 +1745,24 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp)

/*
 * Offload callback processing from the boot-time-specified set of CPUs
 * specified by rcu_nocb_mask. For each CPU in the set, there is a
 * kthread created that pulls the callbacks from the corresponding CPU,
 * waits for a grace period to elapse, and invokes the callbacks.
 * The no-CBs CPUs do a wake_up() on their kthread when they insert
 * a callback into any empty list, unless the rcu_nocb_poll boot parameter
 * has been specified, in which case each kthread actively polls its
 * CPU. (Which isn't so great for energy efficiency, but which does
 * reduce RCU's overhead on that CPU.)
 * specified by rcu_nocb_mask. For the CPUs in the set, there are kthreads
 * created that pull the callbacks from the corresponding CPU, wait for
 * a grace period to elapse, and invoke the callbacks. These kthreads
 * are organized into leaders, which manage incoming callbacks, wait for
 * grace periods, and awaken followers, and the followers, which only
 * invoke callbacks. Each leader is its own follower. The no-CBs CPUs
 * do a wake_up() on their kthread when they insert a callback into any
 * empty list, unless the rcu_nocb_poll boot parameter has been specified,
 * in which case each kthread actively polls its CPU. (Which isn't so great
 * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
 *
 * This is intended to be used in conjunction with Frederic Weisbecker's
 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
 * running CPU-bound user-mode computations.
 *
 * Offloading of callback processing could also in theory be used as
 * an energy-efficiency measure because CPUs with no RCU callbacks
 * queued are more aggressive about entering dyntick-idle mode.
 * Offloading of callbacks can also be used as an energy-efficiency
 * measure because CPUs with no RCU callbacks queued are more aggressive
 * about entering dyntick-idle mode.
*/
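The offloading described above is selected per CPU on the kernel command line (it requires CONFIG_RCU_NOCB_CPU=y). For example, an illustrative boot line that offloads CPUs 1-7 and additionally makes the rcuo kthreads poll:

        rcu_nocbs=1-7 rcu_nocb_poll

The CPU list here is only an example; rcu_nocb_mask is whatever the administrator passes in rcu_nocbs=.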

@@ -1987,10 +1866,7 @@ static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
        raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
}

/*
 * Does the specified CPU need an RCU callback for this invocation
 * of rcu_barrier()?
 */
/* Does rcu_barrier need to queue an RCU callback on the specified CPU? */
static bool rcu_nocb_cpu_needs_barrier(int cpu)
{
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
@@ -2006,8 +1882,8 @@ static bool rcu_nocb_cpu_needs_barrier(int cpu)
         * callbacks would be posted. In the worst case, the first
         * barrier in rcu_barrier() suffices (but the caller cannot
         * necessarily rely on this, not a substitute for the caller
         * getting the concurrency design right!). There must also be
         * a barrier between the following load an posting of a callback
         * getting the concurrency design right!). There must also be a
         * barrier between the following load and posting of a callback
         * (if a callback is in fact needed). This is associated with an
         * atomic_inc() in the caller.
         */
@@ -2517,9 +2393,9 @@ static void rcu_spawn_one_nocb_kthread(int cpu)

/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo kthreads, spawn them.
 * rcuo kthread, spawn it.
 */
static void rcu_spawn_all_nocb_kthreads(int cpu)
static void rcu_spawn_cpu_nocb_kthread(int cpu)
{
        if (rcu_scheduler_fully_active)
                rcu_spawn_one_nocb_kthread(cpu);
@@ -2536,7 +2412,7 @@ static void __init rcu_spawn_nocb_kthreads(void)
        int cpu;

        for_each_online_cpu(cpu)
                rcu_spawn_all_nocb_kthreads(cpu);
                rcu_spawn_cpu_nocb_kthread(cpu);
}

/* How many follower CPU IDs per leader? Default of -1 for sqrt(nr_cpu_ids). */
@@ -2670,7 +2546,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
}

static void rcu_spawn_all_nocb_kthreads(int cpu)
static void rcu_spawn_cpu_nocb_kthread(int cpu)
{
}

@@ -1,26 +1,13 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf

@@ -48,8 +48,8 @@ EXPORT_SYMBOL_GPL(cpufreq_add_update_util_hook);
 *
 * Clear the update_util_data pointer for the given CPU.
 *
 * Callers must use RCU-sched callbacks to free any memory that might be
 * accessed via the old update_util_data pointer or invoke synchronize_sched()
 * Callers must use RCU callbacks to free any memory that might be
 * accessed via the old update_util_data pointer or invoke synchronize_rcu()
 * right after this function to avoid use-after-free.
 */
void cpufreq_remove_update_util_hook(int cpu)
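Following the updated comment, a caller that frees the structure backing its hook must let a full grace period elapse first. A hedged sketch of such a tear-down path; the my_hook per-CPU pointer is hypothetical caller-side storage, not part of the cpufreq API:

#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/sched/cpufreq.h>
#include <linux/slab.h>

static DEFINE_PER_CPU(struct update_util_data *, my_hook);

static void my_remove_hook(int cpu)
{
        struct update_util_data *data = per_cpu(my_hook, cpu);

        cpufreq_remove_update_util_hook(cpu);
        per_cpu(my_hook, cpu) = NULL;
        synchronize_rcu();      /* no CPU can still be running the old callback */
        kfree(data);
}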

@@ -859,7 +859,7 @@ static void sugov_stop(struct cpufreq_policy *policy)
        for_each_cpu(cpu, policy->cpus)
                cpufreq_remove_update_util_hook(cpu);

        synchronize_sched();
        synchronize_rcu();

        if (!policy->fast_switch_enabled) {
                irq_work_sync(&sg_policy->irq_work);

@@ -1260,7 +1260,7 @@ extern void sched_ttwu_pending(void);

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 * See destroy_sched_domains: call_rcu for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.

@@ -442,7 +442,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
        raw_spin_unlock_irqrestore(&rq->lock, flags);

        if (old_rd)
                call_rcu_sched(&old_rd->rcu, free_rootdomain);
                call_rcu(&old_rd->rcu, free_rootdomain);
}

void sched_get_rd(struct root_domain *rd)
@@ -455,7 +455,7 @@ void sched_put_rd(struct root_domain *rd)
        if (!atomic_dec_and_test(&rd->refcount))
                return;

        call_rcu_sched(&rd->rcu, free_rootdomain);
        call_rcu(&rd->rcu, free_rootdomain);
}

static int init_rootdomain(struct root_domain *rd)
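The call_rcu_sched() to call_rcu() conversions above rely on the usual embedded-rcu_head pattern: the callback receives the rcu_head and recovers the enclosing object with container_of() once a grace period has elapsed. A minimal sketch with hypothetical names:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_domain {
        int id;
        struct rcu_head rcu;
};

static void free_my_domain(struct rcu_head *rcu)
{
        struct my_domain *d = container_of(rcu, struct my_domain, rcu);

        kfree(d);
}

static void put_my_domain(struct my_domain *d)
{
        /* Readers that already hold d may keep using it until the GP ends. */
        call_rcu(&d->rcu, free_my_domain);
}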

@@ -1632,7 +1632,7 @@ void update_process_times(int user_tick)
        /* Note: this timer irq context must be accounted for as well. */
        account_process_tick(p, user_tick);
        run_local_timers();
        rcu_check_callbacks(user_tick);
        rcu_sched_clock_irq(user_tick);
#ifdef CONFIG_IRQ_WORK
        if (in_irq())
                irq_work_tick();

@@ -1,23 +1,10 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Common functions for in-kernel torture tests.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Author: Paul E. McKenney <paulmck@us.ibm.com>
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 * Based on kernel/rcu/torture.c.
 */

@@ -53,7 +40,7 @@
#include "rcu/rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

static char *torture_type;
static int verbose;
@@ -75,6 +62,7 @@ static DEFINE_MUTEX(fullstop_mutex);
static struct task_struct *onoff_task;
static long onoff_holdoff;
static long onoff_interval;
static torture_ofl_func *onoff_f;
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;
@@ -118,6 +106,8 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
                pr_alert("%s" TORTURE_FLAG
                         "torture_onoff task: offlined %d\n",
                         torture_type, cpu);
                if (onoff_f)
                        onoff_f();
                (*n_offl_successes)++;
                delta = jiffies - starttime;
                *sum_offl += delta;
@@ -243,11 +233,12 @@ stop:
/*
 * Initiate online-offline handling.
 */
int torture_onoff_init(long ooholdoff, long oointerval)
int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f)
{
#ifdef CONFIG_HOTPLUG_CPU
        onoff_holdoff = ooholdoff;
        onoff_interval = oointerval;
        onoff_f = f;
        if (onoff_interval <= 0)
                return 0;
return torture_create_kthread(torture_onoff, NULL, onoff_task);
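With the new third argument, a torture test can ask for a hook to run right after each successful CPU offline; passing NULL keeps the old behavior. A hedged sketch of a caller, where my_post_offline(), my_holdoff and my_interval are hypothetical module-side names:

static long my_holdoff = 30;    /* seconds before the first offline attempt */
static long my_interval = 3;    /* seconds between offline attempts */

static void my_post_offline(void)
{
        /* e.g. nudge the test into a state that exercises the offline path */
}

static int __init my_torture_init(void)
{
        int firsterr = 0;

        if (my_interval > 0)
                firsterr = torture_onoff_init(my_holdoff * HZ, my_interval * HZ,
                                              my_post_offline);
        return firsterr;
}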