rcu/nocb: Leave ->cblist enabled for no-CBs CPUs
As a first step towards making no-CBs CPUs use the ->cblist, this commit leaves the ->cblist enabled for these CPUs. The main reason to make no-CBs CPUs use ->cblist is to take advantage of callback numbering, which will reduce the effects of missed grace periods, which in turn will reduce forward-progress problems for no-CBs CPUs.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
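For context on the diff below: ->cblist is RCU's segmented callback list, in which each segment is tagged with the grace-period sequence number its callbacks are waiting on; that per-segment tagging is the "callback numbering" the message refers to. The following is a simplified, self-contained sketch of the idea, not the kernel's exact definition (the real structure is struct rcu_segcblist in kernel/rcu/rcu_segcblist.h); the sketch names are hypothetical stand-ins.

/* Simplified sketch of a segmented callback list (hypothetical names;
 * the real thing is struct rcu_segcblist in kernel/rcu/rcu_segcblist.h). */
typedef unsigned char u8;

struct cb_sketch {
	struct cb_sketch *next;			/* Next callback in the list. */
	void (*func)(struct cb_sketch *head);	/* Invoked once the GP elapses. */
};

/* Segments, in the order callbacks advance as grace periods complete. */
#define SKETCH_DONE_TAIL	0	/* GP elapsed; ready to invoke. */
#define SKETCH_WAIT_TAIL	1	/* Waiting on the current GP. */
#define SKETCH_NEXT_READY_TAIL	2	/* Will wait on the next GP. */
#define SKETCH_NEXT_TAIL	3	/* Not yet assigned a GP number. */
#define SKETCH_NSEGS		4

struct segcblist_sketch {
	struct cb_sketch *head;			/* All callbacks, one linked list. */
	struct cb_sketch **tails[SKETCH_NSEGS];	/* End of each segment. */
	unsigned long gp_seq[SKETCH_NSEGS];	/* GP number each segment waits on. */
	long len;				/* Total callbacks queued. */
	u8 enabled;				/* List may accept callbacks. */
	u8 offloaded;				/* Invoked by rcuo kthread, not softirq. */
};

Because each segment carries its own gp_seq, a CPU that sleeps through several grace periods can still tell exactly which of its callbacks are ready to invoke, which is what reduces the forward-progress penalty of missed grace periods.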
--- a/kernel/rcu/rcu_segcblist.c
+++ b/kernel/rcu/rcu_segcblist.c
@@ -79,9 +79,6 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
  */
 void rcu_segcblist_offload(struct rcu_segcblist *rsclp)
 {
-	WARN_ON_ONCE(!rcu_segcblist_empty(rsclp));
-	WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp));
-	WARN_ON_ONCE(rcu_segcblist_n_lazy_cbs(rsclp));
 	rsclp->offloaded = 1;
 }
 
--- a/kernel/rcu/rcu_segcblist.h
+++ b/kernel/rcu/rcu_segcblist.h
@@ -59,7 +59,7 @@ static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp)
 
 /*
  * Is the specified rcu_segcblist enabled, for example, not corresponding
- * to an offline or callback-offloaded CPU?
+ * to an offline CPU?
  */
 static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
 {
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2964,7 +2964,8 @@ rcu_boot_init_percpu_data(int cpu)
  * Initializes a CPU's per-CPU RCU data. Note that only one online or
  * offline event can be happening at a given time. Note also that we can
  * accept some slop in the rsp->gp_seq access due to the fact that this
- * CPU cannot possibly have any RCU callbacks in flight yet.
+ * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
+ * And any offloaded callbacks are being numbered elsewhere.
  */
 int rcutree_prepare_cpu(unsigned int cpu)
 {
@@ -2978,7 +2979,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
 	rdp->n_force_qs_snap = rcu_state.n_force_qs;
 	rdp->blimit = blimit;
 	if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
-	    !init_nocb_callback_list(rdp))
+	    !rcu_segcblist_is_offloaded(&rdp->cblist))
 		rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
 	rdp->dynticks_nesting = 1;	/* CPU not up, no tearing. */
 	rcu_dynticks_eqs_online();
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -438,7 +438,6 @@ static void __init rcu_spawn_nocb_kthreads(void);
 #ifdef CONFIG_RCU_NOCB_CPU
 static void __init rcu_organize_nocb_kthreads(void);
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-static bool init_nocb_callback_list(struct rcu_data *rdp);
 static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp);
 static void rcu_bind_gp_kthread(void);
 static bool rcu_nohz_full_cpu(void);
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2007,6 +2007,7 @@ void __init rcu_init_nohz(void)
 {
 	int cpu;
 	bool need_rcu_nocb_mask = false;
+	struct rcu_data *rdp;
 
 #if defined(CONFIG_NO_HZ_FULL)
 	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
@@ -2040,8 +2041,12 @@ void __init rcu_init_nohz(void)
 	if (rcu_nocb_poll)
 		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
 
-	for_each_cpu(cpu, rcu_nocb_mask)
-		init_nocb_callback_list(per_cpu_ptr(&rcu_data, cpu));
+	for_each_cpu(cpu, rcu_nocb_mask) {
+		rdp = per_cpu_ptr(&rcu_data, cpu);
+		if (rcu_segcblist_empty(&rdp->cblist))
+			rcu_segcblist_init(&rdp->cblist);
+		rcu_segcblist_offload(&rdp->cblist);
+	}
 	rcu_organize_nocb_kthreads();
 }
 
@@ -2167,27 +2172,6 @@ static void __init rcu_organize_nocb_kthreads(void)
 	}
 }
 
-/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
-static bool init_nocb_callback_list(struct rcu_data *rdp)
-{
-	if (!rcu_is_nocb_cpu(rdp->cpu))
-		return false;
-
-	/* If there are early-boot callbacks, move them to nocb lists. */
-	if (!rcu_segcblist_empty(&rdp->cblist)) {
-		rdp->nocb_head = rcu_segcblist_head(&rdp->cblist);
-		rdp->nocb_tail = rcu_segcblist_tail(&rdp->cblist);
-		atomic_long_set(&rdp->nocb_q_count,
-				rcu_segcblist_n_cbs(&rdp->cblist));
-		atomic_long_set(&rdp->nocb_q_count_lazy,
-				rcu_segcblist_n_lazy_cbs(&rdp->cblist));
-	}
-	rcu_segcblist_init(&rdp->cblist);
-	rcu_segcblist_disable(&rdp->cblist);
-	rcu_segcblist_offload(&rdp->cblist);
-	return true;
-}
-
 /*
  * Bind the current task to the offloaded CPUs. If there are no offloaded
  * CPUs, leave the task unbound. Splat if the bind attempt fails.
@@ -2263,11 +2247,6 @@ static void __init rcu_spawn_nocb_kthreads(void)
 {
 }
 
-static bool init_nocb_callback_list(struct rcu_data *rdp)
-{
-	return false;
-}
-
 static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp)
 {
 	return 0;
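The net effect is that a no-CBs CPU's ->cblist now stays enabled and is marked offloaded, rather than being disabled outright, so the two states can be queried independently. Below is a minimal sketch of the two predicates, following their definitions in kernel/rcu/rcu_segcblist.h at this point in the series; rcu_segcblist_is_offloaded() is the helper that the rcutree_prepare_cpu() hunk above tests.

/* Assumes struct rcu_segcblist from kernel/rcu/rcu_segcblist.h, with
 * its ->enabled and ->offloaded flag fields. */

/* Is the specified rcu_segcblist enabled, for example, not corresponding
 * to an offline CPU? */
static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
{
	return rsclp->enabled;
}

/* Is the specified rcu_segcblist offloaded to an rcuo kthread? */
static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
{
	return rsclp->offloaded;
}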