rcu: Add accessor macros for the ->need_future_gp[] array
Accessors for the ->need_future_gp[] array are currently open-coded, which makes them difficult to change. To improve maintainability, this commit adds need_future_gp_mask() to compute the indexing mask from the array size, need_future_gp_element() to access the element corresponding to the specified grace-period number, and need_any_future_gp() to determine whether any future grace period is needed. This commit also applies need_future_gp_element() to the existing open-coded single-element accesses.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Nicholas Piggin <npiggin@gmail.com>
This commit is contained in:
@@ -718,11 +718,9 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
|
|||||||
static int rcu_future_needs_gp(struct rcu_state *rsp)
|
static int rcu_future_needs_gp(struct rcu_state *rsp)
|
||||||
{
|
{
|
||||||
struct rcu_node *rnp = rcu_get_root(rsp);
|
struct rcu_node *rnp = rcu_get_root(rsp);
|
||||||
int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
|
|
||||||
int *fp = &rnp->need_future_gp[idx];
|
|
||||||
|
|
||||||
lockdep_assert_irqs_disabled();
|
lockdep_assert_irqs_disabled();
|
||||||
return READ_ONCE(*fp);
|
return READ_ONCE(need_future_gp_element(rnp, rnp->completed));
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -1699,7 +1697,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 */
 	c = rcu_cbs_completed(rdp->rsp, rnp);
 	trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
-	if (rnp->need_future_gp[c & 0x1]) {
+	if (need_future_gp_element(rnp, c)) {
 		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
 		goto out;
 	}
@@ -1711,7 +1709,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 * current grace period, we don't need to explicitly start one.
 	 */
 	if (rnp->gpnum != rnp->completed) {
-		rnp->need_future_gp[c & 0x1]++;
+		need_future_gp_element(rnp, c)++;
 		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
 		goto out;
 	}
@@ -1737,13 +1735,13 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
	 * If the needed for the required grace period is already
	 * recorded, trace and leave.
	 */
-	if (rnp_root->need_future_gp[c & 0x1]) {
+	if (need_future_gp_element(rnp_root, c)) {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
		goto unlock_out;
	}

	/* Record the need for the future grace period. */
-	rnp_root->need_future_gp[c & 0x1]++;
+	need_future_gp_element(rnp_root, c)++;

	/* If a grace period is not already in progress, start one. */
	if (rnp_root->gpnum != rnp_root->completed) {
@@ -1771,8 +1769,8 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 	int needmore;
 	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);

-	rnp->need_future_gp[c & 0x1] = 0;
-	needmore = rnp->need_future_gp[(c + 1) & 0x1];
+	need_future_gp_element(rnp, c) = 0;
+	needmore = need_future_gp_element(rnp, c + 1);
 	trace_rcu_future_gp(rnp, rdp, c,
 			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
 	return needmore;
@@ -159,6 +159,21 @@ struct rcu_node {
 	wait_queue_head_t exp_wq[4];
 } ____cacheline_internodealigned_in_smp;

+/* Accessors for ->need_future_gp[] array. */
+#define need_future_gp_mask() \
+	(ARRAY_SIZE(((struct rcu_node *)NULL)->need_future_gp) - 1)
+#define need_future_gp_element(rnp, c) \
+	((rnp)->need_future_gp[(c) & need_future_gp_mask()])
+#define need_any_future_gp(rnp)						\
+({									\
+	int __i;							\
+	bool __nonzero = false;						\
+									\
+	for (__i = 0; __i < ARRAY_SIZE((rnp)->need_future_gp); __i++)	\
+		__nonzero = __nonzero || (rnp)->need_future_gp[__i];	\
+	__nonzero;							\
+})
+
 /*
  * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
  * are indexed relative to this interval rather than the global CPU ID space.
@@ -1790,7 +1790,7 @@ static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
  */
 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
 {
-	rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
+	need_future_gp_element(rnp, rnp->completed + 1) += nrq;
 }

 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
|
Reference in New Issue
Block a user