RCU: Privatize rcu_node::lock
In the patch "rcu: Add transitivity to remaining rcu_node ->lock acquisitions", all locking operations on rcu_node::lock were replaced with wrappers because of the need for transitivity, which indicates that we should never write code using LOCK primitives alone (i.e., without a proper barrier following) on rcu_node::lock outside those wrappers. We could detect this kind of misuse of rcu_node::lock in the future by adding the __private modifier to rcu_node::lock.

To privatize rcu_node::lock, unlock wrappers are also needed. Replacing spinlock unlocks with these wrappers not only privatizes rcu_node::lock but also makes it easier to identify the critical sections protected by it.

This patch adds the __private modifier to rcu_node::lock and wraps every access to it in ACCESS_PRIVATE(). In addition, unlock wrappers are added, and raw_spin_unlock(&rnp->lock) and friends are replaced with those wrappers.

Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
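For reference, a minimal sketch of the machinery this patch relies on, assuming definitions roughly along the lines of the companion changes to include/linux/compiler.h (__private, ACCESS_PRIVATE()) and kernel/rcu/tree.h (the _rcu_node wrappers); the exact kernel definitions may differ in detail:

/*
 * Sketch, not verbatim kernel source.  Under sparse (__CHECKER__),
 * __private marks a member as noderef, so any direct access outside
 * ACCESS_PRIVATE() is reported; for gcc both expand to plain access.
 */
#ifdef __CHECKER__
# define __private	__attribute__((noderef))
# define ACCESS_PRIVATE(p, member) \
	(*((typeof((p)->member) __force *)&(p)->member))
#else
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif

/*
 * The lock wrappers pair each acquisition with
 * smp_mb__after_unlock_lock() to provide the transitivity described
 * above; the new unlock wrappers exist so that no caller ever names
 * rnp->lock directly, which in turn lets ->lock carry the __private
 * marker in struct rcu_node.
 */
#define raw_spin_lock_rcu_node(p)					\
do {									\
	raw_spin_lock(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_rcu_node(p)					\
	raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags)			\
do {									\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags)			\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)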
@@ -235,7 +235,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 		rnp->gp_tasks = &t->rcu_node_entry;
 	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
 		rnp->exp_tasks = &t->rcu_node_entry;
-	raw_spin_unlock(&rnp->lock); /* rrupts remain disabled. */
+	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
 
 	/*
 	 * Report the quiescent state for the expedited GP. This expedited
@@ -489,7 +489,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 				     !!rnp->gp_tasks);
 			rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
 		} else {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 
 		/* Unboost if we were boosted. */
@@ -518,14 +518,14 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	t = list_entry(rnp->gp_tasks->prev,
 		       struct task_struct, rcu_node_entry);
 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
 		sched_show_task(t);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
@@ -990,7 +990,7 @@ static int rcu_boost(struct rcu_node *rnp)
 	 * might exit their RCU read-side critical sections on their own.
 	 */
 	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return 0;
 	}
 
@@ -1027,7 +1027,7 @@ static int rcu_boost(struct rcu_node *rnp)
 	 */
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	/* Lock only for side effect: boosts task t's priority. */
 	rt_mutex_lock(&rnp->boost_mtx);
 	rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */
@@ -1087,7 +1087,7 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 
 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
 		rnp->n_balk_exp_gp_tasks++;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		return;
 	}
 	if (rnp->exp_tasks != NULL ||
@@ -1097,13 +1097,13 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 	    ULONG_CMP_GE(jiffies, rnp->boost_time))) {
 		if (rnp->exp_tasks == NULL)
 			rnp->boost_tasks = rnp->gp_tasks;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		t = rnp->boost_kthread_task;
 		if (t)
 			rcu_wake_cond(t, rnp->boost_kthread_status);
 	} else {
 		rcu_initiate_boost_trace(rnp);
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 }
 
@@ -1171,7 +1171,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 		return PTR_ERR(t);
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	rnp->boost_kthread_task = t;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	sp.sched_priority = kthread_prio;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
@@ -1307,7 +1307,7 @@ static void rcu_prepare_kthreads(int cpu)
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 	__releases(rnp->lock)
 {
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 static void invoke_rcu_callbacks_kthread(void)
@@ -1558,7 +1558,7 @@ static void rcu_prepare_for_idle(void)
 		rnp = rdp->mynode;
 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
 		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
 		if (needwake)
 			rcu_gp_kthread_wake(rsp);
 	}
@@ -2058,7 +2058,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	needwake = rcu_start_future_gp(rnp, rdp, &c);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (needwake)
 		rcu_gp_kthread_wake(rdp->rsp);
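As a postscript to the diff above, a hypothetical fragment (not part of the patch; example_bad() and example_good() are made-up names) illustrating the misuse that sparse can catch once ->lock carries __private, next to the only accepted pattern going forward:

/*
 * Hypothetical illustration: with rcu_node::lock marked __private,
 * a sparse run (make C=1) flags the direct accesses in example_bad(),
 * because __private maps to __attribute__((noderef)) under __CHECKER__.
 */
static void example_bad(struct rcu_node *rnp)
{
	unsigned long flags;

	/* sparse warns here: direct use of a __private member, and no
	 * smp_mb__after_unlock_lock() follows the acquisition. */
	raw_spin_lock_irqsave(&rnp->lock, flags);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void example_good(struct rcu_node *rnp)
{
	unsigned long flags;

	/* The wrapper supplies the barrier and never names ->lock. */
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* ... rcu_node critical section ... */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}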