sched/swait: Rename to exclusive
Since swait basically implemented exclusive waits only, make sure the API
reflects that.

  $ git grep -l -e "\<swake_up\>" -e "\<swait_event[^ (]*" -e "\<prepare_to_swait\>" |
    while read file; do
        sed -i -e 's/\<swake_up\>/&_one/g' \
               -e 's/\<swait_event[^ (]*/&_exclusive/g' \
               -e 's/\<prepare_to_swait\>/&_exclusive/g' $file;
    done

With a few manual touch-ups.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: bigeasy@linutronix.de
Cc: oleg@redhat.com
Cc: paulmck@linux.vnet.ibm.com
Cc: pbonzini@redhat.com
Link: https://lkml.kernel.org/r/20180612083909.261946548@infradead.org
commit b3dae109fa
parent 0abf17bc77
committed by Thomas Gleixner
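For orientation before the diff, here is a minimal usage sketch of the renamed API. This is not part of the commit; the demo_* names are invented for illustration. The waiter side now calls swait_event_exclusive(), which sleeps until the condition becomes true, and the waker side calls swake_up_one(), which wakes at most one exclusive waiter. As the diff below shows, swake_up_locked() and prepare_to_swait_event() keep their names.

#include <linux/compiler.h>
#include <linux/swait.h>

/* Invented example state -- not from the commit. */
static DECLARE_SWAIT_QUEUE_HEAD(demo_wq);
static bool demo_done;

/* Waiter: sleeps as an exclusive waiter until demo_done is set. */
static void demo_wait_for_done(void)
{
	swait_event_exclusive(demo_wq, READ_ONCE(demo_done));
}

/* Waker: publish the condition first, then wake at most one waiter. */
static void demo_signal_done(void)
{
	WRITE_ONCE(demo_done, true);
	swake_up_one(&demo_wq);
}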
@@ -92,7 +92,7 @@ static void s2idle_enter(void)
 	/* Push all the CPUs into the idle loop. */
 	wake_up_all_idle_cpus();
 	/* Make the current CPU wait so it can enter the idle loop too. */
-	swait_event(s2idle_wait_head,
+	swait_event_exclusive(s2idle_wait_head,
 		    s2idle_state == S2IDLE_STATE_WAKE);
 
 	cpuidle_pause();
@@ -160,7 +160,7 @@ void s2idle_wake(void)
 	raw_spin_lock_irqsave(&s2idle_lock, flags);
 	if (s2idle_state > S2IDLE_STATE_NONE) {
 		s2idle_state = S2IDLE_STATE_WAKE;
-		swake_up(&s2idle_wait_head);
+		swake_up_one(&s2idle_wait_head);
 	}
 	raw_spin_unlock_irqrestore(&s2idle_lock, flags);
 }

@@ -110,7 +110,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 
 	WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
 	if (!newval && READ_ONCE(sp->srcu_gp_waiting))
-		swake_up(&sp->srcu_wq);
+		swake_up_one(&sp->srcu_wq);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
@@ -140,7 +140,7 @@ void srcu_drive_gp(struct work_struct *wp)
 	idx = sp->srcu_idx;
 	WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
 	WRITE_ONCE(sp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */
-	swait_event(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
+	swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
 	WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
 
 	/* Invoke the callbacks we removed above. */

@@ -1727,7 +1727,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
 	    !READ_ONCE(rsp->gp_flags) ||
 	    !rsp->gp_kthread)
 		return;
-	swake_up(&rsp->gp_wq);
+	swake_up_one(&rsp->gp_wq);
 }
 
 /*
@@ -2002,7 +2002,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 }
 
 /*
- * Helper function for swait_event_idle() wakeup at force-quiescent-state
+ * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
  * time.
  */
 static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
@@ -2144,7 +2144,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 					       READ_ONCE(rsp->gpnum),
 					       TPS("reqwait"));
 			rsp->gp_state = RCU_GP_WAIT_GPS;
-			swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
+			swait_event_idle_exclusive(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
 						     RCU_GP_FLAG_INIT);
 			rsp->gp_state = RCU_GP_DONE_GPS;
 			/* Locking provides needed memory barrier. */
@@ -2176,7 +2176,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 					       READ_ONCE(rsp->gpnum),
 					       TPS("fqswait"));
 			rsp->gp_state = RCU_GP_WAIT_FQS;
-			ret = swait_event_idle_timeout(rsp->gp_wq,
+			ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
 					rcu_gp_fqs_check_wake(rsp, &gf), j);
 			rsp->gp_state = RCU_GP_DOING_FQS;
 			/* Locking provides needed memory barriers. */

@@ -212,7 +212,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			if (wake) {
 				smp_mb(); /* EGP done before wake_up(). */
-				swake_up(&rsp->expedited_wq);
+				swake_up_one(&rsp->expedited_wq);
 			}
 			break;
 		}
@@ -518,7 +518,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 	jiffies_start = jiffies;
 
 	for (;;) {
-		ret = swait_event_timeout(
+		ret = swait_event_timeout_exclusive(
 				rsp->expedited_wq,
 				sync_rcu_preempt_exp_done_unlocked(rnp_root),
 				jiffies_stall);

@@ -1854,8 +1854,8 @@ static void __wake_nocb_leader(struct rcu_data *rdp, bool force,
 		WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
 		del_timer(&rdp->nocb_timer);
 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
-		smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
-		swake_up(&rdp_leader->nocb_wq);
+		smp_mb(); /* ->nocb_leader_sleep before swake_up_one(). */
+		swake_up_one(&rdp_leader->nocb_wq);
 	} else {
 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 	}
@@ -2082,7 +2082,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	 */
 	trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
 	for (;;) {
-		swait_event_interruptible(
+		swait_event_interruptible_exclusive(
 			rnp->nocb_gp_wq[c & 0x1],
 			(d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
 		if (likely(d))
@@ -2111,7 +2111,7 @@ wait_again:
 	/* Wait for callbacks to appear. */
 	if (!rcu_nocb_poll) {
 		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep"));
-		swait_event_interruptible(my_rdp->nocb_wq,
+		swait_event_interruptible_exclusive(my_rdp->nocb_wq,
 				!READ_ONCE(my_rdp->nocb_leader_sleep));
 		raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
 		my_rdp->nocb_leader_sleep = true;
@@ -2176,7 +2176,7 @@ wait_again:
 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 		if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
 			/* List was empty, so wake up the follower. */
-			swake_up(&rdp->nocb_wq);
+			swake_up_one(&rdp->nocb_wq);
 		}
 	}
 
@@ -2193,7 +2193,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
 {
 	for (;;) {
 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep"));
-		swait_event_interruptible(rdp->nocb_wq,
+		swait_event_interruptible_exclusive(rdp->nocb_wq,
 					READ_ONCE(rdp->nocb_follower_head));
 		if (smp_load_acquire(&rdp->nocb_follower_head)) {
 			/* ^^^ Ensure CB invocation follows _head test. */

@@ -32,7 +32,7 @@ void swake_up_locked(struct swait_queue_head *q)
 }
 EXPORT_SYMBOL(swake_up_locked);
 
-void swake_up(struct swait_queue_head *q)
+void swake_up_one(struct swait_queue_head *q)
 {
 	unsigned long flags;
 
@@ -40,7 +40,7 @@ void swake_up(struct swait_queue_head *q)
 	swake_up_locked(q);
 	raw_spin_unlock_irqrestore(&q->lock, flags);
 }
-EXPORT_SYMBOL(swake_up);
+EXPORT_SYMBOL(swake_up_one);
 
 /*
  * Does not allow usage from IRQ disabled, since we must be able to
@@ -76,7 +76,7 @@ static void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *w
 	list_add_tail(&wait->task_list, &q->task_list);
 }
 
-void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
+void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
 {
 	unsigned long flags;
 
@@ -85,7 +85,7 @@ void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int
 	set_current_state(state);
 	raw_spin_unlock_irqrestore(&q->lock, flags);
 }
-EXPORT_SYMBOL(prepare_to_swait);
+EXPORT_SYMBOL(prepare_to_swait_exclusive);
 
 long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
 {
@@ -95,7 +95,7 @@ long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait
 	raw_spin_lock_irqsave(&q->lock, flags);
 	if (unlikely(signal_pending_state(state, current))) {
 		/*
-		 * See prepare_to_wait_event(). TL;DR, subsequent swake_up()
+		 * See prepare_to_wait_event(). TL;DR, subsequent swake_up_one()
 		 * must not see us.
 		 */
 		list_del_init(&wait->task_list);