locking/spinlock, sched/core: Clarify requirements for smp_mb__after_spinlock()
There are 11 interpretations of the requirements described in the header
comment for smp_mb__after_spinlock(): one for each LKMM maintainer, and
one currently encoded in the Cat file. Stick to the latter (until a more
satisfactory solution is available).

This also reworks some snippets related to the barrier to illustrate the
requirements and to link them to the idioms which are relied upon at its
call sites.

Suggested-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Andrea Parri <andrea.parri@amarulasolutions.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: akiyks@gmail.com
Cc: dhowells@redhat.com
Cc: j.alglave@ucl.ac.uk
Cc: linux-arch@vger.kernel.org
Cc: luc.maranget@inria.fr
Cc: npiggin@gmail.com
Cc: parri.andrea@gmail.com
Cc: stern@rowland.harvard.edu
Link: http://lkml.kernel.org/r/20180716180605.16115-11-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
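For orientation: the requirement currently encoded in the Cat file is that a
lock acquisition followed by smp_mb__after_spinlock() provides a full memory
barrier. Below is a minimal user-space sketch of the store-buffering outcome
this forbids, with C11 seq_cst fences standing in for
spin_lock()+smp_mb__after_spinlock() on one side and smp_mb() on the other;
the lock itself is elided, and the thread names and iteration count are
illustrative, so this models the kernel primitives rather than using them:

/* SB model: the r0 == 0 && r1 == 0 outcome must be forbidden. */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

static atomic_int x, y;		/* both initially 0 */
static int r0, r1;

static void *cpu0(void *arg)
{
	atomic_store_explicit(&x, 1, memory_order_relaxed);
	/* models: spin_lock(s); smp_mb__after_spinlock(); */
	atomic_thread_fence(memory_order_seq_cst);
	r0 = atomic_load_explicit(&y, memory_order_relaxed);
	return NULL;
}

static void *cpu1(void *arg)
{
	atomic_store_explicit(&y, 1, memory_order_relaxed);
	/* models: smp_mb(); */
	atomic_thread_fence(memory_order_seq_cst);
	r1 = atomic_load_explicit(&x, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	for (int i = 0; i < 100000; i++) {
		pthread_t t0, t1;

		atomic_store(&x, 0);
		atomic_store(&y, 0);
		pthread_create(&t0, NULL, cpu0, NULL);
		pthread_create(&t1, NULL, cpu1, NULL);
		pthread_join(t0, NULL);
		pthread_join(t1, NULL);
		assert(!(r0 == 0 && r1 == 0));	/* each missed the other's store */
	}
	return 0;
}

In the C11 model, weakening either fence to acquire/release permits the
r0 == 0 && r1 == 0 outcome, which is why the call sites below depend on the
full-barrier reading.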
@@ -1998,21 +1998,20 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
 	 * in smp_cond_load_acquire() below.
 	 *
-	 * sched_ttwu_pending()			try_to_wake_up()
-	 *   [S] p->on_rq = 1;			[L] P->state
-	 *       UNLOCK rq->lock  -----.
-	 *                              \
-	 *                               +---   RMB
-	 * schedule()                   /
-	 *       LOCK rq->lock    -----'
-	 *       UNLOCK rq->lock
+	 * sched_ttwu_pending()			try_to_wake_up()
+	 *   STORE p->on_rq = 1			  LOAD p->state
+	 *   UNLOCK rq->lock
+	 *
+	 * __schedule() (switch to task 'p')
+	 *   LOCK rq->lock			  smp_rmb();
+	 *   smp_mb__after_spinlock();
+	 *   UNLOCK rq->lock
 	 *
 	 * [task p]
-	 *   [S] p->state = UNINTERRUPTIBLE	[L] p->on_rq
+	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
 	 *
-	 * Pairs with the UNLOCK+LOCK on rq->lock from the
-	 * last wakeup of our task and the schedule that got our task
-	 * current.
+	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
+	 * __schedule(). See the comment for smp_mb__after_spinlock().
 	 */
 	smp_rmb();
 
 	if (p->on_rq && ttwu_remote(p, wake_flags))
@@ -2026,15 +2025,17 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	 * One must be running (->on_cpu == 1) in order to remove oneself
 	 * from the runqueue.
 	 *
-	 *  [S] ->on_cpu = 1;	[L] ->on_rq
-	 *      UNLOCK rq->lock
-	 *			RMB
-	 *      LOCK   rq->lock
-	 *  [S] ->on_rq = 0;	[L] ->on_cpu
+	 * __schedule() (switch to task 'p')	try_to_wake_up()
+	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
+	 *   UNLOCK rq->lock
 	 *
-	 * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
-	 * from the consecutive calls to schedule(); the first switching to our
-	 * task, the second putting it to sleep.
+	 * __schedule() (put 'p' to sleep)
+	 *   LOCK rq->lock			  smp_rmb();
+	 *   smp_mb__after_spinlock();
+	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
+	 *
+	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
+	 * __schedule(). See the comment for smp_mb__after_spinlock().
 	 */
 	smp_rmb();
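Both reworked snippets are instances of one message-passing shape: the
scheduler side orders two stores with LOCK+smp_mb__after_spinlock(), while
try_to_wake_up() orders the two corresponding loads with smp_rmb(). As a
companion to the diagrams above, here is a user-space sketch of the first
instance (p->on_rq vs. p->state; the p->on_cpu vs. p->on_rq case in the
second hunk is analogous). Thread names are illustrative and the C11 fences
merely model the kernel barriers:

/* MP model: seeing the new state but a stale on_rq must be forbidden. */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

static atomic_int on_rq, state;	/* stand-ins for p->on_rq, p->state */
static int r0, r1;

/* sched_ttwu_pending() + __schedule() + task p, collapsed into one thread. */
static void *sched_side(void *arg)
{
	atomic_store_explicit(&on_rq, 1, memory_order_relaxed);
	/* models: LOCK rq->lock; smp_mb__after_spinlock(); UNLOCK rq->lock */
	atomic_thread_fence(memory_order_seq_cst);
	atomic_store_explicit(&state, 1, memory_order_relaxed);	/* UNINTERRUPTIBLE */
	return NULL;
}

/* try_to_wake_up() */
static void *waker_side(void *arg)
{
	r0 = atomic_load_explicit(&state, memory_order_relaxed);
	atomic_thread_fence(memory_order_acquire);	/* models: smp_rmb() */
	r1 = atomic_load_explicit(&on_rq, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	for (int i = 0; i < 100000; i++) {
		pthread_t t0, t1;

		atomic_store(&on_rq, 0);
		atomic_store(&state, 0);
		pthread_create(&t0, NULL, sched_side, NULL);
		pthread_create(&t1, NULL, waker_side, NULL);
		pthread_join(t0, NULL);
		pthread_join(t1, NULL);
		assert(!(r0 == 1 && r1 == 0));	/* new state, stale on_rq */
	}
	return 0;
}

The seq_cst fence is (more than) strong enough to stand in for the full
barrier provided by LOCK+smp_mb__after_spinlock(); an acquire fence suffices
to model smp_rmb() here because only load-to-load ordering is required.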