arch: Mass conversion of smp_mb__*()
Mostly scripted conversion of the smp_mb__* barriers.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/n/tip-55dhyhocezdw1dg7u19hmh1u@git.kernel.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-arch@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 4e857c58ef
parent 1b15611e1c
committed by Ingo Molnar
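For context (not part of this commit): clear_bit() and the other atomic bitops do not imply a memory barrier on their own, so a full barrier is issued right after them when ordering is needed. The conversion only renames that barrier from the bitop-specific spelling to the generic atomic one, as the hunks below show. A minimal sketch of the pattern, using a hypothetical flag word and bit:

	#include <linux/bitops.h>
	#include <linux/atomic.h>

	/* Hypothetical flag word and bit, for illustration only. */
	static unsigned long example_flags;
	#define EXAMPLE_IN_XMIT	0

	static void example_release(void)
	{
		clear_bit(EXAMPLE_IN_XMIT, &example_flags);
		/*
		 * clear_bit() does not order against later loads/stores by
		 * itself; the barrier below does. It was previously spelled
		 * smp_mb__after_clear_bit(), now smp_mb__after_atomic().
		 */
		smp_mb__after_atomic();
	}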
@@ -107,7 +107,7 @@ static int acquire_in_xmit(struct rds_connection *conn)
 static void release_in_xmit(struct rds_connection *conn)
 {
 	clear_bit(RDS_IN_XMIT, &conn->c_flags);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 	/*
 	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
 	 * hot path and finding waiters is very rare.  We don't want to walk
@@ -661,7 +661,7 @@ void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
 
 	/* order flag updates with spin locks */
 	if (!list_empty(&list))
-		smp_mb__after_clear_bit();
+		smp_mb__after_atomic();
 
 	spin_unlock_irqrestore(&conn->c_lock, flags);
 
@@ -691,7 +691,7 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
 	}
 
 	/* order flag updates with the rs lock */
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	spin_unlock_irqrestore(&rs->rs_lock, flags);
 