locking/atomics: COCCINELLE/treewide: Convert trivial ACCESS_ONCE() patterns to READ_ONCE()/WRITE_ONCE()
Please do not apply this to mainline directly, instead please re-run
the coccinelle script shown below and apply its output.

For several reasons, it is desirable to use {READ,WRITE}_ONCE() in
preference to ACCESS_ONCE(), and new code is expected to use one of
the former. So far, there's been no reason to change most existing
uses of ACCESS_ONCE(), as these aren't harmful, and changing them
results in churn.

However, for some features, the read/write distinction is critical to
correct operation. To distinguish these cases, separate read/write
accessors must be used. This patch migrates (most) remaining
ACCESS_ONCE() instances to {READ,WRITE}_ONCE(), using the following
coccinelle script:

----
// Convert trivial ACCESS_ONCE() uses to equivalent READ_ONCE() and
// WRITE_ONCE()

// $ make coccicheck COCCI=/home/mark/once.cocci SPFLAGS="--include-headers" MODE=patch
virtual patch

@ depends on patch @
expression E1, E2;
@@

- ACCESS_ONCE(E1) = E2
+ WRITE_ONCE(E1, E2)

@ depends on patch @
expression E;
@@

- ACCESS_ONCE(E)
+ READ_ONCE(E)
----

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: davem@davemloft.net
Cc: linux-arch@vger.kernel.org
Cc: mpe@ellerman.id.au
Cc: shuah@kernel.org
Cc: snitzer@redhat.com
Cc: thor.thayer@linux.intel.com
Cc: tj@kernel.org
Cc: viro@zeniv.linux.org.uk
Cc: will.deacon@arm.com
Link: http://lkml.kernel.org/r/1508792849-3115-19-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 6aa7de0591
parent b03a0fe0c5
committed by Ingo Molnar
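For readers skimming the diff below, here is a minimal C sketch of the two
conversion patterns the coccinelle script performs. The `shared` variable and
the two helper functions are hypothetical illustrations; only ACCESS_ONCE(),
READ_ONCE() and WRITE_ONCE() are the real <linux/compiler.h> accessors:

----
#include <linux/compiler.h>	/* ACCESS_ONCE(), READ_ONCE(), WRITE_ONCE() */

/* Hypothetical flag shared between contexts without a lock. */
static unsigned long shared;

static void example_store(unsigned long val)
{
	/* was: ACCESS_ONCE(shared) = val; */
	WRITE_ONCE(shared, val);	/* store direction now explicit */
}

static unsigned long example_load(void)
{
	/* was: return ACCESS_ONCE(shared); */
	return READ_ONCE(shared);	/* load direction now explicit */
}
----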
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2545,7 +2545,7 @@ static void ef4_reset_work(struct work_struct *data)
 	unsigned long pending;
 	enum reset_type method;
 
-	pending = ACCESS_ONCE(efx->reset_pending);
+	pending = READ_ONCE(efx->reset_pending);
 	method = fls(pending) - 1;
 
 	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
@@ -2605,7 +2605,7 @@ void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
 	/* If we're not READY then just leave the flags set as the cue
 	 * to abort probing or reschedule the reset later.
 	 */
-	if (ACCESS_ONCE(efx->state) != STATE_READY)
+	if (READ_ONCE(efx->state) != STATE_READY)
 		return;
 
 	queue_work(reset_workqueue, &efx->reset_work);
--- a/drivers/net/ethernet/sfc/falcon/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -452,7 +452,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 		  "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
 		  irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
 
-	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
 		return IRQ_HANDLED;
 
 	/* Check to see if we have a serious error condition */
@@ -1372,7 +1372,7 @@ static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx)
 	ef4_oword_t reg;
 	int link_speed, isolate;
 
-	isolate = !!ACCESS_ONCE(efx->reset_pending);
+	isolate = !!READ_ONCE(efx->reset_pending);
 
 	switch (link_state->speed) {
 	case 10000: link_speed = 3; break;
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -834,7 +834,7 @@ ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
 	struct ef4_nic *efx = channel->efx;
 	int tx_packets = 0;
 
-	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+	if (unlikely(READ_ONCE(efx->reset_pending)))
 		return 0;
 
 	if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
@@ -990,7 +990,7 @@ ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
 	struct ef4_rx_queue *rx_queue;
 	struct ef4_nic *efx = channel->efx;
 
-	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+	if (unlikely(READ_ONCE(efx->reset_pending)))
 		return;
 
 	rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
@@ -1504,7 +1504,7 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
 irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
 {
 	struct ef4_nic *efx = dev_id;
-	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
 	ef4_oword_t *int_ker = efx->irq_status.addr;
 	irqreturn_t result = IRQ_NONE;
 	struct ef4_channel *channel;
@@ -1596,7 +1596,7 @@ irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
 		  "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
 		  irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
 
-	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
 		return IRQ_HANDLED;
 
 	/* Handle non-event-queue sources */
--- a/drivers/net/ethernet/sfc/falcon/nic.h
+++ b/drivers/net/ethernet/sfc/falcon/nic.h
@@ -83,7 +83,7 @@ static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_
 static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue,
 					 unsigned int write_count)
 {
-	unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+	unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
 
 	if (empty_read_count == 0)
 		return false;
@@ -464,11 +464,11 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx);
 
 static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel)
 {
-	return ACCESS_ONCE(channel->event_test_cpu);
+	return READ_ONCE(channel->event_test_cpu);
 }
 static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx)
 {
-	return ACCESS_ONCE(efx->last_irq_cpu);
+	return READ_ONCE(efx->last_irq_cpu);
 }
 
 /* Global Resources */
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -134,8 +134,8 @@ static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1)
 		 */
 		netif_tx_stop_queue(txq1->core_txq);
 		smp_mb();
-		txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
-		txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
+		txq1->old_read_count = READ_ONCE(txq1->read_count);
+		txq2->old_read_count = READ_ONCE(txq2->read_count);
 
 		fill_level = max(txq1->insert_count - txq1->old_read_count,
 				 txq2->insert_count - txq2->old_read_count);
@@ -524,7 +524,7 @@ void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
 
 	/* Check whether the hardware queue is now empty */
 	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
-		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
 		if (tx_queue->read_count == tx_queue->old_write_count) {
 			smp_mb();
 			tx_queue->empty_read_count =