locking/atomics: COCCINELLE/treewide: Convert trivial ACCESS_ONCE() patterns to READ_ONCE()/WRITE_ONCE()
Please do not apply this to mainline directly, instead please re-run the
coccinelle script shown below and apply its output.

For several reasons, it is desirable to use {READ,WRITE}_ONCE() in
preference to ACCESS_ONCE(), and new code is expected to use one of the
former. So far, there's been no reason to change most existing uses of
ACCESS_ONCE(), as these aren't harmful, and changing them results in
churn.

However, for some features, the read/write distinction is critical to
correct operation. To distinguish these cases, separate read/write
accessors must be used. This patch migrates (most) remaining
ACCESS_ONCE() instances to {READ,WRITE}_ONCE(), using the following
coccinelle script:

----
// Convert trivial ACCESS_ONCE() uses to equivalent READ_ONCE() and
// WRITE_ONCE()

// $ make coccicheck COCCI=/home/mark/once.cocci SPFLAGS="--include-headers" MODE=patch

virtual patch

@ depends on patch @
expression E1, E2;
@@

- ACCESS_ONCE(E1) = E2
+ WRITE_ONCE(E1, E2)

@ depends on patch @
expression E;
@@

- ACCESS_ONCE(E)
+ READ_ONCE(E)
----

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: davem@davemloft.net
Cc: linux-arch@vger.kernel.org
Cc: mpe@ellerman.id.au
Cc: shuah@kernel.org
Cc: snitzer@redhat.com
Cc: thor.thayer@linux.intel.com
Cc: tj@kernel.org
Cc: viro@zeniv.linux.org.uk
Cc: will.deacon@arm.com
Link: http://lkml.kernel.org/r/1508792849-3115-19-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
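The two coccinelle rules amount to the following, shown as a minimal standalone C sketch. The flag `need_flush` and the simplified READ_ONCE()/WRITE_ONCE() definitions are illustrative stand-ins only (the kernel's real macros live in <linux/compiler.h> and do considerably more work); the point is simply that an ACCESS_ONCE() store becomes WRITE_ONCE() and an ACCESS_ONCE() load becomes READ_ONCE():

----
/*
 * Standalone sketch of the trivial conversion performed by the script.
 * `need_flush` and the simplified macro bodies below are illustrative
 * stand-ins, not the kernel's actual implementation.
 */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

static int need_flush;

static void set_flag(void)
{
	/* was: ACCESS_ONCE(need_flush) = 1;     -> first rule (store) */
	WRITE_ONCE(need_flush, 1);
}

static int test_flag(void)
{
	/* was: return ACCESS_ONCE(need_flush);  -> second rule (load) */
	return READ_ONCE(need_flush);
}
----

Keeping loads and stores syntactically distinct in this way is what allows the features mentioned above, for which the read/write distinction matters, to treat the two kinds of access differently.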
commit 6aa7de0591
parent b03a0fe0c5
committed by Ingo Molnar
drivers/md/dm-stats.c

@@ -431,7 +431,7 @@ do_sync_free:
 		synchronize_rcu_expedited();
 		dm_stat_free(&s->rcu_head);
 	} else {
-		ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
+		WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
 		call_rcu(&s->rcu_head, dm_stat_free);
 	}
 	return 0;
@@ -639,12 +639,12 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
 		 */
 		last = raw_cpu_ptr(stats->last);
 		stats_aux->merged =
-			(bi_sector == (ACCESS_ONCE(last->last_sector) &&
+			(bi_sector == (READ_ONCE(last->last_sector) &&
 				       ((bi_rw == WRITE) ==
-					(ACCESS_ONCE(last->last_rw) == WRITE))
+					(READ_ONCE(last->last_rw) == WRITE))
 				       ));
-		ACCESS_ONCE(last->last_sector) = end_sector;
-		ACCESS_ONCE(last->last_rw) = bi_rw;
+		WRITE_ONCE(last->last_sector, end_sector);
+		WRITE_ONCE(last->last_rw, bi_rw);
 	}
 
 	rcu_read_lock();
@@ -693,22 +693,22 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared
 
 	for_each_possible_cpu(cpu) {
 		p = &s->stat_percpu[cpu][x];
-		shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
-		shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
-		shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
-		shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
-		shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
-		shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
-		shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
-		shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
-		shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
-		shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
-		shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
-		shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
+		shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
+		shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
+		shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
+		shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
+		shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
+		shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
+		shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
+		shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
+		shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
+		shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
+		shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
+		shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
 		if (s->n_histogram_entries) {
 			unsigned i;
 			for (i = 0; i < s->n_histogram_entries + 1; i++)
-				shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]);
+				shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
 		}
 	}
 }