locking/atomics: COCCINELLE/treewide: Convert trivial ACCESS_ONCE() patterns to READ_ONCE()/WRITE_ONCE()
Please do not apply this to mainline directly, instead please re-run the
coccinelle script shown below and apply its output.

For several reasons, it is desirable to use {READ,WRITE}_ONCE() in
preference to ACCESS_ONCE(), and new code is expected to use one of the
former. So far, there's been no reason to change most existing uses of
ACCESS_ONCE(), as these aren't harmful, and changing them results in
churn.

However, for some features, the read/write distinction is critical to
correct operation. To distinguish these cases, separate read/write
accessors must be used. This patch migrates (most) remaining
ACCESS_ONCE() instances to {READ,WRITE}_ONCE(), using the following
coccinelle script:

----
// Convert trivial ACCESS_ONCE() uses to equivalent READ_ONCE() and
// WRITE_ONCE()

// $ make coccicheck COCCI=/home/mark/once.cocci SPFLAGS="--include-headers" MODE=patch

virtual patch

@ depends on patch @
expression E1, E2;
@@

- ACCESS_ONCE(E1) = E2
+ WRITE_ONCE(E1, E2)

@ depends on patch @
expression E;
@@

- ACCESS_ONCE(E)
+ READ_ONCE(E)
----

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: davem@davemloft.net
Cc: linux-arch@vger.kernel.org
Cc: mpe@ellerman.id.au
Cc: shuah@kernel.org
Cc: snitzer@redhat.com
Cc: thor.thayer@linux.intel.com
Cc: tj@kernel.org
Cc: viro@zeniv.linux.org.uk
Cc: will.deacon@arm.com
Link: http://lkml.kernel.org/r/1508792849-3115-19-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
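For illustration only, here is a minimal sketch of what the two coccinelle rules above do to ordinary kernel C; the struct, field, and function names are hypothetical and not taken from the patch:

----
#include <linux/compiler.h>	/* ACCESS_ONCE(), READ_ONCE(), WRITE_ONCE() */

/* Hypothetical example: a lockless flag shared between a writer and a reader. */
struct example_state {
	int flag;
};

/* Before: both sides use ACCESS_ONCE(), hiding the read/write direction. */
static void set_flag_old(struct example_state *s)
{
	ACCESS_ONCE(s->flag) = 1;
}

static int get_flag_old(struct example_state *s)
{
	return ACCESS_ONCE(s->flag);
}

/* After: the first rule rewrites the store into WRITE_ONCE(),
 * the second rewrites the load into READ_ONCE().
 */
static void set_flag_new(struct example_state *s)
{
	WRITE_ONCE(s->flag, 1);
}

static int get_flag_new(struct example_state *s)
{
	return READ_ONCE(s->flag);
}
----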
commit 6aa7de0591
parent b03a0fe0c5
committed by Ingo Molnar
@@ -734,7 +734,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
 	__le16 res_count, next_res_count;
 
 	i = ar_first_buffer_index(ctx);
-	res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);
+	res_count = READ_ONCE(ctx->descriptors[i].res_count);
 
 	/* A buffer that is not yet completely filled must be the last one. */
 	while (i != last && res_count == 0) {
@@ -742,8 +742,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
 		/* Peek at the next descriptor. */
 		next_i = ar_next_buffer_index(i);
 		rmb(); /* read descriptors in order */
-		next_res_count = ACCESS_ONCE(
-				ctx->descriptors[next_i].res_count);
+		next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
 		/*
 		 * If the next descriptor is still empty, we must stop at this
 		 * descriptor.
@@ -759,8 +758,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
 			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
 				next_i = ar_next_buffer_index(next_i);
 				rmb();
-				next_res_count = ACCESS_ONCE(
-						ctx->descriptors[next_i].res_count);
+				next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
 				if (next_res_count != cpu_to_le16(PAGE_SIZE))
 					goto next_buffer_is_active;
 			}
@@ -2812,7 +2810,7 @@ static int handle_ir_buffer_fill(struct context *context,
 	u32 buffer_dma;
 
 	req_count = le16_to_cpu(last->req_count);
-	res_count = le16_to_cpu(ACCESS_ONCE(last->res_count));
+	res_count = le16_to_cpu(READ_ONCE(last->res_count));
 	completed = req_count - res_count;
 	buffer_dma = le32_to_cpu(last->data_address);