[SPARC64]: Avoid membar instructions in delay slots.

In particular, avoid membar instructions in the delay
slot of a jmpl instruction.

UltraSPARC-I, II, IIi, and IIe have a bug, documented in
the UltraSPARC-IIi User's Manual, Appendix K, Erratum 51.

The long and short of it is that if the IMU misses on a
branch or jmpl, and there is a store-buffer-synchronizing
membar in the delay slot, the chip can stop fetching instructions.

If interrupts or some other trap source are enabled, the chip
will eventually unwedge itself, but performance will suffer.
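
For illustration, a minimal sketch of the hazardous shape (not taken
from any one file in this patch; retl is the synthetic form of
jmpl %o7 + 8, %g0):

	! Hazardous on the affected chips: a store-buffer-synchronizing
	! membar sits in the delay slot of the jmpl underlying retl.
	retl
	 membar	#StoreLoad | #StoreStore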

We already had a workaround for this bug in a few spots, but
it's better to have the entire tree sanitized for this rule.
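
The sanitized forms in this patch come in two shapes (a sketch,
inferred from the diffs below; the commit message does not name them):
either the membar is hoisted above the return so only a nop rides in
the delay slot, or it is tucked into the delay slot of a branch-always,
which presumably cannot suffer the miss the erratum describes:

	! Shape 1: hoist the barrier out of the delay slot.
	membar	#StoreLoad | #StoreStore
	retl
	 nop

	! Shape 2: hide the barrier in the delay slot of a
	! branch-always, then return from the branch target.
	ba,pt	%xcc, 80b
	 membar	#StoreLoad | #StoreStore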

Signed-off-by: David S. Miller <davem@davemloft.net>
Author: David S. Miller
Date:   2005-06-27 15:42:04 -07:00
Commit: b445e26cbf
Parent: 020f46a39e

15 changed files with 172 additions and 111 deletions

arch/sparc64/lib/atomic.S

@@ -7,18 +7,6 @@
 
 #include <linux/config.h>
 #include <asm/asi.h>
 
-	/* On SMP we need to use memory barriers to ensure
-	 * correct memory operation ordering, nop these out
-	 * for uniprocessor.
-	 */
-#ifdef CONFIG_SMP
-#define ATOMIC_PRE_BARRIER	membar #StoreLoad | #LoadLoad
-#define ATOMIC_POST_BARRIER	membar #StoreLoad | #StoreStore
-#else
-#define ATOMIC_PRE_BARRIER	nop
-#define ATOMIC_POST_BARRIER	nop
-#endif
-
 	.text
 
 	/* Two versions of the atomic routines, one that
@@ -52,6 +40,24 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
 	 nop
 	.size	atomic_sub, .-atomic_sub
 
+	/* On SMP we need to use memory barriers to ensure
+	 * correct memory operation ordering, nop these out
+	 * for uniprocessor.
+	 */
+#ifdef CONFIG_SMP
+
+#define ATOMIC_PRE_BARRIER	membar #StoreLoad | #LoadLoad;
+#define ATOMIC_POST_BARRIER	\
+	ba,pt %xcc, 80b;	\
+	membar #StoreLoad | #StoreStore
+
+80:	retl
+	 nop
+#else
+#define ATOMIC_PRE_BARRIER
+#define ATOMIC_POST_BARRIER
+#endif
+
 	.globl	atomic_add_ret
 	.type	atomic_add_ret,#function
 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
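
To make the SMP expansion concrete: with the macros above, the tail of
atomic_add_ret (next hunk) assembles to roughly the following sketch,
where 80b is a backward reference to the retl/nop stub defined beside
the macro:

	sra	%g7, 0, %o0			! return value computed first
	ba,pt	%xcc, 80b			! branch always to the 80: stub
	 membar	#StoreLoad | #StoreStore	! barrier in the ba,pt delay slot
	retl					! skipped on SMP; this pair is the
	 nop					! live return when CONFIG_SMP is unset

On uniprocessor builds both macros now expand to nothing, so the old
nop placeholders disappear as well.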
@@ -62,9 +68,10 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
 	cmp	%g1, %g7
 	bne,pn	%icc, 1b
 	 add	%g7, %o0, %g7
+	sra	%g7, 0, %o0
 	ATOMIC_POST_BARRIER
 	retl
-	 sra	%g7, 0, %o0
+	 nop
 	.size	atomic_add_ret, .-atomic_add_ret
 
 	.globl	atomic_sub_ret
@@ -77,9 +84,10 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
 	cmp	%g1, %g7
 	bne,pn	%icc, 1b
 	 sub	%g7, %o0, %g7
+	sra	%g7, 0, %o0
 	ATOMIC_POST_BARRIER
 	retl
-	 sra	%g7, 0, %o0
+	 nop
 	.size	atomic_sub_ret, .-atomic_sub_ret
 
 	.globl	atomic64_add
@@ -118,9 +126,10 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
 	cmp	%g1, %g7
 	bne,pn	%xcc, 1b
 	 add	%g7, %o0, %g7
+	mov	%g7, %o0
 	ATOMIC_POST_BARRIER
 	retl
-	 mov	%g7, %o0
+	 nop
 	.size	atomic64_add_ret, .-atomic64_add_ret
 
 	.globl	atomic64_sub_ret
@@ -133,7 +142,8 @@ atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
 	cmp	%g1, %g7
 	bne,pn	%xcc, 1b
 	 sub	%g7, %o0, %g7
+	mov	%g7, %o0
 	ATOMIC_POST_BARRIER
 	retl
-	 mov	%g7, %o0
+	 nop
 	.size	atomic64_sub_ret, .-atomic64_sub_ret