#ifndef __ASM_OPENRISC_ATOMIC_H
#define __ASM_OPENRISC_ATOMIC_H

#include <linux/types.h>
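
/*
 * The l.lwa/l.swa pairs below implement load-linked/store-conditional
 * loops: l.swa only succeeds (setting the flag) if nothing else wrote
 * the location since the matching l.lwa, and l.bnf branches back to
 * retry when the store failed.
 */

/* Atomically perform op with v->counter and i. */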
#define ATOMIC_OP(op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
	int tmp; \
 \
	__asm__ __volatile__( \
		"1: l.lwa %0,0(%1) \n" \
		" l." #op " %0,%0,%2 \n" \
		" l.swa 0(%1),%0 \n" \
		" l.bnf 1b \n" \
		" l.nop \n" \
		: "=&r"(tmp) \
		: "r"(&v->counter), "r"(i) \
		: "cc", "memory"); \
}
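
/* Atomically perform op with v->counter and i; return the new value. */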
#define ATOMIC_OP_RETURN(op) \
static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
	int tmp; \
 \
	__asm__ __volatile__( \
		"1: l.lwa %0,0(%1) \n" \
		" l." #op " %0,%0,%2 \n" \
		" l.swa 0(%1),%0 \n" \
		" l.bnf 1b \n" \
		" l.nop \n" \
		: "=&r"(tmp) \
		: "r"(&v->counter), "r"(i) \
		: "cc", "memory"); \
 \
	return tmp; \
}
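
/* Atomically perform op with v->counter and i; return the original value. */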
#define ATOMIC_FETCH_OP(op) \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
	int tmp, old; \
 \
	__asm__ __volatile__( \
		"1: l.lwa %0,0(%2) \n" \
		" l." #op " %1,%0,%3 \n" \
		" l.swa 0(%2),%1 \n" \
		" l.bnf 1b \n" \
		" l.nop \n" \
		: "=&r"(old), "=&r"(tmp) \
		: "r"(&v->counter), "r"(i) \
		: "cc", "memory"); \
 \
	return old; \
}
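
/*
 * Instantiate the operations. The op name is pasted into the assembly,
 * so add/sub/and/or/xor map directly onto the OpenRISC instructions
 * l.add, l.sub, l.and, l.or and l.xor.
 */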
ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)

ATOMIC_FETCH_OP(add)
ATOMIC_FETCH_OP(sub)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)

ATOMIC_OP(add)
ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
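
/*
 * Defining each operation to itself tells the generic atomic headers
 * that this architecture implements it, so a generic fallback is only
 * generated for the operations left undefined here.
 */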
#define arch_atomic_add_return	arch_atomic_add_return
#define arch_atomic_sub_return	arch_atomic_sub_return
#define arch_atomic_fetch_add	arch_atomic_fetch_add
#define arch_atomic_fetch_sub	arch_atomic_fetch_sub
#define arch_atomic_fetch_and	arch_atomic_fetch_and
#define arch_atomic_fetch_or	arch_atomic_fetch_or
#define arch_atomic_fetch_xor	arch_atomic_fetch_xor
#define arch_atomic_add		arch_atomic_add
#define arch_atomic_sub		arch_atomic_sub
#define arch_atomic_and		arch_atomic_and
#define arch_atomic_or		arch_atomic_or
#define arch_atomic_xor		arch_atomic_xor
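
/*
 * Atomically add a to v->counter, unless v->counter already equals u;
 * return the original value of v->counter. This is the primitive behind
 * helpers such as atomic_inc_not_zero().
 */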
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int old, tmp;

	__asm__ __volatile__(
		"1: l.lwa %0, 0(%2) \n"
		" l.sfeq %0, %4 \n"
		" l.bf 2f \n"
		" l.add %1, %0, %3 \n"
		" l.swa 0(%2), %1 \n"
		" l.bnf 1b \n"
		" l.nop \n"
		"2: \n"
		: "=&r"(old), "=&r"(tmp)
		: "r"(&v->counter), "r"(a), "r"(u)
		: "cc", "memory");

	return old;
}
#define arch_atomic_fetch_add_unless	arch_atomic_fetch_add_unless
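
/*
 * Aligned word loads and stores are naturally atomic, so plain accesses
 * suffice here; READ_ONCE()/WRITE_ONCE() only keep the compiler from
 * tearing or fusing them.
 */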
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v,i)	WRITE_ONCE((v)->counter, (i))

#include <asm/cmpxchg.h>

#define arch_atomic_xchg(ptr, v)	(arch_xchg(&(ptr)->counter, (v)))
#define arch_atomic_cmpxchg(v, old, new)	(arch_cmpxchg(&((v)->counter), (old), (new)))

#endif /* __ASM_OPENRISC_ATOMIC_H */