/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif
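/*
 * Note: on RV32, CONFIG_GENERIC_ATOMIC64 is set and the atomic64_t API is
 * provided by the generic implementation behind <asm-generic/atomic64.h>
 * rather than by the AMO-based definitions below.
 */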
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define __atomic_acquire_fence() \
	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")

#define __atomic_release_fence() \
	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
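/*
 * These fences are picked up by the generic atomic code (outside this file)
 * to build the _acquire and _release variants out of the _relaxed ones; the
 * barrier strings themselves come from <asm/fence.h>.
 */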
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}

static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}
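/*
 * Aligned word accesses are naturally atomic on RISC-V, so read/set are plain
 * loads and stores; READ_ONCE()/WRITE_ONCE() only keep the compiler from
 * tearing, fusing or caching the access and add no hardware barrier.
 */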
#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }

static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE(v->counter);
}

static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	WRITE_ONCE(v->counter, i);
}
#endif
/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set. These don't return anything, so there's only
 * one version to worry about. (An illustrative expansion follows the #undefs
 * below.)
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
	__asm__ __volatile__ ( \
		" amo" #asm_op "." #asm_type " zero, %1, %0" \
		: "+A" (v->counter) \
		: "r" (I) \
		: "memory"); \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I) \
	ATOMIC_OP (op, asm_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, I) \
	ATOMIC_OP (op, asm_op, I, w, int, ) \
	ATOMIC_OP (op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add, i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or, or, i)
ATOMIC_OPS(xor, xor, i)

#undef ATOMIC_OP
#undef ATOMIC_OPS
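/*
 * Purely for illustration (not part of the interface): with the arguments
 * used above, ATOMIC_OP(add, add, i, w, int, ) expands to roughly
 *
 *	static __always_inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__ (
 *			" amoadd.w zero, %1, %0"
 *			: "+A" (v->counter)
 *			: "r" (i)
 *			: "memory");
 *	}
 *
 * i.e. a single AMO whose old value is discarded (rd is the zero register)
 * and which, with neither AQ nor RL set, orders nothing beyond the update
 * itself.
 */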
/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i, \
						  atomic##prefix##_t *v) \
{ \
	register c_type ret; \
	__asm__ __volatile__ ( \
		" amo" #asm_op "." #asm_type " %1, %2, %0" \
		: "+A" (v->counter), "=r" (ret) \
		: "r" (I) \
		: "memory"); \
	return ret; \
} \
static __always_inline \
c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
{ \
	register c_type ret; \
	__asm__ __volatile__ ( \
		" amo" #asm_op "." #asm_type ".aqrl %1, %2, %0" \
		: "+A" (v->counter), "=r" (ret) \
		: "r" (I) \
		: "memory"); \
	return ret; \
}
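/*
 * Note: the fully ordered variant sets both the AQ and RL bits on the AMO
 * itself (".aqrl"); such AMOs are treated as fully ordered, so no separate
 * fence instructions are needed here.
 */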
#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline \
c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i, \
						   atomic##prefix##_t *v) \
{ \
	return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
} \
static __always_inline \
c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
{ \
	return arch_atomic##prefix##_fetch_##op(i, v) c_op I; \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I) \
	ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I) \
	ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, ) \
	ATOMIC_FETCH_OP( op, asm_op, I, d, s64, 64) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add, +, i)
ATOMIC_OPS(sub, add, +, -i)

#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define arch_atomic_add_return arch_atomic_add_return
#define arch_atomic_sub_return arch_atomic_sub_return

#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#define arch_atomic_fetch_add arch_atomic_fetch_add
#define arch_atomic_fetch_sub arch_atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define arch_atomic64_add_return arch_atomic64_add_return
#define arch_atomic64_sub_return arch_atomic64_sub_return

#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
#endif

#undef ATOMIC_OPS
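/*
 * Illustrative relationship between the two flavors: if the counter currently
 * holds 5, then
 *
 *	arch_atomic_fetch_add(3, v);	returns 5, leaves v->counter at 8
 *	arch_atomic_add_return(3, v);	returns 11, leaves v->counter at 11
 *
 * i.e. the _return form is the _fetch form with the C operator (c_op) and
 * operand (I) applied to the fetched value, exactly as ATOMIC_OP_RETURN()
 * above does.
 */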
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I) \
	ATOMIC_FETCH_OP(op, asm_op, I, w, int, )
#else
#define ATOMIC_OPS(op, asm_op, I) \
	ATOMIC_FETCH_OP(op, asm_op, I, w, int, ) \
	ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or, or, i)
ATOMIC_OPS(xor, xor, i)

#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#define arch_atomic_fetch_and arch_atomic_fetch_and
#define arch_atomic_fetch_or arch_atomic_fetch_or
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

#ifndef CONFIG_GENERIC_ATOMIC64
#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#define arch_atomic64_fetch_and arch_atomic64_fetch_and
#define arch_atomic64_fetch_or arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#endif

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
/* This is required to provide a full barrier on success. */
static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0: lr.w %[p], %[c]\n"
		" beq %[p], %[u], 1f\n"
		" add %[rc], %[p], %[a]\n"
		" sc.w.rl %[rc], %[rc], %[c]\n"
		" bnez %[rc], 0b\n"
		" fence rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
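/*
 * Illustrative use only (the 'refs' field is made up for the example): take a
 * reference only while the count has not already dropped to zero:
 *
 *	if (arch_atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *		return -ENOENT;
 *
 * On success the LR/SC loop above stores with .rl and then executes
 * "fence rw, rw", giving the required full barrier; when the loaded value
 * equals @u the store (and the fence) is skipped and only the observed value
 * is returned.
 */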
#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0: lr.d %[p], %[c]\n"
		" beq %[p], %[u], 1f\n"
		" add %[rc], %[p], %[a]\n"
		" sc.d.rl %[rc], %[rc], %[c]\n"
		" bnez %[rc], 0b\n"
		" fence rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif
/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
 * {cmp,}xchg and the operations that return, so they need a full barrier.
 */
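/*
 * The __xchg*() and __cmpxchg*() helpers used below come from
 * <asm/cmpxchg.h> (included above); each wrapper simply forwards to the
 * helper with the matching ordering suffix and operand size.
 */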
#define ATOMIC_OP(c_t, prefix, size) \
static __always_inline \
c_t arch_atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
{ \
	return __xchg_relaxed(&(v->counter), n, size); \
} \
static __always_inline \
c_t arch_atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
{ \
	return __xchg_acquire(&(v->counter), n, size); \
} \
static __always_inline \
c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
{ \
	return __xchg_release(&(v->counter), n, size); \
} \
static __always_inline \
c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
{ \
	return __xchg(&(v->counter), n, size); \
} \
static __always_inline \
c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
					  c_t o, c_t n) \
{ \
	return __cmpxchg_relaxed(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t arch_atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
					  c_t o, c_t n) \
{ \
	return __cmpxchg_acquire(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t arch_atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
					  c_t o, c_t n) \
{ \
	return __cmpxchg_release(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t arch_atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
{ \
	return __cmpxchg(&(v->counter), o, n, size); \
}
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS() \
	ATOMIC_OP(int, , 4)
#else
#define ATOMIC_OPS() \
	ATOMIC_OP(int, , 4) \
	ATOMIC_OP(s64, 64, 8)
#endif

ATOMIC_OPS()

#define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed
#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
#define arch_atomic_xchg_release arch_atomic_xchg_release
#define arch_atomic_xchg arch_atomic_xchg
#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

#undef ATOMIC_OPS
#undef ATOMIC_OP
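/*
 * Illustrative only: a typical retry loop built on the fully ordered
 * arch_atomic_cmpxchg(), here decrementing a counter but clamping it at zero
 * (the surrounding code and names are made up for the example):
 *
 *	int old = arch_atomic_read(v);
 *
 *	for (;;) {
 *		int new = old > 0 ? old - 1 : 0;
 *		int prev = arch_atomic_cmpxchg(v, old, new);
 *		if (prev == old)
 *			break;
 *		old = prev;
 *	}
 */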
static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0: lr.w %[p], %[c]\n"
		" bltz %[p], 1f\n"
		" addi %[rc], %[p], 1\n"
		" sc.w.rl %[rc], %[rc], %[c]\n"
		" bnez %[rc], 0b\n"
		" fence rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return !(prev < 0);
}
#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative

static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0: lr.w %[p], %[c]\n"
		" bgtz %[p], 1f\n"
		" addi %[rc], %[p], -1\n"
		" sc.w.rl %[rc], %[rc], %[c]\n"
		" bnez %[rc], 0b\n"
		" fence rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return !(prev > 0);
}
#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive

static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0: lr.w %[p], %[c]\n"
		" addi %[rc], %[p], -1\n"
		" bltz %[rc], 1f\n"
		" sc.w.rl %[rc], %[rc], %[c]\n"
		" bnez %[rc], 0b\n"
		" fence rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return prev - 1;
}
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
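/*
 * Note that arch_atomic_dec_if_positive() always returns prev - 1, i.e. the
 * value the counter would be decremented to: a non-negative result means the
 * decrement was actually performed, a negative result means the counter was
 * left untouched. As with the other conditional ops above, the full
 * "fence rw, rw" barrier is only executed on the successful path.
 */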
#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0: lr.d %[p], %[c]\n"
		" bltz %[p], 1f\n"
		" addi %[rc], %[p], 1\n"
		" sc.d.rl %[rc], %[rc], %[c]\n"
		" bnez %[rc], 0b\n"
		" fence rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return !(prev < 0);
}
#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative

static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0: lr.d %[p], %[c]\n"
		" bgtz %[p], 1f\n"
		" addi %[rc], %[p], -1\n"
		" sc.d.rl %[rc], %[rc], %[c]\n"
		" bnez %[rc], 0b\n"
		" fence rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return !(prev > 0);
}
#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive

static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0: lr.d %[p], %[c]\n"
		" addi %[rc], %[p], -1\n"
		" bltz %[rc], 1f\n"
		" sc.d.rl %[rc], %[rc], %[c]\n"
		" bnez %[rc], 0b\n"
		" fence rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		:
		: "memory");
	return prev - 1;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif

#endif /* _ASM_RISCV_ATOMIC_H */