/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <[email protected]>
 * Copyright (C) 2006 Kyle McMartin <[email protected]>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.  prumpf
 */

#ifdef CONFIG_SMP

#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
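
/*
 * Worked example (illustrative, with assumed values): with
 * L1_CACHE_BYTES == 64 and ATOMIC_HASH_SIZE == 4, an atomic_t at the
 * hypothetical address 0x1040 hashes to (0x1040 / 64) & 3 == 65 & 3 == 1,
 * i.e. lock slot 1.  Every atomic_t in that same cacheline shares slot 1,
 * while atomics in neighbouring cachelines spread across the other slots,
 * so unrelated atomics rarely contend on the same lock.
 */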

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else

# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)

#endif
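
/*
 * Usage sketch (illustrative): every read-modify-write below follows the
 * same critical-section pattern:
 *
 *	unsigned long flags;
 *
 *	_atomic_spin_lock_irqsave(v, flags);
 *	... read and/or modify v->counter ...
 *	_atomic_spin_unlock_irqrestore(v, flags);
 *
 * On UP kernels the "lock" half reduces to local_irq_save(); that is
 * sufficient there, since with interrupts off nothing on the single CPU
 * can race with the update.
 */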

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);
	v->counter = i;
	_atomic_spin_unlock_irqrestore(v, flags);
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

static __inline__ int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define arch_atomic_cmpxchg(v, o, n)	(arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_xchg(v, new)	(arch_xchg(&((v)->counter), new))
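
/*
 * Example (illustrative, hypothetical caller): a typical lock-free update
 * built on the interface above is a cmpxchg retry loop:
 *
 *	int old, new;
 *
 *	do {
 *		old = arch_atomic_read(v);
 *		new = old + 1;		<- any pure transform of old
 *	} while (arch_atomic_cmpxchg(v, old, new) != old);
 *
 * If another CPU changed v->counter between the read and the cmpxchg,
 * the cmpxchg returns the unexpected value and the loop retries.
 */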

#define ATOMIC_OP(op, c_op) \
static __inline__ void arch_atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
 \
	_atomic_spin_lock_irqsave(v, flags); \
	v->counter c_op i; \
	_atomic_spin_unlock_irqrestore(v, flags); \
}

#define ATOMIC_OP_RETURN(op, c_op) \
static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int ret; \
 \
	_atomic_spin_lock_irqsave(v, flags); \
	ret = (v->counter c_op i); \
	_atomic_spin_unlock_irqrestore(v, flags); \
 \
	return ret; \
}

#define ATOMIC_FETCH_OP(op, c_op) \
static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int ret; \
 \
	_atomic_spin_lock_irqsave(v, flags); \
	ret = v->counter; \
	v->counter c_op i; \
	_atomic_spin_unlock_irqrestore(v, flags); \
 \
	return ret; \
}

#define ATOMIC_OPS(op, c_op) \
	ATOMIC_OP(op, c_op) \
	ATOMIC_OP_RETURN(op, c_op) \
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
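
/*
 * The two instantiations above expand into six functions:
 *
 *	arch_atomic_add(), arch_atomic_add_return(), arch_atomic_fetch_add()
 *	arch_atomic_sub(), arch_atomic_sub_return(), arch_atomic_fetch_sub()
 *
 * The _return forms yield the new value, the fetch_ forms the old one.
 */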

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op) \
	ATOMIC_OP(op, c_op) \
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)
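
/*
 * The bitwise ops get only the void and fetch_ forms: the generic Linux
 * atomic API defines no arch_atomic_{and,or,xor}_return(), so none is
 * generated here.
 */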

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }
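
/*
 * Example (illustrative): ATOMIC64_INIT is for static initialisation of a
 * 64-bit counter, e.g. a hypothetical:
 *
 *	static atomic64_t bytes_sent = ATOMIC64_INIT(0);
 */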

#define ATOMIC64_OP(op, c_op) \
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
	unsigned long flags; \
 \
	_atomic_spin_lock_irqsave(v, flags); \
	v->counter c_op i; \
	_atomic_spin_unlock_irqrestore(v, flags); \
}

#define ATOMIC64_OP_RETURN(op, c_op) \
static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v) \
{ \
	unsigned long flags; \
	s64 ret; \
 \
	_atomic_spin_lock_irqsave(v, flags); \
	ret = (v->counter c_op i); \
	_atomic_spin_unlock_irqrestore(v, flags); \
 \
	return ret; \
}

#define ATOMIC64_FETCH_OP(op, c_op) \
static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
{ \
	unsigned long flags; \
	s64 ret; \
 \
	_atomic_spin_lock_irqsave(v, flags); \
	ret = v->counter; \
	v->counter c_op i; \
	_atomic_spin_unlock_irqrestore(v, flags); \
 \
	return ret; \
}

#define ATOMIC64_OPS(op, c_op) \
	ATOMIC64_OP(op, c_op) \
	ATOMIC64_OP_RETURN(op, c_op) \
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op) \
	ATOMIC64_OP(op, c_op) \
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)
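
/*
 * As in the 32-bit case, the instantiations above generate
 * arch_atomic64_{add,sub}(), arch_atomic64_{add,sub}_return(),
 * arch_atomic64_fetch_{add,sub}(), plus the void and fetch_ forms of the
 * bitwise ops, all operating on s64 counters.
 */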

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
arch_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);
	v->counter = i;
	_atomic_spin_unlock_irqrestore(v, flags);
}

#define arch_atomic64_set_release(v, i)	arch_atomic64_set((v), (i))

static __inline__ s64
arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define arch_atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic64_xchg(v, new)	(arch_xchg(&((v)->counter), new))
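
/*
 * Note (illustrative): the cast back to __typeof__((v)->counter) gives the
 * cmpxchg macro's result the counter's own s64 type, so a caller can write,
 * for a hypothetical atomic64_t v64:
 *
 *	s64 old = arch_atomic64_cmpxchg(&v64, 0, 1);
 */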

#endif /* CONFIG_64BIT */

#endif /* _ASM_PARISC_ATOMIC_H_ */