atomic-spinlock.h

/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef _ASM_ARC_ATOMIC_SPLOCK_H
#define _ASM_ARC_ATOMIC_SPLOCK_H

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

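/*
 * Illustrative sketch (not part of this header): atomic_ops_lock() and
 * atomic_ops_unlock() are expected to be provided elsewhere by the
 * architecture. Assuming a single global arch_spinlock_t (named
 * smp_atomic_ops_lock here purely for illustration), they would look
 * roughly like:
 *
 *	#ifdef CONFIG_SMP
 *	#define atomic_ops_lock(flags)	do {			\
 *		local_irq_save(flags);				\
 *		arch_spin_lock(&smp_atomic_ops_lock);		\
 *	} while (0)
 *
 *	#define atomic_ops_unlock(flags) do {			\
 *		arch_spin_unlock(&smp_atomic_ops_lock);		\
 *		local_irq_restore(flags);			\
 *	} while (0)
 *	#else
 *	#define atomic_ops_lock(flags)	local_irq_save(flags)
 *	#define atomic_ops_unlock(flags) local_irq_restore(flags)
 *	#endif
 */
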
static inline void arch_atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

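/*
 * Illustration (assumed interleaving, not from this file): if atomic_set()
 * were a plain store that skipped the lock, it could land in the middle of
 * an emulated read-modify-write on another CPU and be silently lost:
 *
 *	CPU0: arch_atomic_add()		CPU1: plain atomic_set()
 *	  temp = v->counter;
 *					  v->counter = i;
 *	  v->counter = temp + i;	  <-- CPU1's store is overwritten
 *
 * Taking atomic_ops_lock() in both paths serializes the two sequences.
 */
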
#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	unsigned int temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned int orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

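/*
 * Note (added for clarity): the _return variants generated above hand back
 * the *new* value of the counter after the operation, while the fetch_
 * variants return the *old* value observed before the update. Both rely on
 * atomic_ops_lock()/atomic_ops_unlock() for the implied full barrier, as
 * the comments in the macros state.
 */
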
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

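/*
 * Expansion sketch (illustrative, not literal preprocessor output): the two
 * ATOMIC_OPS() invocations above generate, via the helper macros,
 *
 *	arch_atomic_add()  arch_atomic_add_return()  arch_atomic_fetch_add()
 *	arch_atomic_sub()  arch_atomic_sub_return()  arch_atomic_fetch_sub()
 *
 * e.g. arch_atomic_add(i, v) is simply "v->counter += i" performed between
 * atomic_ops_lock() and atomic_ops_unlock().
 */
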
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#define arch_atomic_andnot		arch_atomic_andnot
#define arch_atomic_fetch_andnot	arch_atomic_fetch_andnot

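/*
 * Note (added for clarity): defining arch_atomic_andnot and
 * arch_atomic_fetch_andnot to themselves signals to the generic atomic
 * fallback code that this architecture provides these operations directly,
 * so no and/fetch_and based fallbacks need to be generated for them.
 */
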
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif