atomic.h

/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#ifndef __ASM_OPENRISC_ATOMIC_H
#define __ASM_OPENRISC_ATOMIC_H

#include <linux/types.h>

/* Atomically perform op with v->counter and i */
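/*
 * The sequence below is a load-linked/store-conditional retry loop:
 * l.lwa loads v->counter and places a reservation on its address,
 * l.swa stores the updated value only if that reservation still holds
 * (setting the flag on success), and l.bnf branches back to retry when
 * the flag is clear. The trailing l.nop fills the branch delay slot.
 */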
#define ATOMIC_OP(op)	\
static inline void arch_atomic_##op(int i, atomic_t *v)	\
{	\
	int tmp;	\
	\
	__asm__ __volatile__(	\
		"1:	l.lwa	%0,0(%1)	\n"	\
		"	l." #op " %0,%0,%2	\n"	\
		"	l.swa	0(%1),%0	\n"	\
		"	l.bnf	1b	\n"	\
		"	 l.nop	\n"	\
		: "=&r"(tmp)	\
		: "r"(&v->counter), "r"(i)	\
		: "cc", "memory");	\
}

/* Atomically perform op with v->counter and i, return the result */
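/*
 * Same retry loop as ATOMIC_OP above; the freshly computed value is kept
 * in tmp and returned to the caller.
 */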
#define ATOMIC_OP_RETURN(op)	\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{	\
	int tmp;	\
	\
	__asm__ __volatile__(	\
		"1:	l.lwa	%0,0(%1)	\n"	\
		"	l." #op " %0,%0,%2	\n"	\
		"	l.swa	0(%1),%0	\n"	\
		"	l.bnf	1b	\n"	\
		"	 l.nop	\n"	\
		: "=&r"(tmp)	\
		: "r"(&v->counter), "r"(i)	\
		: "cc", "memory");	\
	\
	return tmp;	\
}

/* Atomically perform op with v->counter and i, return orig v->counter */
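/*
 * Here the result of the operation goes into a scratch register (tmp, %1)
 * so that old (%0) can keep the value loaded by l.lwa, which is what the
 * function returns.
 */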
#define ATOMIC_FETCH_OP(op)	\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)	\
{	\
	int tmp, old;	\
	\
	__asm__ __volatile__(	\
		"1:	l.lwa	%0,0(%2)	\n"	\
		"	l." #op " %1,%0,%3	\n"	\
		"	l.swa	0(%2),%1	\n"	\
		"	l.bnf	1b	\n"	\
		"	 l.nop	\n"	\
		: "=&r"(old), "=&r"(tmp)	\
		: "r"(&v->counter), "r"(i)	\
		: "cc", "memory");	\
	\
	return old;	\
}

ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)

ATOMIC_FETCH_OP(add)
ATOMIC_FETCH_OP(sub)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)

ATOMIC_OP(add)
ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
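
/*
 * For reference, the instantiations above produce functions of roughly the
 * following shape (illustrative signatures only, derived from the macros
 * before they are #undef'd):
 *
 *	void arch_atomic_add(int i, atomic_t *v);
 *	int  arch_atomic_add_return(int i, atomic_t *v);
 *	int  arch_atomic_fetch_add(int i, atomic_t *v);
 *
 * with the same pattern for sub, and for and/or/xor where instantiated.
 */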

#define arch_atomic_add_return	arch_atomic_add_return
#define arch_atomic_sub_return	arch_atomic_sub_return
#define arch_atomic_fetch_add	arch_atomic_fetch_add
#define arch_atomic_fetch_sub	arch_atomic_fetch_sub
#define arch_atomic_fetch_and	arch_atomic_fetch_and
#define arch_atomic_fetch_or	arch_atomic_fetch_or
#define arch_atomic_fetch_xor	arch_atomic_fetch_xor
#define arch_atomic_add		arch_atomic_add
#define arch_atomic_sub		arch_atomic_sub
#define arch_atomic_and		arch_atomic_and
#define arch_atomic_or		arch_atomic_or
#define arch_atomic_xor		arch_atomic_xor
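
/*
 * The self-referencing defines above advertise which operations this file
 * implements; the generic atomic headers are expected to fill in the
 * remaining variants (ordering flavours, inc/dec helpers and so on) with
 * common fallbacks built on top of these.
 */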

/*
 * Atomically add a to v->counter as long as v is not already u.
 * Returns the original value at v->counter.
 *
 * This is often used through atomic_inc_not_zero()
 */
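/*
 * Implementation note: l.sfeq compares the loaded value with u and l.bf
 * takes the early exit to label 2 when they are equal; the l.add sits in
 * the branch delay slot, so tmp is still computed on that path but never
 * stored. Otherwise the usual l.lwa/l.swa retry loop applies.
 */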
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int old, tmp;

	__asm__ __volatile__(
		"1:	l.lwa %0, 0(%2)	\n"
		"	l.sfeq %0, %4	\n"
		"	l.bf 2f	\n"
		"	 l.add %1, %0, %3	\n"
		"	l.swa 0(%2), %1	\n"
		"	l.bnf 1b	\n"
		"	 l.nop	\n"
		"2:	\n"
		: "=&r"(old), "=&r" (tmp)
		: "r"(&v->counter), "r"(a), "r"(u)
		: "cc", "memory");

	return old;
}
#define arch_atomic_fetch_add_unless	arch_atomic_fetch_add_unless

#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v,i)	WRITE_ONCE((v)->counter, (i))

#include <asm/cmpxchg.h>

#define arch_atomic_xchg(ptr, v)	(arch_xchg(&(ptr)->counter, (v)))
#define arch_atomic_cmpxchg(v, old, new)	(arch_cmpxchg(&((v)->counter), (old), (new)))
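
/*
 * arch_atomic_xchg()/arch_atomic_cmpxchg() above simply forward to the
 * word-sized xchg/cmpxchg primitives from <asm/cmpxchg.h>, applied to the
 * embedded counter. A minimal usage sketch (hypothetical caller, not part
 * of this header; kernel code normally goes through the atomic_* wrappers):
 *
 *	atomic_t ref = ATOMIC_INIT(1);
 *	int old = arch_atomic_cmpxchg(&ref, 1, 2);
 *
 * old reads back 1 when the swap succeeded, otherwise the current value.
 */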

#endif /* __ASM_OPENRISC_ATOMIC_H */