bitops.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_BITOPS_H
#define _ASM_RISCV_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error "Only <linux/bitops.h> can be included directly"
#endif /* _LINUX_BITOPS_H */

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/bitsperlong.h>

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
/* Pick the AMO instruction width that matches the size of an unsigned long. */
#if (BITS_PER_LONG == 64)
#define __AMO(op)	"amo" #op ".d"
#elif (BITS_PER_LONG == 32)
#define __AMO(op)	"amo" #op ".w"
#else
#error "Unexpected BITS_PER_LONG"
#endif
/*
 * Atomically apply the AMO 'op' to the word containing bit 'nr' and return
 * whether that bit was set beforehand.  'mod' adjusts the mask (identity or
 * bitwise NOT) and 'ord' selects the ordering suffix (.aq/.rl/.aqrl or none).
 */
#define __test_and_op_bit_ord(op, mod, nr, addr, ord)		\
({								\
	unsigned long __res, __mask;				\
	__mask = BIT_MASK(nr);					\
	__asm__ __volatile__ (					\
		__AMO(op) #ord " %0, %2, %1"			\
		: "=r" (__res), "+A" (addr[BIT_WORD(nr)])	\
		: "r" (mod(__mask))				\
		: "memory");					\
	((__res & __mask) != 0);				\
})

/* Same as above, but the old value is discarded (destination register zero). */
#define __op_bit_ord(op, mod, nr, addr, ord)			\
	__asm__ __volatile__ (					\
		__AMO(op) #ord " zero, %1, %0"			\
		: "+A" (addr[BIT_WORD(nr)])			\
		: "r" (mod(BIT_MASK(nr)))			\
		: "memory");

#define __test_and_op_bit(op, mod, nr, addr)			\
	__test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
#define __op_bit(op, mod, nr, addr)				\
	__op_bit_ord(op, mod, nr, addr, )
/* Bitmask modifiers */
#define __NOP(x)	(x)
#define __NOT(x)	(~(x))
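
/*
 * Illustrative note (not part of the original file): with the helpers above,
 * on a 64-bit kernel the test_and_set_bit() below expands to roughly
 *
 *	amoor.d.aqrl	res, mask, (word)
 *
 * where mask = BIT_MASK(nr) (left unchanged by __NOP), word is
 * addr[BIT_WORD(nr)], and the old bit value is recovered as
 * (res & mask) != 0.
 */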
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation may be reordered on architectures other than x86.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(or, __NOP, nr, addr);
}
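
/*
 * Usage sketch (illustrative only; 'flags' and DATA_READY are hypothetical
 * names, not defined by this header):
 *
 *	static unsigned long flags;
 *
 *	if (!test_and_set_bit(DATA_READY, &flags)) {
 *		// We were the one that set the bit; do the one-time work.
 *	}
 */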
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation can be reordered on architectures other than x86.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(and, __NOT, nr, addr);
}
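
/*
 * Usage sketch (illustrative; PENDING_WORK and 'pending' are hypothetical):
 *
 *	if (test_and_clear_bit(PENDING_WORK, &pending)) {
 *		// The flag was set and we atomically consumed it.
 *	}
 */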
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(xor, __NOP, nr, addr);
}
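
/*
 * Usage sketch (illustrative; LED_STATE and 'state' are hypothetical):
 *
 *	int was_on = test_and_change_bit(LED_STATE, &state);
 *	// The bit is now inverted; was_on holds its previous value.
 */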
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(or, __NOP, nr, addr);
}
/**
 * clear_bit - Clear a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(and, __NOT, nr, addr);
}
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() may be reordered on architectures other than x86.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(xor, __NOP, nr, addr);
}
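
/*
 * Usage sketch for the non-value-returning atomics above (illustrative; the
 * bit numbers and 'mask' are hypothetical):
 *
 *	static unsigned long mask;
 *
 *	set_bit(0, &mask);	// atomically set bit 0
 *	clear_bit(1, &mask);	// atomically clear bit 1
 *	change_bit(2, &mask);	// atomically toggle bit 2
 *
 * None of these return the old value and, as noted above, none of them order
 * surrounding memory accesses by themselves.
 */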
/**
 * test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and provides acquire barrier semantics.
 * It can be used to implement bit locks.
 */
static inline int test_and_set_bit_lock(
	unsigned long nr, volatile unsigned long *addr)
{
	return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
}
/**
 * clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 */
static inline void clear_bit_unlock(
	unsigned long nr, volatile unsigned long *addr)
{
	__op_bit_ord(and, __NOT, nr, addr, .rl);
}
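
/*
 * Together, test_and_set_bit_lock() and clear_bit_unlock() can implement a
 * simple bit spinlock.  Illustrative sketch (MY_LOCK_BIT and 'lockword' are
 * hypothetical; most users should go through <linux/bit_spinlock.h> instead):
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &lockword))
 *		cpu_relax();	// spin until the bit is observed clear
 *	// ... critical section, ordered after the acquire above ...
 *	clear_bit_unlock(MY_LOCK_BIT, &lockword);
 */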
/**
 * __clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is like clear_bit_unlock(), however it is not atomic.
 * It does provide release barrier semantics, so it can be used to unlock
 * a bit lock; however, it should only be used if no other CPU can modify
 * any bits in the memory until the lock is released (a good example is
 * if the bit lock itself protects access to the other bits in the word).
 *
 * On RISC-V systems there seems to be no benefit to taking advantage of the
 * non-atomic property here: it's a lot more instructions and we still have to
 * provide release semantics anyway.
 */
static inline void __clear_bit_unlock(
	unsigned long nr, volatile unsigned long *addr)
{
	clear_bit_unlock(nr, addr);
}
#undef __test_and_op_bit
#undef __op_bit
#undef __NOP
#undef __NOT
#undef __AMO

#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* _ASM_RISCV_BITOPS_H */