bitops.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm/compiler.h>
#include <asm/barrier.h>

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */

static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bis %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
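
/*
 * Illustrative usage sketch (not part of the original header; "flags" is
 * a hypothetical name).  The bitmap is addressed as an array of 32-bit
 * longwords: nr >> 5 picks the longword and nr & 31 the bit within it,
 * so bit 40 lives in longword 1, bit 8:
 *
 *	static unsigned long flags[2];		// a 128-bit bitmap
 *
 *	set_bit(40, flags);			// atomically sets longword 1, bit 8
 *
 * The ldl_l/stl_c pair above branches back to label 1 whenever the
 * conditional store fails, which is what makes the read-modify-write
 * atomic.
 */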

/*
 * WARNING: non atomic version.
 */
static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m |= 1 << (nr & 31);
}

static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bic %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

static inline void
clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	clear_bit(nr, addr);
}

/*
 * WARNING: non atomic version.
 */
static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m &= ~(1 << (nr & 31));
}

static inline void
__clear_bit_unlock(unsigned long nr, volatile void * addr)
{
	smp_mb();
	arch___clear_bit(nr, addr);
}

static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non atomic version.
 */
static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m ^= 1 << (nr & 31);
}

static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

static inline int
test_and_set_bit_lock(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
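
/*
 * Illustrative usage sketch (not part of the original header): the
 * _lock/_unlock variants provide acquire/release ordering, so they can
 * back a simple bit spinlock.  The names below ("MY_LOCK_BIT", "state")
 * are hypothetical:
 *
 *	#define MY_LOCK_BIT	0
 *	static unsigned long state;
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &state))
 *		cpu_relax();			// spin: bit was already set
 *	// ... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &state);
 */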

/*
 * WARNING: non atomic version.
 */
static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non atomic version.
 */
static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}

#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 *
 * Do a binary search on the bits.  Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum, x1, x2, x4;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	x1 = x & 0xAA;
	x2 = x & 0xCC;
	x4 = x & 0xF0;
	sum = x2 ? 2 : 0;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}
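
/*
 * Worked example (illustrative, not part of the original header): for
 * x = 0x17 (binary 00010111) the lowest clear bit is bit 3.
 *
 *	~x & -~x	== 0x08		// isolate the lowest zero bit of x
 *	x1 = 0x08 & 0xAA != 0	-> +1	// bit 0 of the answer
 *	x2 = 0x08 & 0xCC != 0	-> +2	// bit 1 of the answer
 *	x4 = 0x08 & 0xF0 == 0	-> +0	// bit 2 of the answer
 *
 * sum == 3: the masks 0xAA/0xCC/0xF0 read the three bits of the index
 * straight off the isolated bit, with no loop.
 */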

static inline unsigned long ffz(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee. EV67 can calculate it directly. */
	return __kernel_cttz(~word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(word, ~0UL);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}
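
/*
 * Worked example for the fallback path (illustrative, not part of the
 * original header): for word = 0x0000000000ffffffUL the first zero bit
 * is bit 24.
 *
 *	bits = __kernel_cmpbge(word, ~0UL)	== 0x07	// bytes 0-2 are 0xff
 *	qofs = ffz_b(0x07)			== 3	// first non-0xff byte
 *	bits = __kernel_extbl(word, 3)		== 0x00	// extract that byte
 *	bofs = ffz_b(0x00)			== 0	// first zero bit in it
 *
 * giving qofs*8 + bofs == 24: cmpbge narrows the search to one byte in a
 * single instruction, then ffz_b finishes within that byte.
 */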

/*
 * __ffs = Find First set bit in word. Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
	/* Whee. EV67 can calculate it directly. */
	return __kernel_cttz(word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(0, word);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(~bits);

	return qofs*8 + bofs;
#endif
}

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */
static inline int ffs(int word)
{
	int result = __ffs(word) + 1;
	return word ? result : 0;
}
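
/*
 * Illustrative examples (not part of the original header):
 *
 *	ffs(0)		== 0	// ffs() is defined for 0, unlike __ffs()
 *	ffs(0x40)	== 7	// libc-style: bits are numbered from 1
 *	__ffs(0x40)	== 6	// kernel-style: bits are numbered from 0
 */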

/*
 * fls: find last bit set.
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
static inline int fls64(unsigned long word)
{
	return 64 - __kernel_ctlz(word);
}
#else
extern const unsigned char __flsm1_tab[256];

static inline int fls64(unsigned long x)
{
	unsigned long t, a, r;

	t = __kernel_cmpbge (x, 0x0101010101010101UL);
	a = __flsm1_tab[t];
	t = __kernel_extbl (x, a);
	r = a*8 + __flsm1_tab[t] + (x != 0);

	return r;
}
#endif
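
/*
 * Worked example for the table-driven fallback (illustrative, not part
 * of the original header), assuming __flsm1_tab[i] holds fls(i) - 1:
 * for x = 0xf0, fls64(x) should be 8.
 *
 *	t = __kernel_cmpbge(x, 0x0101010101010101UL)	== 0x01	// only byte 0 is non-zero
 *	a = __flsm1_tab[0x01]				== 0	// index of highest non-zero byte
 *	t = __kernel_extbl(x, 0)			== 0xf0	// extract that byte
 *	r = 0*8 + __flsm1_tab[0xf0] + (x != 0)		== 8	// 7 + 1
 */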

static inline unsigned long __fls(unsigned long x)
{
	return fls64(x) - 1;
}

static inline int fls(unsigned int x)
{
	return fls64(x);
}

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
/* Whee. EV67 can calculate it directly. */
static inline unsigned long __arch_hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

static inline unsigned int __arch_hweight32(unsigned int w)
{
	return __arch_hweight64(w);
}

static inline unsigned int __arch_hweight16(unsigned int w)
{
	return __arch_hweight64(w & 0xffff);
}

static inline unsigned int __arch_hweight8(unsigned int w)
{
	return __arch_hweight64(w & 0xff);
}
#else
#include <asm-generic/bitops/arch_hweight.h>
#endif

#include <asm-generic/bitops/const_hweight.h>

#endif /* __KERNEL__ */

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 100-bit bitmap. It's guaranteed that at least
 * one of the 100 bits is set.
 */
static inline unsigned long
sched_find_first_bit(const unsigned long b[2])
{
	unsigned long b0, b1, ofs, tmp;

	b0 = b[0];
	b1 = b[1];
	ofs = (b0 ? 0 : 64);
	tmp = (b0 ? b0 : b1);

	return __ffs(tmp) + ofs;
}
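
/*
 * Illustrative example (not part of the original header):
 *
 *	unsigned long b[2] = { 0, 0x4 };
 *	sched_find_first_bit(b);	// == 64 + __ffs(0x4) == 66
 */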

#include <asm-generic/bitops/non-instrumented-non-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */