#ifndef _M68K_BITOPS_H
#define _M68K_BITOPS_H
/*
 * Copyright 1992, Linus Torvalds.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/barrier.h>

/*
 * Bit access functions vary across the ColdFire and 68k families.
 * So we will break them out here, and then macro in the ones we want.
 *
 * ColdFire - supports standard bset/bclr/bchg with register operand only
 * 68000 - supports standard bset/bclr/bchg with memory operand
 * >= 68020 - also supports the bfset/bfclr/bfchg instructions
 *
 * Although it is possible to use only the bset/bclr/bchg with register
 * operands on all platforms, you end up with larger generated code.
 * So we use the best form possible on a given platform.
 */
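/*
 * Note on the address arithmetic used by the helpers below (added for
 * clarity, not part of the original header): the kernel numbers bits with
 * 0 as the least significant bit of each 32-bit word, while the byte-wide
 * bset/bclr/bchg instructions address bits 0..7 within a single byte.
 * On this big-endian CPU the least significant byte of a long sits at
 * byte offset 3, so "(nr ^ 31) / 8" picks the byte that holds bit nr and
 * "nr & 7" selects the bit within that byte.  For example, nr = 0 maps to
 * byte 3, bit 0, and nr = 31 maps to byte 0, bit 7.
 */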
static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfset %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define set_bit(nr, vaddr)	bset_reg_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define set_bit(nr, vaddr)	bset_mem_set_bit(nr, vaddr)
#else
#define set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bset_mem_set_bit(nr, vaddr) : \
				bfset_mem_set_bit(nr, vaddr))
#endif
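/*
 * Illustrative usage (added note, not part of the original header):
 * callers treat this like any other kernel bitmap API, e.g.
 *
 *	DECLARE_BITMAP(flags, 64);
 *	set_bit(5, flags);
 *
 * and the preprocessor selection above picks the bset or bfset variant
 * that suits the configured CPU at compile time.
 */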
static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	set_bit(nr, addr);
}

static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfclr %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define clear_bit(nr, vaddr)	bclr_reg_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define clear_bit(nr, vaddr)	bclr_mem_clear_bit(nr, vaddr)
#else
#define clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bclr_mem_clear_bit(nr, vaddr) : \
				bfclr_mem_clear_bit(nr, vaddr))
#endif

static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	clear_bit(nr, addr);
}

static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfchg %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define change_bit(nr, vaddr)	bchg_reg_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define change_bit(nr, vaddr)	bchg_mem_change_bit(nr, vaddr)
#else
#define change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bchg_mem_change_bit(nr, vaddr) : \
				bfchg_mem_change_bit(nr, vaddr))
#endif

static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	change_bit(nr, addr);
}

#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire
static inline int bset_reg_test_and_set_bit(int nr,
		volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bset_mem_test_and_set_bit(int nr,
		volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfset_mem_test_and_set_bit(int nr,
		volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define test_and_set_bit(nr, vaddr)	bset_reg_test_and_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_set_bit(nr, vaddr)	bset_mem_test_and_set_bit(nr, vaddr)
#else
#define test_and_set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bset_mem_test_and_set_bit(nr, vaddr) : \
					bfset_mem_test_and_set_bit(nr, vaddr))
#endif

static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}

static inline int bclr_reg_test_and_clear_bit(int nr,
		volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bclr_mem_test_and_clear_bit(int nr,
		volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfclr_mem_test_and_clear_bit(int nr,
		volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define test_and_clear_bit(nr, vaddr)	bclr_reg_test_and_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_clear_bit(nr, vaddr)	bclr_mem_test_and_clear_bit(nr, vaddr)
#else
#define test_and_clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bclr_mem_test_and_clear_bit(nr, vaddr) : \
					bfclr_mem_test_and_clear_bit(nr, vaddr))
#endif

static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_clear_bit(nr, addr);
}

static inline int bchg_reg_test_and_change_bit(int nr,
		volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bchg_mem_test_and_change_bit(int nr,
		volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfchg_mem_test_and_change_bit(int nr,
		volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define test_and_change_bit(nr, vaddr)	bchg_reg_test_and_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_change_bit(nr, vaddr)	bchg_mem_test_and_change_bit(nr, vaddr)
#else
#define test_and_change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bchg_mem_test_and_change_bit(nr, vaddr) : \
					bfchg_mem_test_and_change_bit(nr, vaddr))
#endif

static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	return test_and_change_bit(nr, addr);
}
/*
 * The true 68020 and more advanced processors support the "bfffo"
 * instruction for finding bits. ColdFire and simple 68000 parts
 * (including CPU32) do not support this. They simply use the generic
 * functions.
 */
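/*
 * Note on the bfffo-based search routines below (added for clarity):
 * bfffo scans a bit field starting at its most significant bit and
 * returns the field offset of the first set bit, with offset 0 meaning
 * the MSB and a width of #0 meaning a full 32-bit field.  Each routine
 * isolates the lowest interesting bit with "num & -num", lets bfffo
 * report its MSB-relative offset, and converts that back to the kernel's
 * LSB-relative bit number with "^ 31".  In find_first_zero_bit() and
 * find_first_bit() the final "res += ((long)p - (long)vaddr - 4) * 8"
 * converts the 32-bit words already skipped during the scan back into a
 * bit offset.
 */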
#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)

#include <asm-generic/bitops/ffz.h>

#else

static inline int find_first_zero_bit(const unsigned long *vaddr,
		unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = ~*p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
		: "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_zero_bit find_first_zero_bit

static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
		int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = ~*p++ & (~0UL << bit);
		offset -= bit;

		/* Look for zero in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			: "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No zero yet, search remaining full bytes for a zero */
	return offset + find_first_zero_bit(p, size - offset);
}
#define find_next_zero_bit find_next_zero_bit

static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = *p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
		: "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_bit find_first_bit

static inline int find_next_bit(const unsigned long *vaddr, int size,
		int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = *p++ & (~0UL << bit);
		offset -= bit;

		/* Look for one in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			: "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No one yet, search remaining full bytes for a one */
	return offset + find_first_bit(p, size - offset);
}
#define find_next_bit find_next_bit

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	int res;

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
		: "=d" (res) : "d" (~word & -~word));
	return res ^ 31;
}
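/*
 * Illustrative example (added note): for word = 0x0000000f, ~word is
 * 0xfffffff0 and "~word & -~word" isolates bit 4, so bfffo reports an
 * MSB-relative offset of 27 and ffz() returns 27 ^ 31 = 4, the index of
 * the lowest clear bit.
 */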
#endif

#ifdef __KERNEL__

#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)

/*
 * The newer ColdFire family members support a "bitrev" instruction
 * and we can use that to implement a fast ffs. Older ColdFire parts,
 * and normal 68000 parts don't have anything special, so we use the
 * generic functions for those.
 */
#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
	!defined(CONFIG_M68000)

static inline unsigned long __ffs(unsigned long x)
{
	__asm__ __volatile__ ("bitrev %0; ff1 %0"
		: "=d" (x)
		: "0" (x));
	return x;
}
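/*
 * How the ColdFire variant above works (added note; see the ColdFire
 * programmer's reference for the exact instruction definitions): bitrev
 * reverses the bit order of the data register, and ff1 then reports the
 * offset of the first set bit counting from the MSB.  After the reversal
 * that offset equals the index of the lowest set bit in the original
 * value, which is exactly what __ffs() must return.
 */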
static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs(x) + 1;
}

#else

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>

#endif

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>

#else

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0:#0},%0"
		: "=d" (cnt)
		: "dm" (x & -x));
	return 32 - cnt;
}
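/*
 * Worked example (added for clarity): for x = 0x8 the "x & -x" term keeps
 * only bit 3, bfffo returns its MSB-relative offset 28, and ffs() yields
 * 32 - 28 = 4, the 1-based position of the lowest set bit.  For x = 0 the
 * field contains no set bit, bfffo returns the field offset plus width
 * (32), and ffs() correctly yields 0 without an explicit zero test.
 */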
static inline unsigned long __ffs(unsigned long x)
{
	return ffs(x) - 1;
}

/*
 * fls: find last bit set.
 */
static inline int fls(unsigned int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0,#0},%0"
		: "=d" (cnt)
		: "dm" (x));
	return 32 - cnt;
}
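/*
 * Added note: here bfffo is applied to the value itself rather than to
 * "x & -x", so it locates the highest set bit; fls(0x10) is therefore 5,
 * and because an all-zero field makes bfffo return 32, fls(0) is 0,
 * matching the generic kernel definition of fls().
 */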
static inline unsigned long __fls(unsigned long x)
{
	return fls(x) - 1;
}

#endif

/* Simple test-and-set bit locks */
#define test_and_set_bit_lock	test_and_set_bit
#define clear_bit_unlock	clear_bit
#define __clear_bit_unlock	clear_bit_unlock

#include <asm-generic/bitops/non-instrumented-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/le.h>

#endif /* __KERNEL__ */

#endif /* _M68K_BITOPS_H */