#ifndef _M68K_BITOPS_H
#define _M68K_BITOPS_H
/*
 * Copyright 1992, Linus Torvalds.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/barrier.h>

/*
 * Bit access functions vary across the ColdFire and 68k families.
 * So we will break them out here, and then macro in the ones we want.
 *
 * ColdFire  - supports standard bset/bclr/bchg with register operand only
 * 68000     - supports standard bset/bclr/bchg with memory operand
 * >= 68020  - also supports the bfset/bfclr/bfchg instructions
 *
 * Although it is possible to use only bset/bclr/bchg with register
 * operands on all platforms, you end up with larger generated code.
 * So we use the best form possible on a given platform.
 */
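
/*
 * A note on the addressing used by the byte-wise forms below: bit 'nr' of
 * the bitmap lives at bit position (nr & 7) of byte ((nr ^ 31) / 8) from the
 * start of the bitmap, because m68k is big-endian while bits are numbered
 * from the LSB of each 32-bit long.  The bitfield forms pass (nr ^ 31)
 * instead, which addresses the same bit as an MSB-relative offset for
 * bfset/bfclr/bfchg.  For example, nr = 35 gives byte offset
 * (35 ^ 31) / 8 = 7 (the low byte of the second long) and bit 35 & 7 = 3
 * within it.
 */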

static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfset %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define set_bit(nr, vaddr)	bset_reg_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define set_bit(nr, vaddr)	bset_mem_set_bit(nr, vaddr)
#else
#define set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bset_mem_set_bit(nr, vaddr) : \
				bfset_mem_set_bit(nr, vaddr))
#endif

#define __set_bit(nr, vaddr)	set_bit(nr, vaddr)

static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfclr %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define clear_bit(nr, vaddr)	bclr_reg_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define clear_bit(nr, vaddr)	bclr_mem_clear_bit(nr, vaddr)
#else
#define clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bclr_mem_clear_bit(nr, vaddr) : \
				bfclr_mem_clear_bit(nr, vaddr))
#endif

#define __clear_bit(nr, vaddr)	clear_bit(nr, vaddr)

static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,(%0)"
		:
		: "a" (p), "di" (nr & 7)
		: "memory");
}

static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;

	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (*p)
		: "di" (nr & 7));
}

static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
	__asm__ __volatile__ ("bfchg %1{%0:#1}"
		:
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
}

#if defined(CONFIG_COLDFIRE)
#define change_bit(nr, vaddr)	bchg_reg_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define change_bit(nr, vaddr)	bchg_mem_change_bit(nr, vaddr)
#else
#define change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
				bchg_mem_change_bit(nr, vaddr) : \
				bfchg_mem_change_bit(nr, vaddr))
#endif

#define __change_bit(nr, vaddr)	change_bit(nr, vaddr)

static inline int test_bit(int nr, const unsigned long *vaddr)
{
	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}
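
/*
 * test_bit() needs no assembler help: word nr >> 5 of the array holds the
 * bit and (1UL << (nr & 31)) masks it out.  For example, test_bit(35, map)
 * reads bit 3 of map[1].
 */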

static inline int bset_reg_test_and_set_bit(int nr,
					    volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bset_mem_test_and_set_bit(int nr,
					    volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfset_mem_test_and_set_bit(int nr,
					     volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define test_and_set_bit(nr, vaddr)	bset_reg_test_and_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_set_bit(nr, vaddr)	bset_mem_test_and_set_bit(nr, vaddr)
#else
#define test_and_set_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bset_mem_test_and_set_bit(nr, vaddr) : \
					bfset_mem_test_and_set_bit(nr, vaddr))
#endif

#define __test_and_set_bit(nr, vaddr)	test_and_set_bit(nr, vaddr)

static inline int bclr_reg_test_and_clear_bit(int nr,
					      volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bclr_mem_test_and_clear_bit(int nr,
					      volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfclr_mem_test_and_clear_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define test_and_clear_bit(nr, vaddr)	bclr_reg_test_and_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_clear_bit(nr, vaddr)	bclr_mem_test_and_clear_bit(nr, vaddr)
#else
#define test_and_clear_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bclr_mem_test_and_clear_bit(nr, vaddr) : \
					bfclr_mem_test_and_clear_bit(nr, vaddr))
#endif

#define __test_and_clear_bit(nr, vaddr)	test_and_clear_bit(nr, vaddr)

static inline int bchg_reg_test_and_change_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,(%1); sne %0"
		: "=d" (retval)
		: "a" (p), "di" (nr & 7)
		: "memory");
	return retval;
}

static inline int bchg_mem_test_and_change_bit(int nr,
					       volatile unsigned long *vaddr)
{
	char *p = (char *)vaddr + (nr ^ 31) / 8;
	char retval;

	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (*p)
		: "di" (nr & 7));
	return retval;
}

static inline int bfchg_mem_test_and_change_bit(int nr,
						volatile unsigned long *vaddr)
{
	char retval;

	__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
		: "=d" (retval)
		: "d" (nr ^ 31), "o" (*vaddr)
		: "memory");
	return retval;
}

#if defined(CONFIG_COLDFIRE)
#define test_and_change_bit(nr, vaddr)	bchg_reg_test_and_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_change_bit(nr, vaddr)	bchg_mem_test_and_change_bit(nr, vaddr)
#else
#define test_and_change_bit(nr, vaddr)	(__builtin_constant_p(nr) ? \
					bchg_mem_test_and_change_bit(nr, vaddr) : \
					bfchg_mem_test_and_change_bit(nr, vaddr))
#endif

#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
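
/*
 * A minimal usage sketch of the interfaces above (illustrative only; the
 * "pending" bitmap and "handle_source" helper are made-up names, not part
 * of this header):
 *
 *	static unsigned long pending[2];	 a 64-bit bitmap
 *
 *	set_bit(35, pending);
 *	if (test_and_clear_bit(35, pending))
 *		handle_source(35);
 *
 * set_bit() marks source 35 pending; test_and_clear_bit() reads back and
 * clears that bit in a single read-modify-write, so the handler runs at
 * most once per set_bit().
 */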

/*
 * The true 68020 and more advanced processors support the "bfffo"
 * instruction for finding bits.  ColdFire and simple 68000 parts
 * (including CPU32) do not support this.  They simply use the generic
 * functions.
 */
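
/*
 * The helpers below all rely on the same bfffo behaviour: with offset 0 and
 * width 0 (which means 32), bfffo returns the offset of the first set bit
 * counted from the most significant end, or 32 when the field is all
 * zeroes.  The find_* functions and ffz isolate the interesting bit first
 * with (num & -num), then flip the MSB-relative result back to an
 * LSB-relative bit number with "^ 31".
 */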

#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffz.h>

#else

static inline int find_first_zero_bit(const unsigned long *vaddr,
				      unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = ~*p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_zero_bit find_first_zero_bit
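
/*
 * For example, with map[] = { 0xffffffff, 0xfffffeff } and size = 64,
 * find_first_zero_bit(map, 64) skips the first (all ones) word, isolates
 * the zero at bit 8 of the second word, and returns 32 + 8 = 40.
 */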

static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
				     int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = ~*p++ & (~0UL << bit);
		offset -= bit;

		/* Look for zero in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No zero yet, search remaining full bytes for a zero */
	return offset + find_first_zero_bit(p, size - offset);
}
#define find_next_zero_bit find_next_zero_bit

static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
	const unsigned long *p = vaddr;
	int res = 32;
	unsigned int words;
	unsigned long num;

	if (!size)
		return 0;

	words = (size + 31) >> 5;
	while (!(num = *p++)) {
		if (!--words)
			goto out;
	}

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (num & -num));
	res ^= 31;
out:
	res += ((long)p - (long)vaddr - 4) * 8;
	return res < size ? res : size;
}
#define find_first_bit find_first_bit

static inline int find_next_bit(const unsigned long *vaddr, int size,
				int offset)
{
	const unsigned long *p = vaddr + (offset >> 5);
	int bit = offset & 31UL, res;

	if (offset >= size)
		return size;

	if (bit) {
		unsigned long num = *p++ & (~0UL << bit);
		offset -= bit;

		/* Look for one in first longword */
		__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
				      : "=d" (res) : "d" (num & -num));
		if (res < 32) {
			offset += res ^ 31;
			return offset < size ? offset : size;
		}
		offset += 32;

		if (offset >= size)
			return size;
	}
	/* No one yet, search remaining full bytes for a one */
	return offset + find_first_bit(p, size - offset);
}
#define find_next_bit find_next_bit

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	int res;

	__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
			      : "=d" (res) : "d" (~word & -~word));
	return res ^ 31;
}
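
/*
 * For example, ffz(0x0000000f) == 4 and ffz(0) == 0; passing ~0UL is the
 * undefined case the comment above warns about.
 */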

#endif

#ifdef __KERNEL__

#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)

/*
 * The newer ColdFire family members support a "bitrev" instruction
 * and we can use that to implement a fast ffs. Older ColdFire parts
 * and normal 68000 parts don't have anything special, so we use the
 * generic functions for those.
 */
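
/*
 * How the fast path works: bitrev reverses the bit order of the register,
 * so the lowest set bit of the original value becomes the highest set bit,
 * and ff1 then returns how many leading zeroes precede it, which is exactly
 * the index of the original lowest set bit.  A zero input leaves no set bit
 * for ff1 to report, which is why ffs() checks for zero before calling
 * __ffs().
 */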

#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
	!defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)

static inline int __ffs(int x)
{
	__asm__ __volatile__ ("bitrev %0; ff1 %0"
		: "=d" (x)
		: "0" (x));
	return x;
}

static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs(x) + 1;
}

#else

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>

#endif

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>

#else

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, and therefore
 * differs in spirit from the ffz above (man ffs).
 */
static inline int ffs(int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0:#0},%0"
		: "=d" (cnt)
		: "dm" (x & -x));
	return 32 - cnt;
}
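
/*
 * For example, ffs(0x40) isolates bit 6 (0x40 & -0x40 == 0x40), bfffo
 * reports it at MSB-relative offset 25, and 32 - 25 == 7 gives the 1-based
 * libc answer.  ffs(0) ends up as 32 - 32 == 0.
 */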

#define __ffs(x) (ffs(x) - 1)

/*
 * fls: find last bit set.
 */
static inline int fls(int x)
{
	int cnt;

	__asm__ ("bfffo %1{#0,#0},%0"
		: "=d" (cnt)
		: "dm" (x));
	return 32 - cnt;
}

static inline int __fls(int x)
{
	return fls(x) - 1;
}
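
/*
 * Similarly, fls(0x40) == 7 (the top set bit sits at MSB-relative offset 25,
 * and 32 - 25 == 7) and fls(0) == 0, so __fls(x) gives the 0-based index of
 * the highest set bit for non-zero x.
 */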

#endif

#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#endif /* __KERNEL__ */
#endif /* _M68K_BITOPS_H */