/*
 * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000 Silicon Graphics, Inc.
 *
 * SPDX-License-Identifier: GPL-2.0
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/__ffs.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/*
 * Only disable interrupts for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags			unsigned long flags
#define __bi_cli()			__cli()
#define __bi_save_flags(x)		__save_flags(x)
#define __bi_save_and_cli(x)		__save_and_cli(x)
#define __bi_restore_flags(x)		__restore_flags(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * These functions, for MIPS ISA > 1, are interrupt- and SMP-safe, and
 * interrupt friendly (they never need to disable interrupts).
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);

	*m |= 1UL << (nr & 31);
}

#define PLATFORM__SET_BIT
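
/*
 * Illustrative usage (editorial sketch, not part of the original header).
 * set_bit() is safe against concurrent updates of the same word, while
 * __set_bit() relies on the caller to serialise access.  The bitmap name
 * and bit numbers below are hypothetical:
 *
 *	static unsigned long pending_map[2];
 *
 *	set_bit(37, pending_map);	atomically sets word 1, bit 5
 *	__set_bit(3, pending_map);	non-atomic, caller must serialise
 */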
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b\n\t"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}
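
/*
 * Illustrative usage (editorial sketch, not part of the original header).
 * Because clear_bit() implies no memory barrier, a release-style use is
 * normally paired with the barrier macro defined above.  'busy_map' is a
 * hypothetical bitmap:
 *
 *	...update the data protected by bit 0...
 *	smp_mb__before_clear_bit();
 *	clear_bit(0, &busy_map);
 */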
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
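
/*
 * Illustrative usage (editorial sketch, not part of the original header).
 * The returned old value makes test_and_set_bit() usable as a simple
 * try-lock on a single bit; 'lock_map' is a hypothetical bitmap:
 *
 *	if (!test_and_set_bit(0, &lock_map)) {
 *		...bit 0 was clear, we now own it...
 *		smp_mb__before_clear_bit();
 *		clear_bit(0, &lock_map);
 *	}
 */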
/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
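
/*
 * Illustrative usage (editorial sketch, not part of the original header).
 * test_and_clear_bit() atomically consumes a flag, so an event posted by
 * an interrupt handler is handled exactly once.  EVENT_BIT, 'event_map'
 * and handle_event() are hypothetical names:
 *
 *	if (test_and_clear_bit(EVENT_BIT, &event_map))
 *		handle_event();
 */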
/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\tll\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}
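
/*
 * Illustrative usage (editorial sketch, not part of the original header).
 * test_and_change_bit() toggles a bit and reports its previous state, so
 * a caller can tell which state it just left.  LED_BIT and 'led_map' are
 * hypothetical names:
 *
 *	int was_set = test_and_change_bit(LED_BIT, &led_map);
 */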
#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void *addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a |= mask;
	__bi_restore_flags(flags);
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void *addr)
{
	int mask;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void *addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a &= ~mask;
	__bi_restore_flags(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void *addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a ^= mask;
	__bi_restore_flags(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_restore_flags

#endif /* MIPS I */

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}
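
/*
 * Illustrative usage (editorial sketch, not part of the original header).
 * test_bit() is a plain read with no ordering guarantees; 'status_map'
 * and READY_BIT are hypothetical names:
 *
 *	if (test_bit(READY_BIT, &status_map))
 *		...proceed...
 */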
#ifndef __MIPSEB__

/* Little endian versions. */

/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
	unsigned long dummy;
	int res;

	if (!size)
		return 0;

	__asm__ (".set\tnoreorder\n\t"
		".set\tnoat\n"
		"1:\tsubu\t$1,%6,%0\n\t"
		"blez\t$1,2f\n\t"
		"lw\t$1,(%5)\n\t"
		"addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
		"beql\t%1,$1,1b\n\t"
		"addiu\t%0,32\n\t"
#else
		"addiu\t%0,32\n\t"
		"beq\t%1,$1,1b\n\t"
		"nop\n\t"
		"subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
		"li\t%1,1\n"
		"1:\tand\t%2,$1,%1\n\t"
		"beqz\t%2,2f\n\t"
		"sll\t%1,%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"add\t%0,%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:"
		: "=r" (res), "=r" (dummy), "=r" (addr)
		: "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
		  "2" (addr), "r" (size)
		: "$1");

	return res;
}
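
/*
 * Illustrative usage (editorial sketch, not part of the original header).
 * A common pattern is to look for a free slot and then claim it; the
 * search is not atomic with the claim, so either hold a lock or retry
 * after test_and_set_bit().  'slot_map' and MAX_SLOTS are hypothetical:
 *
 *	int slot = find_first_zero_bit(slot_map, MAX_SLOTS);
 *	if (slot < MAX_SLOTS && !test_and_set_bit(slot, slot_map))
 *		...slot is now ours...
 */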
/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit (void *addr, int size, int offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;
	unsigned long dummy;

	if (bit) {
		/*
		 * Look for a zero in the first word
		 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
		__asm__(".set\tnoreorder\n\t"
			".set\tnoat\n"
			"1:\tand\t$1,%4,%1\n\t"
			"beqz\t$1,1f\n\t"
			"sll\t%1,%1,1\n\t"
			"bnez\t%1,1b\n\t"
			"addiu\t%0,1\n\t"
			".set\tat\n\t"
			".set\treorder\n"
			"1:"
			: "=r" (set), "=r" (dummy)
			: "0" (0), "1" (1 << bit), "r" (*p)
			: "$1");
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search the remaining full words for a zero
	 */
	res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));

	return offset + set + res;
}

#endif /* !(__MIPSEB__) */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned int __res;
	unsigned int mask = 1;

	__asm__ (
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"move\t%0,$0\n"
		"1:\tand\t$1,%2,%1\n\t"
		"beqz\t$1,2f\n\t"
		"sll\t%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:\n\t"
		: "=&r" (__res), "=r" (mask)
		: "r" (word), "1" (mask)
		: "$1");

	return __res;
}
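
/*
 * Worked example (editorial note, not part of the original header):
 * ffz() counts from the least significant bit, so ffz(0xfffffffe) == 0
 * and ffz(0x0000ffff) == 16.  The result for ~0UL is undefined, hence
 * the caller-side check mentioned above.
 */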
#ifdef __KERNEL__

/*
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight32(x)	generic_hweight32(x)
#define hweight16(x)	generic_hweight16(x)
#define hweight8(x)	generic_hweight8(x)

#endif /* __KERNEL__ */
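
/*
 * Worked example (editorial note, not part of the original header):
 * hweight32(0xf0f0f0f0) == 16 and hweight8(0x03) == 2.  The
 * generic_hweight*() helpers are the architecture-independent fallbacks
 * provided by the generic kernel bitops code.
 */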
#ifdef __MIPSEB__

/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

/* Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on the Sparc as it does for the ALPHA.
 */

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static int find_first_zero_bit (void *addr, unsigned size);
#endif

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#endif /* (__MIPSEB__) */
/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__
static __inline__ int ext2_set_bit(int nr, void *addr)
{
	int mask, retval, flags;
	unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);

	return retval;
}

static __inline__ int ext2_clear_bit(int nr, void *addr)
{
	int mask, retval, flags;
	unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);

	return retval;
}

static __inline__ int ext2_test_bit(int nr, const void *addr)
{
	int mask;
	const unsigned char *ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);

	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal.  So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above.  But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
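
/*
 * Worked example (editorial note, not part of the original header):
 * for offset == 8, ~0UL >> (32-8) == 0x000000ff, and __swab32() turns it
 * into 0xff000000.  On this big endian configuration the first byte of
 * the bitmap (ext2 bits 0-7) is loaded into bits 24-31 of the word, so
 * the swabbed mask marks exactly the bits already skipped; the final
 * ffz(__swab32(tmp)) converts the position back to ext2 bit numbering.
 */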
#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr)			test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr)		test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr)			test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size)	find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
		find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */

/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bit order, which
 * severely limits the Minix filesystem's value for data exchange.
 */
#define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)			set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)	test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)			test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)

#endif /* _ASM_BITOPS_H */