/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Anton Blanchard                   2001

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sched.h>
#include <sys/stat.h>
#include <time.h>
#include <signal.h>

#include "tdb.h"
#include "spinlock.h"
  31. #define DEBUG
  32. #ifdef USE_SPINLOCKS
  33. /*
  34. * ARCH SPECIFIC
  35. */
  36. #if defined(SPARC_SPINLOCKS)
  37. static inline int __spin_trylock(spinlock_t *lock)
  38. {
  39. unsigned int result;
  40. asm volatile("ldstub [%1], %0"
  41. : "=r" (result)
  42. : "r" (lock)
  43. : "memory");
  44. return (result == 0) ? 0 : EBUSY;
  45. }
  46. static inline void __spin_unlock(spinlock_t *lock)
  47. {
  48. asm volatile("":::"memory");
  49. *lock = 0;
  50. }
  51. static inline void __spin_lock_init(spinlock_t *lock)
  52. {
  53. *lock = 0;
  54. }
  55. static inline int __spin_is_locked(spinlock_t *lock)
  56. {
  57. return (*lock != 0);
  58. }
  59. #elif defined(POWERPC_SPINLOCKS)
  60. static inline int __spin_trylock(spinlock_t *lock)
  61. {
  62. unsigned int result;
  63. __asm__ __volatile__(
  64. "1: lwarx %0,0,%1\n\
  65. cmpwi 0,%0,0\n\
  66. li %0,0\n\
  67. bne- 2f\n\
  68. li %0,1\n\
  69. stwcx. %0,0,%1\n\
  70. bne- 1b\n\
  71. isync\n\
  72. 2:" : "=&r"(result)
  73. : "r"(lock)
  74. : "cr0", "memory");
  75. return (result == 1) ? 0 : EBUSY;
  76. }
  77. static inline void __spin_unlock(spinlock_t *lock)
  78. {
  79. asm volatile("eieio":::"memory");
  80. *lock = 0;
  81. }
  82. static inline void __spin_lock_init(spinlock_t *lock)
  83. {
  84. *lock = 0;
  85. }
  86. static inline int __spin_is_locked(spinlock_t *lock)
  87. {
  88. return (*lock != 0);
  89. }
  90. #elif defined(INTEL_SPINLOCKS)
  91. static inline int __spin_trylock(spinlock_t *lock)
  92. {
  93. int oldval;
  94. asm volatile("xchgl %0,%1"
  95. : "=r" (oldval), "=m" (*lock)
  96. : "0" (0)
  97. : "memory");
  98. return oldval > 0 ? 0 : EBUSY;
  99. }
  100. static inline void __spin_unlock(spinlock_t *lock)
  101. {
  102. asm volatile("":::"memory");
  103. *lock = 1;
  104. }
  105. static inline void __spin_lock_init(spinlock_t *lock)
  106. {
  107. *lock = 1;
  108. }
  109. static inline int __spin_is_locked(spinlock_t *lock)
  110. {
  111. return (*lock != 1);
  112. }
  113. #elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)
  114. /* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
  115. * sync(3) for the details of the intrinsic operations.
  116. *
  117. * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
  118. */
  119. #ifdef STANDALONE
  120. /* MIPSPro 7.3 has "__inline" as an extension, but not "inline. */
  121. #define inline __inline
  122. #endif /* STANDALONE */
  123. /* Returns 0 if the lock is acquired, EBUSY otherwise. */
  124. static inline int __spin_trylock(spinlock_t *lock)
  125. {
  126. unsigned int val;
  127. val = __lock_test_and_set(lock, 1);
  128. return val == 0 ? 0 : EBUSY;
  129. }
  130. static inline void __spin_unlock(spinlock_t *lock)
  131. {
  132. __lock_release(lock);
  133. }
  134. static inline void __spin_lock_init(spinlock_t *lock)
  135. {
  136. __lock_release(lock);
  137. }
  138. /* Returns 1 if the lock is held, 0 otherwise. */
  139. static inline int __spin_is_locked(spinlock_t *lock)
  140. {
  141. unsigned int val;
  142. val = __add_and_fetch(lock, 0);
  143. return val;
  144. }
  145. #elif defined(MIPS_SPINLOCKS)
  146. static inline unsigned int load_linked(unsigned long addr)
  147. {
  148. unsigned int res;
  149. __asm__ __volatile__("ll\t%0,(%1)"
  150. : "=r" (res)
  151. : "r" (addr));
  152. return res;
  153. }
  154. static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
  155. {
  156. unsigned int res;
  157. __asm__ __volatile__("sc\t%0,(%2)"
  158. : "=r" (res)
  159. : "0" (value), "r" (addr));
  160. return res;
  161. }
  162. static inline int __spin_trylock(spinlock_t *lock)
  163. {
  164. unsigned int mw;
  165. do {
  166. mw = load_linked(lock);
  167. if (mw)
  168. return EBUSY;
  169. } while (!store_conditional(lock, 1));
  170. asm volatile("":::"memory");
  171. return 0;
  172. }
  173. static inline void __spin_unlock(spinlock_t *lock)
  174. {
  175. asm volatile("":::"memory");
  176. *lock = 0;
  177. }
  178. static inline void __spin_lock_init(spinlock_t *lock)
  179. {
  180. *lock = 0;
  181. }
  182. static inline int __spin_is_locked(spinlock_t *lock)
  183. {
  184. return (*lock != 0);
  185. }
  186. #else
  187. #error Need to implement spinlock code in spinlock.c
  188. #endif
  189. /*
  190. * OS SPECIFIC
  191. */
  192. static void yield_cpu(void)
  193. {
  194. struct timespec tm;
  195. #ifdef USE_SCHED_YIELD
  196. sched_yield();
  197. #else
  198. /* Linux will busy loop for delays < 2ms on real time tasks */
  199. tm.tv_sec = 0;
  200. tm.tv_nsec = 2000000L + 1;
  201. nanosleep(&tm, NULL);
  202. #endif
  203. }
  204. static int this_is_smp(void)
  205. {
  206. #if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
  207. return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
  208. #else
  209. return 0;
  210. #endif
  211. }
  212. /*
  213. * GENERIC
  214. */
  215. static int smp_machine = 0;
  216. static inline void __spin_lock(spinlock_t *lock)
  217. {
  218. int ntries = 0;
  219. while(__spin_trylock(lock)) {
  220. while(__spin_is_locked(lock)) {
  221. if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
  222. continue;
  223. yield_cpu();
  224. }
  225. }
  226. }
  227. static void __read_lock(tdb_rwlock_t *rwlock)
  228. {
  229. int ntries = 0;
  230. while(1) {
  231. __spin_lock(&rwlock->lock);
  232. if (!(rwlock->count & RWLOCK_BIAS)) {
  233. rwlock->count++;
  234. __spin_unlock(&rwlock->lock);
  235. return;
  236. }
  237. __spin_unlock(&rwlock->lock);
  238. while(rwlock->count & RWLOCK_BIAS) {
  239. if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
  240. continue;
  241. yield_cpu();
  242. }
  243. }
  244. }
  245. static void __write_lock(tdb_rwlock_t *rwlock)
  246. {
  247. int ntries = 0;
  248. while(1) {
  249. __spin_lock(&rwlock->lock);
  250. if (rwlock->count == 0) {
  251. rwlock->count |= RWLOCK_BIAS;
  252. __spin_unlock(&rwlock->lock);
  253. return;
  254. }
  255. __spin_unlock(&rwlock->lock);
  256. while(rwlock->count != 0) {
  257. if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
  258. continue;
  259. yield_cpu();
  260. }
  261. }
  262. }
  263. static void __write_unlock(tdb_rwlock_t *rwlock)
  264. {
  265. __spin_lock(&rwlock->lock);
  266. #ifdef DEBUG
  267. if (!(rwlock->count & RWLOCK_BIAS))
  268. fprintf(stderr, "bug: write_unlock\n");
  269. #endif
  270. rwlock->count &= ~RWLOCK_BIAS;
  271. __spin_unlock(&rwlock->lock);
  272. }
  273. static void __read_unlock(tdb_rwlock_t *rwlock)
  274. {
  275. __spin_lock(&rwlock->lock);
  276. #ifdef DEBUG
  277. if (!rwlock->count)
  278. fprintf(stderr, "bug: read_unlock\n");
  279. if (rwlock->count & RWLOCK_BIAS)
  280. fprintf(stderr, "bug: read_unlock\n");
  281. #endif
  282. rwlock->count--;
  283. __spin_unlock(&rwlock->lock);
  284. }
  285. /* TDB SPECIFIC */
  286. /* lock a list in the database. list -1 is the alloc list */
  287. int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
  288. {
  289. tdb_rwlock_t *rwlocks;
  290. if (!tdb->map_ptr) return -1;
  291. rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
  292. switch(rw_type) {
  293. case F_RDLCK:
  294. __read_lock(&rwlocks[list+1]);
  295. break;
  296. case F_WRLCK:
  297. __write_lock(&rwlocks[list+1]);
  298. break;
  299. default:
  300. return TDB_ERRCODE(TDB_ERR_LOCK, -1);
  301. }
  302. return 0;
  303. }
  304. /* unlock the database. */
  305. int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
  306. {
  307. tdb_rwlock_t *rwlocks;
  308. if (!tdb->map_ptr) return -1;
  309. rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
  310. switch(rw_type) {
  311. case F_RDLCK:
  312. __read_unlock(&rwlocks[list+1]);
  313. break;
  314. case F_WRLCK:
  315. __write_unlock(&rwlocks[list+1]);
  316. break;
  317. default:
  318. return TDB_ERRCODE(TDB_ERR_LOCK, -1);
  319. }
  320. return 0;
  321. }
  322. int tdb_create_rwlocks(int fd, unsigned int hash_size)
  323. {
  324. unsigned size, i;
  325. tdb_rwlock_t *rwlocks;
  326. size = TDB_SPINLOCK_SIZE(hash_size);
  327. rwlocks = malloc(size);
  328. if (!rwlocks)
  329. return -1;
  330. for(i = 0; i < hash_size+1; i++) {
  331. __spin_lock_init(&rwlocks[i].lock);
  332. rwlocks[i].count = 0;
  333. }
  334. /* Write it out (appending to end) */
  335. if (write(fd, rwlocks, size) != size) {
  336. free(rwlocks);
  337. return -1;
  338. }
  339. smp_machine = this_is_smp();
  340. free(rwlocks);
  341. return 0;
  342. }
  343. int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
  344. {
  345. tdb_rwlock_t *rwlocks;
  346. unsigned i;
  347. if (tdb->header.rwlocks == 0) return 0;
  348. if (!tdb->map_ptr) return -1;
  349. /* We're mmapped here */
  350. rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
  351. for(i = 0; i < tdb->header.hash_size+1; i++) {
  352. __spin_lock_init(&rwlocks[i].lock);
  353. rwlocks[i].count = 0;
  354. }
  355. return 0;
  356. }
  357. #else
  358. int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
  359. int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
  360. int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
  361. /* Non-spinlock version: remove spinlock pointer */
  362. int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
  363. {
  364. tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
  365. - (char *)&tdb->header);
  366. tdb->header.rwlocks = 0;
  367. if (lseek(tdb->fd, off, SEEK_SET) != off
  368. || write(tdb->fd, (void *)&tdb->header.rwlocks,
  369. sizeof(tdb->header.rwlocks))
  370. != sizeof(tdb->header.rwlocks))
  371. return -1;
  372. return 0;
  373. }
  374. #endif