/* Low-level lock implementation.  Generic futex-based version.
   Copyright (C) 2005-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */
#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1

#include <atomic.h>
#include <lowlevellock-futex.h>
/* Low-level locks use a combination of atomic operations (to acquire and
   release lock ownership) and futex operations (to block until the state
   of a lock changes).  A lock can be in one of three states:
   0:  not acquired,
   1:  acquired with no waiters; no other threads are blocked or about to block
       for changes to the lock state,
   >1: acquired, possibly with waiters; there may be other threads blocked or
       about to block for changes to the lock state.

   We expect that the common case is an uncontended lock, so we just need
   to transition the lock between states 0 and 1; releasing the lock does
   not need to wake any other blocked threads.  If the lock is contended
   and a thread decides to block using a futex operation, then this thread
   needs to first change the state to >1; if this state is observed during
   lock release, the releasing thread will wake one of the potentially
   blocked threads.

   Much of this code takes a 'private' parameter.  This may be:
   LLL_PRIVATE: lock only shared within a process
   LLL_SHARED:  lock may be shared across processes.

   Condition variables contain an optimization for broadcasts that requeues
   waiting threads on a lock's futex.  Therefore, there is a special
   variant of the locks (whose name contains "cond") that makes sure to
   always set the lock state to >1 and not just 1.

   Robust locks set the lock to the id of the owner.  This allows detection
   of the case where the owner exits without releasing the lock.  Flags are
   OR'd with the owner id to record additional information about lock state.
   Therefore the states of robust locks are:
   0: not acquired
   id: acquired (by user identified by id & FUTEX_TID_MASK)
   The following flags may be set in the robust lock value:
   FUTEX_WAITERS - possibly has waiters
   FUTEX_OWNER_DIED - owning user has exited without releasing the futex.  */
/* If LOCK is 0 (not acquired), set to 1 (acquired with no waiters) and return
   0.  Otherwise leave lock unchanged and return non-zero to indicate that the
   lock was not acquired.  The CAS uses acquire MO so a successful acquisition
   synchronizes with the releasing thread's atomic_exchange_rel.  */
#define lll_trylock(lock)	\
  __glibc_unlikely (atomic_compare_and_exchange_bool_acq (&(lock), 1, 0))
/* If LOCK is 0 (not acquired), set to 2 (acquired, possibly with waiters) and
   return 0.  Otherwise leave lock unchanged and return non-zero to indicate
   that the lock was not acquired.  Setting 2 instead of 1 supports the
   condvar broadcast requeue optimization described above.  */
#define lll_cond_trylock(lock)	\
  __glibc_unlikely (atomic_compare_and_exchange_bool_acq (&(lock), 2, 0))
/* Slow path of __lll_lock: set the lock state to >1 and block on the futex
   until the lock is acquired.  The "_private" variant is specialized for
   process-private locks (LLL_PRIVATE known at compile time).  */
extern void __lll_lock_wait_private (int *futex) attribute_hidden;
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
/* This is an expression rather than a statement even though its value is
   void, so that it can be used in a comma expression or as an expression
   that's cast to void.  */
/* The inner conditional compiles to a call to __lll_lock_wait_private if
   private is known at compile time to be LLL_PRIVATE, and to a call to
   __lll_lock_wait otherwise.  */
/* If FUTEX is 0 (not acquired), set to 1 (acquired with no waiters) and
   return.  Otherwise, ensure that it is >1 (acquired, possibly with waiters)
   and then block until we acquire the lock, at which point FUTEX will still be
   >1.  The lock is always acquired on return.  */
#define __lll_lock(futex, private)                                      \
  ((void)                                                               \
   ({                                                                   \
     int *__futex = (futex);                                            \
     /* Fast path: uncontended acquisition via acquire-MO CAS 0 -> 1.  */ \
     if (__glibc_unlikely                                               \
	 (atomic_compare_and_exchange_bool_acq (__futex, 1, 0)))        \
       {                                                                \
	 if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
	   __lll_lock_wait_private (__futex);                           \
	 else                                                           \
	   __lll_lock_wait (__futex, private);                          \
       }                                                                \
   }))
#define lll_lock(futex, private)	\
  __lll_lock (&(futex), private)
/* This is an expression rather than a statement even though its value is
   void, so that it can be used in a comma expression or as an expression
   that's cast to void.  */
/* Unconditionally set FUTEX to 2 (acquired, possibly with waiters).  If FUTEX
   was 0 (not acquired) then return.  Otherwise, block until the lock is
   acquired, at which point FUTEX is 2 (acquired, possibly with waiters).  The
   lock is always acquired on return.  */
#define __lll_cond_lock(futex, private)                                 \
  ((void)                                                               \
   ({                                                                   \
     int *__futex = (futex);                                            \
     /* Exchange (not CAS) so the state becomes 2 even when the lock    \
	was free; a later unlock will then always wake a waiter.  */    \
     if (__glibc_unlikely (atomic_exchange_acq (__futex, 2) != 0))      \
       __lll_lock_wait (__futex, private);                              \
   }))
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
/* Slow path of __lll_timedlock: block until the lock is acquired or the
   absolute timeout expires.  */
extern int __lll_timedlock_wait (int *futex, const struct timespec *,
				 int private) attribute_hidden;

/* As __lll_lock, but with a timeout.  If the timeout occurs then return
   ETIMEDOUT.  If ABSTIME is invalid, return EINVAL.  Returns 0 on
   successful acquisition.  */
#define __lll_timedlock(futex, abstime, private)                \
  ({                                                            \
    int *__futex = (futex);                                     \
    int __val = 0;                                              \
                                                                \
    /* Fast path: uncontended acquisition via acquire-MO CAS.  */ \
    if (__glibc_unlikely                                        \
	(atomic_compare_and_exchange_bool_acq (__futex, 1, 0))) \
      __val = __lll_timedlock_wait (__futex, abstime, private); \
    __val;                                                      \
  })
#define lll_timedlock(futex, abstime, private)	\
  __lll_timedlock (&(futex), abstime, private)
/* This is an expression rather than a statement even though its value is
   void, so that it can be used in a comma expression or as an expression
   that's cast to void.  */
/* Unconditionally set FUTEX to 0 (not acquired), releasing the lock.  If FUTEX
   was >1 (acquired, possibly with waiters), then wake any waiters.  The waiter
   that acquires the lock will set FUTEX to >1.
   Evaluate PRIVATE before releasing the lock so that we do not violate the
   mutex destruction requirements.  Specifically, we need to ensure that
   another thread can destroy the mutex (and reuse its memory) once it
   acquires the lock and when there will be no further lock acquisitions;
   thus, we must not access the lock after releasing it, or those accesses
   could be concurrent with mutex destruction or reuse of the memory.  */
#define __lll_unlock(futex, private)                    \
  ((void)                                               \
   ({                                                   \
     int *__futex = (futex);                            \
     /* Evaluated before the release — see above.  */   \
     int __private = (private);                         \
     int __oldval = atomic_exchange_rel (__futex, 0);   \
     if (__glibc_unlikely (__oldval > 1))               \
       lll_futex_wake (__futex, 1, __private);          \
   }))
#define lll_unlock(futex, private)	\
  __lll_unlock (&(futex), private)
/* Non-zero iff FUTEX differs from the unlocked state (0), i.e. the lock is
   currently held.  Non-atomic read; only meaningful as a hint or when the
   caller already excludes concurrent modification.  */
#define lll_islocked(futex) \
  ((futex) != LLL_LOCK_INITIALIZER)

/* Our internal lock implementation is identical to the binary-compatible
   mutex implementation.  */
/* Initializers for lock.  */
#define LLL_LOCK_INITIALIZER		(0)
#define LLL_LOCK_INITIALIZER_LOCKED	(1)
  148. extern int __lll_timedwait_tid (int *, const struct timespec *)
  149. attribute_hidden;
  150. /* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
  151. wake-up when the clone terminates. The memory location contains the
  152. thread ID while the clone is running and is reset to zero by the kernel
  153. afterwards. The kernel up to version 3.16.3 does not use the private futex
  154. operations for futex wake-up when the clone terminates.
  155. If ABSTIME is not NULL, is used a timeout for futex call. If the timeout
  156. occurs then return ETIMEOUT, if ABSTIME is invalid, return EINVAL.
  157. The futex operation are issues with cancellable versions. */
  158. #define lll_wait_tid(tid, abstime) \
  159. ({ \
  160. int __res = 0; \
  161. __typeof (tid) __tid; \
  162. if (abstime != NULL) \
  163. __res = __lll_timedwait_tid (&(tid), (abstime)); \
  164. else \
  165. /* We need acquire MO here so that we synchronize with the \
  166. kernel's store to 0 when the clone terminates. (see above) */ \
  167. while ((__tid = atomic_load_acquire (&(tid))) != 0) \
  168. lll_futex_wait_cancel (&(tid), __tid, LLL_SHARED); \
  169. __res; \
  170. })
#endif	/* lowlevellock.h */