elide.h — generic lock elision support (glibc, 3.9 KB).
/* elide.h: Generic lock elision support.
   Copyright (C) 2014-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
  15. #ifndef ELIDE_H
  16. #define ELIDE_H 1
  17. #include <hle.h>
  18. #include <elision-conf.h>
  19. #include <atomic.h>
  20. /* Adapt elision with ADAPT_COUNT and STATUS and decide retries. */
  21. static inline bool
  22. elision_adapt(signed char *adapt_count, unsigned int status)
  23. {
  24. if (status & _XABORT_RETRY)
  25. return false;
  26. if ((status & _XABORT_EXPLICIT)
  27. && _XABORT_CODE (status) == _ABORT_LOCK_BUSY)
  28. {
  29. /* Right now we skip here. Better would be to wait a bit
  30. and retry. This likely needs some spinning. Be careful
  31. to avoid writing the lock.
  32. Using relaxed MO and separate atomic accesses is sufficient because
  33. adapt_count is just a hint. */
  34. if (atomic_load_relaxed (adapt_count) != __elision_aconf.skip_lock_busy)
  35. atomic_store_relaxed (adapt_count, __elision_aconf.skip_lock_busy);
  36. }
  37. /* Internal abort. There is no chance for retry.
  38. Use the normal locking and next time use lock.
  39. Be careful to avoid writing to the lock. See above for MO. */
  40. else if (atomic_load_relaxed (adapt_count)
  41. != __elision_aconf.skip_lock_internal_abort)
  42. atomic_store_relaxed (adapt_count,
  43. __elision_aconf.skip_lock_internal_abort);
  44. return true;
  45. }
  46. /* is_lock_free must be executed inside the transaction */
  47. /* Returns true if lock defined by IS_LOCK_FREE was elided.
  48. ADAPT_COUNT is a per-lock state variable; it must be accessed atomically
  49. to avoid data races but is just a hint, so using relaxed MO and separate
  50. atomic loads and stores instead of atomic read-modify-write operations is
  51. sufficient. */
  52. #define ELIDE_LOCK(adapt_count, is_lock_free) \
  53. ({ \
  54. int ret = 0; \
  55. \
  56. if (atomic_load_relaxed (&(adapt_count)) <= 0) \
  57. { \
  58. for (int i = __elision_aconf.retry_try_xbegin; i > 0; i--) \
  59. { \
  60. unsigned int status; \
  61. if ((status = _xbegin ()) == _XBEGIN_STARTED) \
  62. { \
  63. if (is_lock_free) \
  64. { \
  65. ret = 1; \
  66. break; \
  67. } \
  68. _xabort (_ABORT_LOCK_BUSY); \
  69. } \
  70. if (!elision_adapt (&(adapt_count), status)) \
  71. break; \
  72. } \
  73. } \
  74. else \
  75. atomic_store_relaxed (&(adapt_count), \
  76. atomic_load_relaxed (&(adapt_count)) - 1); \
  77. ret; \
  78. })
  79. /* Returns true if lock defined by IS_LOCK_FREE was try-elided.
  80. ADAPT_COUNT is a per-lock state variable. */
  81. #define ELIDE_TRYLOCK(adapt_count, is_lock_free, write) ({ \
  82. int ret = 0; \
  83. if (__elision_aconf.retry_try_xbegin > 0) \
  84. { \
  85. if (write) \
  86. _xabort (_ABORT_NESTED_TRYLOCK); \
  87. ret = ELIDE_LOCK (adapt_count, is_lock_free); \
  88. } \
  89. ret; \
  90. })
  91. /* Returns true if lock defined by IS_LOCK_FREE was elided. The call
  92. to _xend crashes if the application incorrectly tries to unlock a
  93. lock which has not been locked. */
  94. #define ELIDE_UNLOCK(is_lock_free) \
  95. ({ \
  96. int ret = 0; \
  97. if (is_lock_free) \
  98. { \
  99. _xend (); \
  100. ret = 1; \
  101. } \
  102. ret; \
  103. })
  104. #endif