x86.h 7.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238
  1. /*
  2. * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
  3. * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
  4. * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
  5. *
  6. *
  7. * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
  8. * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
  9. *
  10. * Permission is hereby granted to use or copy this program
  11. * for any purpose, provided the above notices are retained on all copies.
  12. * Permission to modify the code and to distribute modified code is granted,
  13. * provided the above notices are retained, and a notice that the code was
  14. * modified is included with the above copyright notice.
  15. *
  16. * Some of the machine specific code was borrowed from our GC distribution.
  17. */
  18. /* The following really assume we have a 486 or better. */
  19. #include "../all_aligned_atomic_load_store.h"
  20. #include "../test_and_set_t_is_char.h"
  21. #if !defined(AO_USE_PENTIUM4_INSTRS) && !defined(__i386)
  22. /* "mfence" (SSE2) is supported on all x86_64/amd64 chips. */
  23. # define AO_USE_PENTIUM4_INSTRS
  24. #endif
  25. #if defined(AO_USE_PENTIUM4_INSTRS)
  26. AO_INLINE void
  27. AO_nop_full(void)
  28. {
  29. __asm__ __volatile__ ("mfence" : : : "memory");
  30. }
  31. # define AO_HAVE_nop_full
  32. #else
  33. /* We could use the cpuid instruction. But that seems to be slower */
  34. /* than the default implementation based on test_and_set_full. Thus */
  35. /* we omit that bit of misinformation here. */
  36. #endif /* !AO_USE_PENTIUM4_INSTRS */
  37. /* As far as we can tell, the lfence and sfence instructions are not */
  38. /* currently needed or useful for cached memory accesses. */
  39. /* Really only works for 486 and later */
  40. #ifndef AO_PREFER_GENERALIZED
  41. AO_INLINE AO_t
  42. AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
  43. {
  44. AO_t result;
  45. __asm__ __volatile__ ("lock; xadd %0, %1"
  46. : "=r" (result), "+m" (*p)
  47. : "0" (incr)
  48. : "memory");
  49. return result;
  50. }
  51. # define AO_HAVE_fetch_and_add_full
  52. #endif /* !AO_PREFER_GENERALIZED */
  53. AO_INLINE unsigned char
  54. AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
  55. {
  56. unsigned char result;
  57. __asm__ __volatile__ ("lock; xaddb %0, %1"
  58. : "=q" (result), "+m" (*p)
  59. : "0" (incr)
  60. : "memory");
  61. return result;
  62. }
  63. #define AO_HAVE_char_fetch_and_add_full
  64. AO_INLINE unsigned short
  65. AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
  66. {
  67. unsigned short result;
  68. __asm__ __volatile__ ("lock; xaddw %0, %1"
  69. : "=r" (result), "+m" (*p)
  70. : "0" (incr)
  71. : "memory");
  72. return result;
  73. }
  74. #define AO_HAVE_short_fetch_and_add_full
  75. #ifndef AO_PREFER_GENERALIZED
  76. /* Really only works for 486 and later */
  77. AO_INLINE void
  78. AO_and_full (volatile AO_t *p, AO_t value)
  79. {
  80. __asm__ __volatile__ ("lock; and %1, %0"
  81. : "+m" (*p)
  82. : "r" (value)
  83. : "memory");
  84. }
  85. # define AO_HAVE_and_full
  86. AO_INLINE void
  87. AO_or_full (volatile AO_t *p, AO_t value)
  88. {
  89. __asm__ __volatile__ ("lock; or %1, %0"
  90. : "+m" (*p)
  91. : "r" (value)
  92. : "memory");
  93. }
  94. # define AO_HAVE_or_full
  95. AO_INLINE void
  96. AO_xor_full (volatile AO_t *p, AO_t value)
  97. {
  98. __asm__ __volatile__ ("lock; xor %1, %0"
  99. : "+m" (*p)
  100. : "r" (value)
  101. : "memory");
  102. }
  103. # define AO_HAVE_xor_full
  104. #endif /* !AO_PREFER_GENERALIZED */
  105. AO_INLINE AO_TS_VAL_t
  106. AO_test_and_set_full (volatile AO_TS_t *addr)
  107. {
  108. AO_TS_t oldval;
  109. /* Note: the "xchg" instruction does not need a "lock" prefix */
  110. __asm__ __volatile__ ("xchg %b0, %1"
  111. : "=q" (oldval), "+m" (*addr)
  112. : "0" (0xff)
  113. : "memory");
  114. return (AO_TS_VAL_t)oldval;
  115. }
  116. #define AO_HAVE_test_and_set_full
  117. #ifndef AO_GENERALIZE_ASM_BOOL_CAS
  118. /* Returns nonzero if the comparison succeeded. */
  119. AO_INLINE int
  120. AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
  121. {
  122. char result;
  123. __asm__ __volatile__ ("lock; cmpxchg %2, %0; setz %1"
  124. : "+m" (*addr), "=a" (result)
  125. : "r" (new_val), "a" (old)
  126. : "memory");
  127. return (int) result;
  128. }
  129. # define AO_HAVE_compare_and_swap_full
  130. #endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
  131. AO_INLINE AO_t
  132. AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
  133. AO_t new_val)
  134. {
  135. AO_t fetched_val;
  136. __asm__ __volatile__ ("lock; cmpxchg %2, %0"
  137. : "+m" (*addr), "=a" (fetched_val)
  138. : "r" (new_val), "a" (old_val)
  139. : "memory");
  140. return fetched_val;
  141. }
  142. #define AO_HAVE_fetch_compare_and_swap_full
  143. #if defined(__i386)
  144. # ifndef AO_NO_CMPXCHG8B
  145. # include "../standard_ao_double_t.h"
  146. /* Reading or writing a quadword aligned on a 64-bit boundary is */
  147. /* always carried out atomically (requires at least a Pentium). */
  148. # define AO_ACCESS_double_CHECK_ALIGNED
  149. # include "../loadstore/double_atomic_load_store.h"
  150. /* Returns nonzero if the comparison succeeded. */
  151. /* Really requires at least a Pentium. */
  152. AO_INLINE int
  153. AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
  154. AO_t old_val1, AO_t old_val2,
  155. AO_t new_val1, AO_t new_val2)
  156. {
  157. char result;
  158. __asm__ __volatile__ ("lock; cmpxchg8b %0; setz %1"
  159. : "+m" (*addr), "=a" (result)
  160. : "d" (old_val2), "a" (old_val1),
  161. "c" (new_val2), "b" (new_val1)
  162. : "memory");
  163. return (int) result;
  164. }
  165. # define AO_HAVE_compare_double_and_swap_double_full
  166. # endif /* !AO_NO_CMPXCHG8B */
  167. # define AO_T_IS_INT
  168. #else /* x64 */
  169. AO_INLINE unsigned int
  170. AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
  171. {
  172. unsigned int result;
  173. __asm__ __volatile__ ("lock; xaddl %0, %1"
  174. : "=r" (result), "+m" (*p)
  175. : "0" (incr)
  176. : "memory");
  177. return result;
  178. }
  179. # define AO_HAVE_int_fetch_and_add_full
  180. # ifdef AO_CMPXCHG16B_AVAILABLE
  181. # include "../standard_ao_double_t.h"
  182. /* Older AMD Opterons are missing this instruction (SIGILL should */
  183. /* be thrown in this case). */
  184. AO_INLINE int
  185. AO_compare_double_and_swap_double_full (volatile AO_double_t *addr,
  186. AO_t old_val1, AO_t old_val2,
  187. AO_t new_val1, AO_t new_val2)
  188. {
  189. char result;
  190. __asm__ __volatile__ ("lock; cmpxchg16b %0; setz %1"
  191. : "+m" (*addr), "=a" (result)
  192. : "d" (old_val2), "a" (old_val1),
  193. "c" (new_val2), "b" (new_val1)
  194. : "memory");
  195. return (int) result;
  196. }
  197. # define AO_HAVE_compare_double_and_swap_double_full
  198. # endif /* !AO_CMPXCHG16B_AVAILABLE */
  199. #endif /* x64 */
  200. /* Real X86 implementations, except for some old 32-bit WinChips, */
  201. /* appear to enforce ordering between memory operations, EXCEPT that */
  202. /* a later read can pass earlier writes, presumably due to the visible */
  203. /* presence of store buffers. */
  204. /* We ignore both the WinChips and the fact that the official specs */
  205. /* seem to be much weaker (and arguably too weak to be usable). */
  206. #include "../ordered_except_wr.h"