/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
 *
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Some of the machine specific code was borrowed from our GC distribution.
 */

/* The following really assume we have a 486 or better.  Unfortunately */
/* gcc doesn't define a suitable feature test macro based on command */
/* line options. */
/* We should perhaps test dynamically. */

#include "../all_aligned_atomic_load_store.h"

#include "../test_and_set_t_is_char.h"

#if defined(__SSE2__) && !defined(AO_USE_PENTIUM4_INSTRS)
  /* "mfence" is a part of SSE2 set (introduced on Intel Pentium 4). */
# define AO_USE_PENTIUM4_INSTRS
#endif

#if defined(AO_USE_PENTIUM4_INSTRS)
AO_INLINE void
AO_nop_full(void)
{
  __asm__ __volatile__("mfence" : : : "memory");
}
# define AO_HAVE_nop_full

#else
  /* We could use the cpuid instruction.  But that seems to be slower */
  /* than the default implementation based on test_and_set_full.  Thus */
  /* we omit that bit of misinformation here. */
#endif /* !AO_USE_PENTIUM4_INSTRS */

/* As far as we can tell, the lfence and sfence instructions are not */
/* currently needed or useful for cached memory accesses. */

/* Really only works for 486 and later */
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
  AO_t result;
  __asm__ __volatile__ ("lock; xadd %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
# define AO_HAVE_fetch_and_add_full
#endif /* !AO_PREFER_GENERALIZED */
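
/* Example usage (an illustrative sketch only, not compiled as part of */
/* this header; it assumes the client includes the public atomic_ops.h */
/* interface): handing out monotonically increasing ticket numbers. */
#if 0
  static volatile AO_t next_ticket = 0;

  static AO_t take_ticket(void)
  {
    /* Atomically adds 1 and returns the pre-increment value; the */
    /* locked xadd also acts as a full memory barrier. */
    return AO_fetch_and_add_full(&next_ticket, 1);
  }
#endif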

AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
{
  unsigned char result;
  __asm__ __volatile__ ("lock; xaddb %0, %1" :
                        "=q" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
#define AO_HAVE_char_fetch_and_add_full

AO_INLINE unsigned short
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
{
  unsigned short result;
  __asm__ __volatile__ ("lock; xaddw %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
#define AO_HAVE_short_fetch_and_add_full

#ifndef AO_PREFER_GENERALIZED
/* Really only works for 486 and later */
AO_INLINE void
AO_and_full (volatile AO_t *p, AO_t value)
{
  __asm__ __volatile__ ("lock; and %1, %0" :
                        "=m" (*p) : "r" (value), "m" (*p)
                        : "memory");
}
# define AO_HAVE_and_full

AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t value)
{
  __asm__ __volatile__ ("lock; or %1, %0" :
                        "=m" (*p) : "r" (value), "m" (*p)
                        : "memory");
}
# define AO_HAVE_or_full

AO_INLINE void
AO_xor_full (volatile AO_t *p, AO_t value)
{
  __asm__ __volatile__ ("lock; xor %1, %0" :
                        "=m" (*p) : "r" (value), "m" (*p)
                        : "memory");
}
# define AO_HAVE_xor_full
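
/* Example usage (an illustrative sketch only, not compiled as part of */
/* this header): treating an AO_t as a mask of flag bits.  AO_or_full */
/* sets bits, AO_and_full clears them, AO_xor_full toggles them; none */
/* of these report the previous value. */
#if 0
  static volatile AO_t flags = 0;

  /* Bit 0 is a hypothetical "shutdown requested" flag. */
  static void request_shutdown(void) { AO_or_full(&flags, (AO_t)1); }
  static void cancel_shutdown(void)  { AO_and_full(&flags, ~(AO_t)1); }
#endif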

/* AO_store_full could be implemented directly using "xchg", but it */
/* can be generalized efficiently as an ordinary store followed by */
/* AO_nop_full (the "mfence" instruction). */
#endif /* !AO_PREFER_GENERALIZED */
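
/* A minimal sketch of the generalization mentioned above (assumption: */
/* this mirrors roughly what the generalization headers produce when */
/* AO_store_full is not defined here); not compiled as part of this */
/* header. */
#if 0
  static void store_full_equivalent(volatile AO_t *addr, AO_t val)
  {
    AO_store(addr, val);   /* plain aligned store */
    AO_nop_full();         /* mfence: later loads cannot be satisfied */
                           /* ahead of the store above */
  }
#endif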

AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
  unsigned char oldval;
  /* Note: the "xchg" instruction does not need a "lock" prefix */
  __asm__ __volatile__ ("xchgb %0, %1"
                        : "=q" (oldval), "=m" (*addr)
                        : "0" ((unsigned char)0xff), "m" (*addr)
                        : "memory");
  return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set_full
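
/* Example usage (an illustrative sketch only, not compiled as part of */
/* this header): a minimal test-and-set spin lock.  AO_TS_INITIALIZER, */
/* AO_TS_SET and AO_CLEAR come from the public atomic_ops.h interface. */
#if 0
  static volatile AO_TS_t lock = AO_TS_INITIALIZER;

  static void spin_lock(void)
  {
    while (AO_test_and_set_full(&lock) == AO_TS_SET) {
      /* Busy wait; a production lock would back off or yield here. */
    }
  }

  static void spin_unlock(void)
  {
    AO_CLEAR(&lock);   /* release the lock (clears the byte) */
  }
#endif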

#ifndef AO_GENERALIZE_ASM_BOOL_CAS
/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
    return (int)__sync_bool_compare_and_swap(addr, old, new_val
                        /* empty protection list */);
    /* Note: an empty list of variables protected by the */
    /* memory barrier should mean all globally accessible */
    /* variables are protected. */
# else
    char result;
    __asm__ __volatile__ ("lock; cmpxchg %3, %0; setz %1"
                          : "=m" (*addr), "=a" (result)
                          : "m" (*addr), "r" (new_val), "a" (old)
                          : "memory");
    return (int)result;
# endif
}
# define AO_HAVE_compare_and_swap_full
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
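
/* Example usage (an illustrative sketch only, not compiled as part of */
/* this header): a compare-and-swap retry loop that atomically raises */
/* a shared maximum.  AO_load comes from the load/store header */
/* included above. */
#if 0
  static void update_max(volatile AO_t *max_loc, AO_t candidate)
  {
    AO_t seen;
    do {
      seen = AO_load(max_loc);
      if (seen >= candidate) return;  /* nothing to do */
    } while (!AO_compare_and_swap_full(max_loc, seen, candidate));
  }
#endif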

AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
                               AO_t new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
    return __sync_val_compare_and_swap(addr, old_val, new_val
                        /* empty protection list */);
# else
    AO_t fetched_val;
    __asm__ __volatile__ ("lock; cmpxchg %3, %4"
                          : "=a" (fetched_val), "=m" (*addr)
                          : "a" (old_val), "r" (new_val), "m" (*addr)
                          : "memory");
    return fetched_val;
# endif
}
#define AO_HAVE_fetch_compare_and_swap_full
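
/* Example usage (an illustrative sketch only, not compiled as part of */
/* this header): the value fetched on failure lets a retry loop avoid */
/* an extra load, e.g. in a saturating increment. */
#if 0
  static AO_t saturating_inc(volatile AO_t *loc, AO_t limit)
  {
    AO_t seen = AO_load(loc);
    for (;;) {
      AO_t prev;
      if (seen >= limit) return seen;                 /* saturated */
      prev = AO_fetch_compare_and_swap_full(loc, seen, seen + 1);
      if (prev == seen) return seen + 1;              /* CAS succeeded */
      seen = prev;                  /* reuse the fetched value */
    }
  }
#endif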

#if !defined(__x86_64__) && !defined(AO_USE_SYNC_CAS_BUILTIN)
# include "../standard_ao_double_t.h"

/* Reading or writing a quadword aligned on a 64-bit boundary is */
/* always carried out atomically on at least a Pentium according to */
/* Chapter 8.1.1 of Volume 3A Part 1 of Intel processor manuals. */
# define AO_ACCESS_double_CHECK_ALIGNED
# include "../loadstore/double_atomic_load_store.h"

/* Returns nonzero if the comparison succeeded. */
/* Really requires at least a Pentium. */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old_val1, AO_t old_val2,
                                       AO_t new_val1, AO_t new_val2)
{
  char result;
# ifdef __PIC__
    AO_t saved_ebx;

    /* If PIC is turned on, we cannot use ebx as it is reserved for the */
    /* GOT pointer.  We should save and restore ebx.  The proposed */
    /* solution is not as efficient as the older alternatives (saving */
    /* ebx with a push, or passing new_val1 in edi), which avoided the */
    /* edi clobber and the temporary local variable, but it is more */
    /* portable: it works even if ebx is not used as the GOT pointer, */
    /* and it works with the buggy GCC releases that incorrectly */
    /* evaluate memory operand offsets in the inline assembly after a */
    /* push. */
#   ifdef __OPTIMIZE__
      __asm__ __volatile__("mov %%ebx, %2\n\t" /* save ebx */
                           "lea %0, %%edi\n\t" /* in case addr is in ebx */
                           "mov %7, %%ebx\n\t" /* load new_val1 */
                           "lock; cmpxchg8b (%%edi)\n\t"
                           "mov %2, %%ebx\n\t" /* restore ebx */
                           "setz %1"
                           : "=m" (*addr), "=a" (result), "=m" (saved_ebx)
                           : "m" (*addr), "d" (old_val2), "a" (old_val1),
                             "c" (new_val2), "m" (new_val1)
                           : "%edi", "memory");
#   else
      /* Less efficient code that manually preserves edi; needed when */
      /* GCC is invoked with -O0 (otherwise it fails to find a register */
      /* in class 'GENERAL_REGS'). */
      AO_t saved_edi;
      __asm__ __volatile__("mov %%edi, %3\n\t" /* save edi */
                           "mov %%ebx, %2\n\t" /* save ebx */
                           "lea %0, %%edi\n\t" /* in case addr is in ebx */
                           "mov %8, %%ebx\n\t" /* load new_val1 */
                           "lock; cmpxchg8b (%%edi)\n\t"
                           "mov %2, %%ebx\n\t" /* restore ebx */
                           "mov %3, %%edi\n\t" /* restore edi */
                           "setz %1"
                           : "=m" (*addr), "=a" (result),
                             "=m" (saved_ebx), "=m" (saved_edi)
                           : "m" (*addr), "d" (old_val2), "a" (old_val1),
                             "c" (new_val2), "m" (new_val1) : "memory");
#   endif
# else
    /* For non-PIC mode, this operation could be simplified (and be */
    /* faster) by using ebx as new_val1 (GCC would refuse to compile */
    /* such code for PIC mode). */
    __asm__ __volatile__ ("lock; cmpxchg8b %0; setz %1"
                          : "=m" (*addr), "=a" (result)
                          : "m" (*addr), "d" (old_val2), "a" (old_val1),
                            "c" (new_val2), "b" (new_val1)
                          : "memory");
# endif
  return (int) result;
}
# define AO_HAVE_compare_double_and_swap_double_full
# define AO_T_IS_INT

#elif defined(__ILP32__) || !defined(__x86_64__)
# include "../standard_ao_double_t.h"

/* Reading or writing a quadword aligned on a 64-bit boundary is */
/* always carried out atomically (requires at least a Pentium). */
# define AO_ACCESS_double_CHECK_ALIGNED
# include "../loadstore/double_atomic_load_store.h"

/* X32 has native support for 64-bit integer operations (AO_double_t */
/* is a 64-bit integer and we could use 64-bit cmpxchg). */
/* This primitive is used by compare_double_and_swap_double_full. */
AO_INLINE int
AO_double_compare_and_swap_full(volatile AO_double_t *addr,
                                AO_double_t old_val, AO_double_t new_val)
{
  /* It is safe to use __sync CAS built-in here. */
  return __sync_bool_compare_and_swap(&addr->AO_whole,
                                      old_val.AO_whole, new_val.AO_whole
                                      /* empty protection list */);
}
# define AO_HAVE_double_compare_and_swap_full
# define AO_T_IS_INT
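
/* Example usage (an illustrative sketch only, not compiled as part of */
/* this header): updating the tag half of a (value, tag) pair.  The */
/* AO_val1/AO_val2 half accessors are assumed to be those provided by */
/* standard_ao_double_t.h. */
#if 0
  static int retag(volatile AO_double_t *cell, AO_double_t seen, AO_t tag)
  {
    AO_double_t wanted;
    wanted.AO_val1 = seen.AO_val1;  /* keep the value half */
    wanted.AO_val2 = tag;           /* replace the tag half */
    return AO_double_compare_and_swap_full(cell, seen, wanted);
  }
#endif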

#else /* 64-bit */

AO_INLINE unsigned int
AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
{
  unsigned int result;
  __asm__ __volatile__ ("lock; xaddl %0, %1"
                        : "=r" (result), "=m" (*p)
                        : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
# define AO_HAVE_int_fetch_and_add_full

/* The Intel and AMD Architecture Programmer Manuals state roughly */
/* the following: */
/* - CMPXCHG16B (with a LOCK prefix) can be used to perform 16-byte */
/*   atomic accesses in 64-bit mode (with certain alignment */
/*   restrictions); */
/* - SSE instructions that access data larger than a quadword (like */
/*   MOVDQA) may be implemented using multiple memory accesses; */
/* - LOCK prefix causes an invalid-opcode exception when used with */
/*   128-bit media (SSE) instructions. */
/* Thus, currently, the only way to implement lock-free double_load */
/* and double_store on x86_64 is to use CMPXCHG16B (if available). */
/* TODO: Test some gcc macro to detect presence of cmpxchg16b. */

# ifdef AO_CMPXCHG16B_AVAILABLE
#   include "../standard_ao_double_t.h"

/* NEC LE-IT: older AMD Opterons are missing this instruction. */
/* On these machines SIGILL will be thrown. */
/* Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated (lock */
/* based) version available. */
/* HB: Changed this to not define either by default.  There are */
/* enough machines and tool chains around on which cmpxchg16b */
/* doesn't work.  And the emulation is unsafe by our usual rules. */
/* However both are clearly useful in certain cases. */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old_val1, AO_t old_val2,
                                       AO_t new_val1, AO_t new_val2)
{
  char result;
  __asm__ __volatile__("lock; cmpxchg16b %0; setz %1"
                       : "=m" (*addr), "=a" (result)
                       : "m" (*addr), "d" (old_val2), "a" (old_val1),
                         "c" (new_val2), "b" (new_val1)
                       : "memory");
  return (int) result;
}
# define AO_HAVE_compare_double_and_swap_double_full
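
/* Example usage (an illustrative sketch only, not compiled as part of */
/* this header): an ABA-resistant update of a (value, version) pair. */
/* The AO_val1/AO_val2 half accessors are assumed to be those provided */
/* by standard_ao_double_t.h; the unsynchronized snapshot is benign */
/* here because the CAS validates it. */
#if 0
  static void set_versioned(volatile AO_double_t *cell, AO_t new_value)
  {
    AO_double_t snap;
    do {
      snap = *(AO_double_t *)cell;  /* racy snapshot, checked below */
    } while (!AO_compare_double_and_swap_double_full(cell,
                                        snap.AO_val1, snap.AO_val2,
                                        new_value, snap.AO_val2 + 1));
  }
#endif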

# elif defined(AO_WEAK_DOUBLE_CAS_EMULATION)
#   include "../standard_ao_double_t.h"

/* This one provides spinlock based emulation of CAS implemented in */
/* atomic_ops.c.  We probably do not want to do this here, since it */
/* is not atomic with respect to other kinds of updates of *addr. */
/* On the other hand, this may be a useful facility on occasion. */
int AO_compare_double_and_swap_double_emulation(
                                volatile AO_double_t *addr,
                                AO_t old_val1, AO_t old_val2,
                                AO_t new_val1, AO_t new_val2);

AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old_val1, AO_t old_val2,
                                       AO_t new_val1, AO_t new_val2)
{
  return AO_compare_double_and_swap_double_emulation(addr,
                                old_val1, old_val2, new_val1, new_val2);
}
# define AO_HAVE_compare_double_and_swap_double_full
# endif /* AO_WEAK_DOUBLE_CAS_EMULATION && !AO_CMPXCHG16B_AVAILABLE */
#endif /* x86_64 && !ILP32 */

/* Real X86 implementations, except for some old 32-bit WinChips, */
/* appear to enforce ordering between memory operations, EXCEPT that */
/* a later read can pass earlier writes, presumably due to the visible */
/* presence of store buffers. */
/* We ignore both the WinChips and the fact that the official specs */
/* seem to be much weaker (and arguably too weak to be usable). */

#include "../ordered_except_wr.h"
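
/* A minimal sketch of the store-buffer effect described above (the */
/* names are hypothetical; not compiled as part of this header).  In a */
/* Dekker-style handshake the read of the peer's flag must not pass */
/* our own flag store, so AO_nop_full (or a fully ordered RMW) is */
/* needed between them even on x86. */
#if 0
  static volatile AO_t my_flag = 0, peer_flag = 0;

  static int try_enter(void)
  {
    AO_store(&my_flag, 1);   /* announce intent */
    AO_nop_full();           /* StoreLoad barrier: without it the load */
                             /* below may be satisfied before the */
                             /* store above leaves the store buffer */
    return AO_load(&peer_flag) == 0;
  }
#endif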