ia64.h

/*
 * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "../all_atomic_load_store.h"
#include "../all_acquire_release_volatile.h"
#include "../test_and_set_t_is_char.h"

#ifdef _ILP32
  /* 32-bit HP/UX code. */
  /* This requires pointer "swizzling": pointers must be expanded to */
  /* 64 bits with the addp4 instruction before use.  This makes it */
  /* hard to share code, but we try anyway. */
# define AO_LEN "4"

  /* We assume that addr always appears in argument position 1 in asm */
  /* code.  If it is clobbered due to swizzling, we also need it in */
  /* the second position.  Any later arguments are referenced */
  /* symbolically, so we don't have to worry about their position. */
  /* This requires gcc 3.1, but you shouldn't be using anything older */
  /* than that on IA64 anyway. */

  /* The AO_MASK macro is a workaround for the fact that HP/UX gcc */
  /* otherwise appears to store 64-bit pointers in ar.ccv, i.e. it */
  /* does not clear the high bits of a pointer value passed into */
  /* assembly code, even if that value is supposedly of type AO_t. */
# define AO_IN_ADDR "1"(addr)
# define AO_OUT_ADDR , "=r"(addr)
# define AO_SWIZZLE "addp4 %1=0,%1;;\n"
# define AO_MASK(ptr) __asm__ __volatile__("zxt4 %1=%1": "=r"(ptr) : "0"(ptr))
#else
# define AO_LEN "8"
# define AO_IN_ADDR "r"(addr)
# define AO_OUT_ADDR
# define AO_SWIZZLE
# define AO_MASK(ptr) /* empty */
#endif /* !_ILP32 */
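
/*
 * Illustration (not part of the original header): under _ILP32, the
 * AO_fetch_and_add1_acquire asm statement below expands roughly to
 *
 *   __asm__ __volatile__("addp4 %1=0,%1;;\n"
 *                        "fetchadd4.acq %0=[%1],1"
 *                        : "=r"(result), "=r"(addr)
 *                        : "1"(addr) : "memory");
 *
 * i.e. the 32-bit pointer is widened in place with addp4, which is
 * why addr must appear both as the input and as a clobbered output.
 */
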
AO_INLINE void
AO_nop_full(void)
{
  __asm__ __volatile__("mf" : : : "memory");
}
#define AO_HAVE_nop_full
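
/*
 * Usage sketch (illustrative only): publishing data with a full fence.
 * "data" and "flag" are hypothetical variables; AO_store comes from
 * the all_atomic_load_store.h header included above.
 *
 *   data = 42;
 *   AO_nop_full();       // mf: no memory access moves across this point
 *   AO_store(&flag, 1);
 */
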
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add1_acquire (volatile AO_t *addr)
{
  AO_t result;

  __asm__ __volatile__ (AO_SWIZZLE
                        "fetchadd" AO_LEN ".acq %0=[%1],1"
                        : "=r" (result) AO_OUT_ADDR : AO_IN_ADDR : "memory");
  return result;
}
#define AO_HAVE_fetch_and_add1_acquire

AO_INLINE AO_t
AO_fetch_and_add1_release (volatile AO_t *addr)
{
  AO_t result;

  __asm__ __volatile__ (AO_SWIZZLE
                        "fetchadd" AO_LEN ".rel %0=[%1],1"
                        : "=r" (result) AO_OUT_ADDR : AO_IN_ADDR : "memory");
  return result;
}
#define AO_HAVE_fetch_and_add1_release

AO_INLINE AO_t
AO_fetch_and_sub1_acquire (volatile AO_t *addr)
{
  AO_t result;

  __asm__ __volatile__ (AO_SWIZZLE
                        "fetchadd" AO_LEN ".acq %0=[%1],-1"
                        : "=r" (result) AO_OUT_ADDR : AO_IN_ADDR : "memory");
  return result;
}
#define AO_HAVE_fetch_and_sub1_acquire

AO_INLINE AO_t
AO_fetch_and_sub1_release (volatile AO_t *addr)
{
  AO_t result;

  __asm__ __volatile__ (AO_SWIZZLE
                        "fetchadd" AO_LEN ".rel %0=[%1],-1"
                        : "=r" (result) AO_OUT_ADDR : AO_IN_ADDR : "memory");
  return result;
}
#define AO_HAVE_fetch_and_sub1_release
#endif /* !AO_PREFER_GENERALIZED */
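
/*
 * Usage sketch (illustrative only): dropping a reference count.
 * "obj", "refcount" and "free_object" are hypothetical.  The fetch
 * primitives return the value held *before* the update, so a result
 * of 1 means the count just reached zero.
 *
 *   if (AO_fetch_and_sub1_release(&obj->refcount) == 1) {
 *     AO_nop_full();      // ensure prior accesses complete before reclaim
 *     free_object(obj);
 *   }
 */
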
AO_INLINE AO_t
AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val)
{
  AO_t fetched_val;

  AO_MASK(old);
  __asm__ __volatile__(AO_SWIZZLE
                       "mov ar.ccv=%[old] ;; cmpxchg" AO_LEN
                       ".acq %0=[%1],%[new_val],ar.ccv"
                       : "=r"(fetched_val) AO_OUT_ADDR
                       : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"(old)
                       : "memory");
  return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap_acquire

AO_INLINE AO_t
AO_fetch_compare_and_swap_release(volatile AO_t *addr, AO_t old, AO_t new_val)
{
  AO_t fetched_val;

  AO_MASK(old);
  __asm__ __volatile__(AO_SWIZZLE
                       "mov ar.ccv=%[old] ;; cmpxchg" AO_LEN
                       ".rel %0=[%1],%[new_val],ar.ccv"
                       : "=r"(fetched_val) AO_OUT_ADDR
                       : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"(old)
                       : "memory");
  return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap_release
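
/*
 * Usage sketch (illustrative only): a retry loop that atomically ORs
 * bits into a word using the CAS primitive above.  "atomic_or" is a
 * hypothetical helper, not part of this header.  The CAS returns the
 * fetched value, so equality with old signals a successful swap.
 *
 *   AO_t atomic_or(volatile AO_t *p, AO_t bits)
 *   {
 *     AO_t old;
 *     do {
 *       old = *p;
 *     } while (AO_fetch_compare_and_swap_acquire(p, old, old | bits) != old);
 *     return old;
 *   }
 *
 * In the sub-word variants below, old is cast to AO_t so the 8- or
 * 16-bit value is zero-extended before being moved into the 64-bit
 * ar.ccv register, mirroring the purpose of AO_MASK above.
 */
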
AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_acquire(volatile unsigned char *addr,
                                unsigned char old, unsigned char new_val)
{
  unsigned char fetched_val;

  __asm__ __volatile__(AO_SWIZZLE
                "mov ar.ccv=%[old] ;; cmpxchg1.acq %0=[%1],%[new_val],ar.ccv"
                : "=r"(fetched_val) AO_OUT_ADDR
                : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
                : "memory");
  return fetched_val;
}
#define AO_HAVE_char_fetch_compare_and_swap_acquire

AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_release(volatile unsigned char *addr,
                                unsigned char old, unsigned char new_val)
{
  unsigned char fetched_val;

  __asm__ __volatile__(AO_SWIZZLE
                "mov ar.ccv=%[old] ;; cmpxchg1.rel %0=[%1],%[new_val],ar.ccv"
                : "=r"(fetched_val) AO_OUT_ADDR
                : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
                : "memory");
  return fetched_val;
}
#define AO_HAVE_char_fetch_compare_and_swap_release

AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_acquire(volatile unsigned short *addr,
                                unsigned short old, unsigned short new_val)
{
  unsigned short fetched_val;

  __asm__ __volatile__(AO_SWIZZLE
                "mov ar.ccv=%[old] ;; cmpxchg2.acq %0=[%1],%[new_val],ar.ccv"
                : "=r"(fetched_val) AO_OUT_ADDR
                : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
                : "memory");
  return fetched_val;
}
#define AO_HAVE_short_fetch_compare_and_swap_acquire

AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_release(volatile unsigned short *addr,
                                unsigned short old, unsigned short new_val)
{
  unsigned short fetched_val;

  __asm__ __volatile__(AO_SWIZZLE
                "mov ar.ccv=%[old] ;; cmpxchg2.rel %0=[%1],%[new_val],ar.ccv"
                : "=r"(fetched_val) AO_OUT_ADDR
                : AO_IN_ADDR, [new_val]"r"(new_val), [old]"r"((AO_t)old)
                : "memory");
  return fetched_val;
}
#define AO_HAVE_short_fetch_compare_and_swap_release
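
/*
 * Usage sketch (illustrative only): a simple byte spinlock built on
 * the char CAS above.  "lock" is a hypothetical variable.  A release
 * CAS is used to unlock purely to stay within this file's primitives;
 * a release store of 0 would normally suffice.
 *
 *   static volatile unsigned char lock = 0;
 *
 *   while (AO_char_fetch_compare_and_swap_acquire(&lock, 0, 1) != 0)
 *     ;                          // spin until the 0 -> 1 CAS succeeds
 *   ...critical section...
 *   (void)AO_char_fetch_compare_and_swap_release(&lock, 1, 0);
 */
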
#ifdef _ILP32
# define AO_T_IS_INT
  /* TODO: Add compare_double_and_swap_double for the _ILP32 case. */
#else
# ifndef AO_PREFER_GENERALIZED
AO_INLINE unsigned int
AO_int_fetch_and_add1_acquire(volatile unsigned int *addr)
{
  unsigned int result;

  __asm__ __volatile__("fetchadd4.acq %0=[%1],1"
                       : "=r" (result) : AO_IN_ADDR
                       : "memory");
  return result;
}
# define AO_HAVE_int_fetch_and_add1_acquire

AO_INLINE unsigned int
AO_int_fetch_and_add1_release(volatile unsigned int *addr)
{
  unsigned int result;

  __asm__ __volatile__("fetchadd4.rel %0=[%1],1"
                       : "=r" (result) : AO_IN_ADDR
                       : "memory");
  return result;
}
# define AO_HAVE_int_fetch_and_add1_release

AO_INLINE unsigned int
AO_int_fetch_and_sub1_acquire(volatile unsigned int *addr)
{
  unsigned int result;

  __asm__ __volatile__("fetchadd4.acq %0=[%1],-1"
                       : "=r" (result) : AO_IN_ADDR
                       : "memory");
  return result;
}
# define AO_HAVE_int_fetch_and_sub1_acquire

AO_INLINE unsigned int
AO_int_fetch_and_sub1_release(volatile unsigned int *addr)
{
  unsigned int result;

  __asm__ __volatile__("fetchadd4.rel %0=[%1],-1"
                       : "=r" (result) : AO_IN_ADDR
                       : "memory");
  return result;
}
# define AO_HAVE_int_fetch_and_sub1_release
# endif /* !AO_PREFER_GENERALIZED */

AO_INLINE unsigned int
AO_int_fetch_compare_and_swap_acquire(volatile unsigned int *addr,
                                unsigned int old, unsigned int new_val)
{
  unsigned int fetched_val;

  __asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg4.acq %0=[%1],%2,ar.ccv"
                       : "=r"(fetched_val)
                       : AO_IN_ADDR, "r"(new_val), "r"((AO_t)old)
                       : "memory");
  return fetched_val;
}
# define AO_HAVE_int_fetch_compare_and_swap_acquire

AO_INLINE unsigned int
AO_int_fetch_compare_and_swap_release(volatile unsigned int *addr,
                                unsigned int old, unsigned int new_val)
{
  unsigned int fetched_val;

  __asm__ __volatile__("mov ar.ccv=%3 ;; cmpxchg4.rel %0=[%1],%2,ar.ccv"
                       : "=r"(fetched_val)
                       : AO_IN_ADDR, "r"(new_val), "r"((AO_t)old)
                       : "memory");
  return fetched_val;
}
# define AO_HAVE_int_fetch_compare_and_swap_release
#endif /* !_ILP32 */
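
/*
 * Usage sketch (illustrative only): a 32-bit event counter built on
 * the int primitives above.  "hits" is a hypothetical variable; the
 * return value is the count held before the increment.
 *
 *   static volatile unsigned int hits = 0;
 *
 *   (void)AO_int_fetch_and_add1_release(&hits);   // record one event
 */
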
/* TODO: Add compare_and_swap_double as soon as there is widely */
/* available hardware that implements it. */