generic_pthread.h

/*
 * Copyright (c) 2003 by Hewlett-Packard Company. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* The following is useful primarily for debugging and documentation.   */
/* We define various atomic operations by acquiring a global pthread    */
/* lock.  The resulting implementation will perform poorly, but should  */
/* be correct unless it is used from signal handlers.                    */

/* We assume that all pthread operations act like full memory barriers. */
/* (We believe that is the intent of the specification.)                 */

#include <pthread.h>
#include <assert.h>     /* for the assert() in AO_test_and_set_full below */

#include "test_and_set_t_is_ao_t.h"
        /* This is not necessarily compatible with the native           */
        /* implementation.  But those can't be safely mixed anyway.     */

/* We define only the full barrier variants, and count on the           */
/* generalization section below to fill in the rest.                    */

extern pthread_mutex_t AO_pt_lock;
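
/* AO_pt_lock is only declared here; it needs exactly one definition    */
/* elsewhere, presumably in the library's own source.  A minimal        */
/* sketch of such a definition, assuming nothing beyond <pthread.h>:    */
/*                                                                      */
/*   pthread_mutex_t AO_pt_lock = PTHREAD_MUTEX_INITIALIZER;            */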

AO_INLINE void
AO_nop_full(void)
{
  pthread_mutex_lock(&AO_pt_lock);
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_nop_full
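
/* Note: the lock/unlock pair above carries no payload; it serves       */
/* purely as a full memory barrier, relying on the assumption stated    */
/* at the top of this file that pthread lock operations order all       */
/* preceding and following memory accesses.                             */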

AO_INLINE AO_t
AO_load_full(const volatile AO_t *addr)
{
  AO_t result;

  pthread_mutex_lock(&AO_pt_lock);
  result = *addr;
  pthread_mutex_unlock(&AO_pt_lock);
  return result;
}
#define AO_HAVE_load_full

AO_INLINE void
AO_store_full(volatile AO_t *addr, AO_t val)
{
  pthread_mutex_lock(&AO_pt_lock);
  *addr = val;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_store_full

AO_INLINE unsigned char
AO_char_load_full(const volatile unsigned char *addr)
{
  unsigned char result;

  pthread_mutex_lock(&AO_pt_lock);
  result = *addr;
  pthread_mutex_unlock(&AO_pt_lock);
  return result;
}
#define AO_HAVE_char_load_full

AO_INLINE void
AO_char_store_full(volatile unsigned char *addr, unsigned char val)
{
  pthread_mutex_lock(&AO_pt_lock);
  *addr = val;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_char_store_full

AO_INLINE unsigned short
AO_short_load_full(const volatile unsigned short *addr)
{
  unsigned short result;

  pthread_mutex_lock(&AO_pt_lock);
  result = *addr;
  pthread_mutex_unlock(&AO_pt_lock);
  return result;
}
#define AO_HAVE_short_load_full

AO_INLINE void
AO_short_store_full(volatile unsigned short *addr, unsigned short val)
{
  pthread_mutex_lock(&AO_pt_lock);
  *addr = val;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_short_store_full

AO_INLINE unsigned int
AO_int_load_full(const volatile unsigned int *addr)
{
  unsigned int result;

  pthread_mutex_lock(&AO_pt_lock);
  result = *addr;
  pthread_mutex_unlock(&AO_pt_lock);
  return result;
}
#define AO_HAVE_int_load_full

AO_INLINE void
AO_int_store_full(volatile unsigned int *addr, unsigned int val)
{
  pthread_mutex_lock(&AO_pt_lock);
  *addr = val;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_int_store_full

AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
  AO_TS_VAL_t result;

  pthread_mutex_lock(&AO_pt_lock);
  result = (AO_TS_VAL_t)(*addr);
  *addr = AO_TS_SET;
  pthread_mutex_unlock(&AO_pt_lock);
  assert(result == AO_TS_SET || result == AO_TS_CLEAR);
  return result;
}
#define AO_HAVE_test_and_set_full
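
/* Illustrative usage sketch: a simple spin lock built on the           */
/* test-and-set primitive above, assuming the AO_TS_INITIALIZER and     */
/* AO_CLEAR definitions from atomic_ops.h:                              */
/*                                                                      */
/*   static volatile AO_TS_t example_lock = AO_TS_INITIALIZER;          */
/*                                                                      */
/*   while (AO_test_and_set_full(&example_lock) == AO_TS_SET)           */
/*     ;                          spin until AO_TS_CLEAR was observed   */
/*   ... critical section ...                                           */
/*   AO_CLEAR(&example_lock);     release by storing AO_TS_CLEAR        */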

AO_INLINE AO_t
AO_fetch_and_add_full(volatile AO_t *p, AO_t incr)
{
  AO_t old_val;

  pthread_mutex_lock(&AO_pt_lock);
  old_val = *p;
  *p = old_val + incr;
  pthread_mutex_unlock(&AO_pt_lock);
  return old_val;
}
#define AO_HAVE_fetch_and_add_full
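
/* Illustrative usage sketch: a shared event counter.  The value        */
/* returned by the fetch-and-add is the counter just before the         */
/* increment (the names below are hypothetical):                        */
/*                                                                      */
/*   static volatile AO_t event_count = 0;                              */
/*                                                                      */
/*   AO_t ticket = AO_fetch_and_add_full(&event_count, 1);              */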

AO_INLINE unsigned char
AO_char_fetch_and_add_full(volatile unsigned char *p, unsigned char incr)
{
  unsigned char old_val;

  pthread_mutex_lock(&AO_pt_lock);
  old_val = *p;
  *p = old_val + incr;
  pthread_mutex_unlock(&AO_pt_lock);
  return old_val;
}
#define AO_HAVE_char_fetch_and_add_full

AO_INLINE unsigned short
AO_short_fetch_and_add_full(volatile unsigned short *p, unsigned short incr)
{
  unsigned short old_val;

  pthread_mutex_lock(&AO_pt_lock);
  old_val = *p;
  *p = old_val + incr;
  pthread_mutex_unlock(&AO_pt_lock);
  return old_val;
}
#define AO_HAVE_short_fetch_and_add_full

AO_INLINE unsigned int
AO_int_fetch_and_add_full(volatile unsigned int *p, unsigned int incr)
{
  unsigned int old_val;

  pthread_mutex_lock(&AO_pt_lock);
  old_val = *p;
  *p = old_val + incr;
  pthread_mutex_unlock(&AO_pt_lock);
  return old_val;
}
#define AO_HAVE_int_fetch_and_add_full

AO_INLINE void
AO_and_full(volatile AO_t *p, AO_t value)
{
  pthread_mutex_lock(&AO_pt_lock);
  *p &= value;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_and_full

AO_INLINE void
AO_or_full(volatile AO_t *p, AO_t value)
{
  pthread_mutex_lock(&AO_pt_lock);
  *p |= value;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_or_full

AO_INLINE void
AO_xor_full(volatile AO_t *p, AO_t value)
{
  pthread_mutex_lock(&AO_pt_lock);
  *p ^= value;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_xor_full

AO_INLINE void
AO_char_and_full(volatile unsigned char *p, unsigned char value)
{
  pthread_mutex_lock(&AO_pt_lock);
  *p &= value;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_char_and_full

AO_INLINE void
AO_char_or_full(volatile unsigned char *p, unsigned char value)
{
  pthread_mutex_lock(&AO_pt_lock);
  *p |= value;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_char_or_full

AO_INLINE void
AO_char_xor_full(volatile unsigned char *p, unsigned char value)
{
  pthread_mutex_lock(&AO_pt_lock);
  *p ^= value;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_char_xor_full

AO_INLINE void
AO_short_and_full(volatile unsigned short *p, unsigned short value)
{
  pthread_mutex_lock(&AO_pt_lock);
  *p &= value;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_short_and_full

AO_INLINE void
AO_short_or_full(volatile unsigned short *p, unsigned short value)
{
  pthread_mutex_lock(&AO_pt_lock);
  *p |= value;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_short_or_full

AO_INLINE void
AO_short_xor_full(volatile unsigned short *p, unsigned short value)
{
  pthread_mutex_lock(&AO_pt_lock);
  *p ^= value;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_short_xor_full

AO_INLINE void
AO_int_and_full(volatile unsigned *p, unsigned value)
{
  pthread_mutex_lock(&AO_pt_lock);
  *p &= value;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_int_and_full

AO_INLINE void
AO_int_or_full(volatile unsigned *p, unsigned value)
{
  pthread_mutex_lock(&AO_pt_lock);
  *p |= value;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_int_or_full

AO_INLINE void
AO_int_xor_full(volatile unsigned *p, unsigned value)
{
  pthread_mutex_lock(&AO_pt_lock);
  *p ^= value;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_int_xor_full

AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
                               AO_t new_val)
{
  AO_t fetched_val;

  pthread_mutex_lock(&AO_pt_lock);
  fetched_val = *addr;
  if (fetched_val == old_val)
    *addr = new_val;
  pthread_mutex_unlock(&AO_pt_lock);
  return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap_full
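
/* Illustrative usage sketch: a retry loop built on the                 */
/* fetch-compare-and-swap primitive; the swap succeeded exactly when    */
/* the returned value equals the expected old value.  transform() is    */
/* a hypothetical helper computing the desired new value:               */
/*                                                                      */
/*   static volatile AO_t shared;                                       */
/*   AO_t old_val, new_val;                                             */
/*                                                                      */
/*   do {                                                               */
/*     old_val = AO_load_full(&shared);                                 */
/*     new_val = transform(old_val);                                    */
/*   } while (AO_fetch_compare_and_swap_full(&shared, old_val,          */
/*                                           new_val) != old_val);      */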

AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_full(volatile unsigned char *addr,
                                    unsigned char old_val,
                                    unsigned char new_val)
{
  unsigned char fetched_val;

  pthread_mutex_lock(&AO_pt_lock);
  fetched_val = *addr;
  if (fetched_val == old_val)
    *addr = new_val;
  pthread_mutex_unlock(&AO_pt_lock);
  return fetched_val;
}
#define AO_HAVE_char_fetch_compare_and_swap_full

AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_full(volatile unsigned short *addr,
                                     unsigned short old_val,
                                     unsigned short new_val)
{
  unsigned short fetched_val;

  pthread_mutex_lock(&AO_pt_lock);
  fetched_val = *addr;
  if (fetched_val == old_val)
    *addr = new_val;
  pthread_mutex_unlock(&AO_pt_lock);
  return fetched_val;
}
#define AO_HAVE_short_fetch_compare_and_swap_full

AO_INLINE unsigned
AO_int_fetch_compare_and_swap_full(volatile unsigned *addr, unsigned old_val,
                                   unsigned new_val)
{
  unsigned fetched_val;

  pthread_mutex_lock(&AO_pt_lock);
  fetched_val = *addr;
  if (fetched_val == old_val)
    *addr = new_val;
  pthread_mutex_unlock(&AO_pt_lock);
  return fetched_val;
}
#define AO_HAVE_int_fetch_compare_and_swap_full

/* Unlike real architectures, we define both double-width CAS variants. */

typedef struct {
  AO_t AO_val1;
  AO_t AO_val2;
} AO_double_t;
#define AO_HAVE_double_t
#define AO_DOUBLE_T_INITIALIZER { (AO_t)0, (AO_t)0 }

AO_INLINE AO_double_t
AO_double_load_full(const volatile AO_double_t *addr)
{
  AO_double_t result;

  pthread_mutex_lock(&AO_pt_lock);
  result.AO_val1 = addr->AO_val1;
  result.AO_val2 = addr->AO_val2;
  pthread_mutex_unlock(&AO_pt_lock);
  return result;
}
#define AO_HAVE_double_load_full

AO_INLINE void
AO_double_store_full(volatile AO_double_t *addr, AO_double_t value)
{
  pthread_mutex_lock(&AO_pt_lock);
  addr->AO_val1 = value.AO_val1;
  addr->AO_val2 = value.AO_val2;
  pthread_mutex_unlock(&AO_pt_lock);
}
#define AO_HAVE_double_store_full

AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old1, AO_t old2,
                                       AO_t new1, AO_t new2)
{
  pthread_mutex_lock(&AO_pt_lock);
  if (addr->AO_val1 == old1 && addr->AO_val2 == old2)
    {
      addr->AO_val1 = new1;
      addr->AO_val2 = new2;
      pthread_mutex_unlock(&AO_pt_lock);
      return 1;
    }
  else
    pthread_mutex_unlock(&AO_pt_lock);
  return 0;
}
#define AO_HAVE_compare_double_and_swap_double_full
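
/* Illustrative usage sketch: a double-width CAS is commonly used to    */
/* update a pointer together with a version counter in one step, e.g.   */
/* to sidestep ABA problems.  Assuming a shared AO_double_t named top   */
/* holding { pointer stored as AO_t, counter } and a hypothetical       */
/* new_node pointer:                                                    */
/*                                                                      */
/*   AO_double_t cur = AO_double_load_full(&top);                       */
/*   if (AO_compare_double_and_swap_double_full(&top,                   */
/*           cur.AO_val1, cur.AO_val2,                                  */
/*           (AO_t)new_node, cur.AO_val2 + 1))                          */
/*     { ... update succeeded ... }                                     */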

AO_INLINE int
AO_compare_and_swap_double_full(volatile AO_double_t *addr,
                                AO_t old1, AO_t new1, AO_t new2)
{
  pthread_mutex_lock(&AO_pt_lock);
  if (addr->AO_val1 == old1)
    {
      addr->AO_val1 = new1;
      addr->AO_val2 = new2;
      pthread_mutex_unlock(&AO_pt_lock);
      return 1;
    }
  else
    pthread_mutex_unlock(&AO_pt_lock);
  return 0;
}
#define AO_HAVE_compare_and_swap_double_full

/* We can't use hardware loads and stores, since they don't             */
/* interact correctly with atomic updates.                              */