icc_generic.h

/*
    Copyright 2005-2013 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/
#if !defined(__TBB_machine_H) || defined(__TBB_machine_icc_generic_H)
#error Do not #include this internal file directly; use public TBB headers instead.
#endif

#if ! __TBB_ICC_BUILTIN_ATOMICS_PRESENT
#error "Intel C++ Compiler version 12.1 or later is required to use the ICC intrinsics port"
#endif

#define __TBB_machine_icc_generic_H
//ICC mimics the "native" compiler of the target platform
#if _MSC_VER
    #include "msvc_ia32_common.h"
#else
    #include "gcc_ia32_common.h"
#endif

//TODO: make the __TBB_WORDSIZE macro optional for the ICC intrinsics port;
//since compiler intrinsics are used for all the operations, it should be possible.
#if __TBB_x86_32
    #define __TBB_WORDSIZE 4
#else
    #define __TBB_WORDSIZE 8
#endif
#define __TBB_BIG_ENDIAN 0
//__TBB_compiler_fence() is defined just in case, as it does not seem to be used on its own anywhere else
#if _MSC_VER
    //TODO: any way to use the same intrinsics on Windows and Linux?
    #pragma intrinsic(_ReadWriteBarrier)
    #pragma intrinsic(_mm_mfence)
    #define __TBB_compiler_fence()    _ReadWriteBarrier()
    #define __TBB_full_memory_fence() _mm_mfence()
#else
    #define __TBB_compiler_fence()    __asm__ __volatile__("": : :"memory")
    #define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory")
#endif

#define __TBB_control_consistency_helper() __TBB_compiler_fence()
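// Illustrative usage sketch (assuming the usual TBB spin-wait idiom):
// __TBB_control_consistency_helper() is meant to follow a load whose value
// controls subsequent execution, e.g.
//     while( !flag ) __TBB_Yield();
//     __TBB_control_consistency_helper(); // later loads must not float above the loop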

namespace tbb { namespace internal {

//TODO: is there a way to reuse the memory_order enum definition from ICC instead of copy-pasting it?
//However, it seems unlikely that ICC will silently change the exact enum values, as ISO C++11
//defines them exactly like this.
//TODO: add a test that the exact values of the enum match ISO C++11
typedef enum memory_order {
    memory_order_relaxed, memory_order_consume, memory_order_acquire,
    memory_order_release, memory_order_acq_rel, memory_order_seq_cst
} memory_order;
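// A minimal sketch of such a test (illustrative; assumes __TBB_STATIC_ASSERT
// from tbb_stddef.h is visible at this point):
//     __TBB_STATIC_ASSERT( memory_order_relaxed==0 && memory_order_consume==1 &&
//                          memory_order_acquire==2 && memory_order_release==3 &&
//                          memory_order_acq_rel==4 && memory_order_seq_cst==5,
//                          "memory_order values must match ISO C++11" );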

namespace icc_intrinsics_port {
    template <typename T>
    T convert_argument(T value){
        return value;
    }
    //The overload below is needed to convert a pointer argument to void* explicitly.
    //Compiler bug?
    //TODO: add a corresponding "broken" feature macro, and recheck with ICC 13.0 whether the overload is still needed
    template <typename T>
    void* convert_argument(T* value){
        return (void*)value;
    }
}
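// Illustrative sketch of the issue the overload works around (hypothetical example;
// "location" stands for a volatile pointer variable being stored to):
//     T* p = 0;
//     __atomic_store_explicit(&location, p, order);        // may be rejected: no implicit T* to void* conversion
//     __atomic_store_explicit(&location, (void*)p, order); // what convert_argument(p) yields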

//TODO: the code below is a bit repetitive, consider simplifying it
template <typename T, size_t S>
struct machine_load_store {
    static T load_with_acquire ( const volatile T& location ) {
        return __atomic_load_explicit(&location, memory_order_acquire);
    }
    static void store_with_release ( volatile T &location, T value ) {
        __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_release);
    }
};

template <typename T, size_t S>
struct machine_load_store_relaxed {
    static inline T load ( const T& location ) {
        return __atomic_load_explicit(&location, memory_order_relaxed);
    }
    static inline void store ( T& location, T value ) {
        __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_relaxed);
    }
};

template <typename T, size_t S>
struct machine_load_store_seq_cst {
    static T load ( const volatile T& location ) {
        return __atomic_load_explicit(&location, memory_order_seq_cst);
    }
    static void store ( volatile T &location, T value ) {
        __atomic_store_explicit(&location, value, memory_order_seq_cst);
    }
};
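// Usage sketch (assuming the dispatch in tbb_machine.h): these templates are the
// customization points behind the public helpers, e.g.
//     T x = __TBB_load_with_acquire(location); // resolves to machine_load_store<T,sizeof(T)>::load_with_acquire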
}} // namespace tbb::internal

namespace tbb { namespace internal { namespace icc_intrinsics_port {
typedef enum memory_order_map {
    relaxed    = memory_order_relaxed,
    acquire    = memory_order_acquire,
    release    = memory_order_release,
    full_fence = memory_order_seq_cst
} memory_order_map;
}}} // namespace tbb::internal::icc_intrinsics_port

#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,M)                                                       \
inline T __TBB_machine_cmpswp##S##M( volatile void *ptr, T value, T comparand ) {                 \
    __atomic_compare_exchange_strong_explicit(                                                    \
            (T*)ptr                                                                               \
            ,&comparand                                                                           \
            ,value                                                                                \
            , tbb::internal::icc_intrinsics_port::M                                               \
            , tbb::internal::icc_intrinsics_port::M);                                             \
    return comparand;                                                                             \
}                                                                                                 \
                                                                                                  \
inline T __TBB_machine_fetchstore##S##M(volatile void *ptr, T value) {                            \
    return __atomic_exchange_explicit((T*)ptr, value, tbb::internal::icc_intrinsics_port::M);     \
}                                                                                                 \
                                                                                                  \
inline T __TBB_machine_fetchadd##S##M(volatile void *ptr, T value) {                              \
    return __atomic_fetch_add_explicit((T*)ptr, value, tbb::internal::icc_intrinsics_port::M);    \
}
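// For reference: __TBB_MACHINE_DEFINE_ATOMICS(4, tbb::internal::int32_t, acquire) expands to
// __TBB_machine_cmpswp4acquire, __TBB_machine_fetchstore4acquire and __TBB_machine_fetchadd4acquire;
// tbb_machine.h is expected to pick up these fenced variants because __TBB_USE_FENCED_ATOMICS
// is set to 1 below.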

__TBB_MACHINE_DEFINE_ATOMICS(1, tbb::internal::int8_t,  full_fence)
__TBB_MACHINE_DEFINE_ATOMICS(1, tbb::internal::int8_t,  acquire)
__TBB_MACHINE_DEFINE_ATOMICS(1, tbb::internal::int8_t,  release)
__TBB_MACHINE_DEFINE_ATOMICS(1, tbb::internal::int8_t,  relaxed)

__TBB_MACHINE_DEFINE_ATOMICS(2, tbb::internal::int16_t, full_fence)
__TBB_MACHINE_DEFINE_ATOMICS(2, tbb::internal::int16_t, acquire)
__TBB_MACHINE_DEFINE_ATOMICS(2, tbb::internal::int16_t, release)
__TBB_MACHINE_DEFINE_ATOMICS(2, tbb::internal::int16_t, relaxed)

__TBB_MACHINE_DEFINE_ATOMICS(4, tbb::internal::int32_t, full_fence)
__TBB_MACHINE_DEFINE_ATOMICS(4, tbb::internal::int32_t, acquire)
__TBB_MACHINE_DEFINE_ATOMICS(4, tbb::internal::int32_t, release)
__TBB_MACHINE_DEFINE_ATOMICS(4, tbb::internal::int32_t, relaxed)

__TBB_MACHINE_DEFINE_ATOMICS(8, tbb::internal::int64_t, full_fence)
__TBB_MACHINE_DEFINE_ATOMICS(8, tbb::internal::int64_t, acquire)
__TBB_MACHINE_DEFINE_ATOMICS(8, tbb::internal::int64_t, release)
__TBB_MACHINE_DEFINE_ATOMICS(8, tbb::internal::int64_t, relaxed)

#undef __TBB_MACHINE_DEFINE_ATOMICS

#define __TBB_USE_FENCED_ATOMICS 1

namespace tbb { namespace internal {
#if __TBB_FORCE_64BIT_ALIGNMENT_BROKEN
__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(full_fence)
__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(full_fence)

__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(acquire)
__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(release)

__TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(relaxed)
__TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(relaxed)
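// The __TBB_machine_generic_{load,store}8 helpers instantiated above provide a
// fallback path for 8-byte locations that may not be 8-byte aligned; the
// specializations below check alignment at run time and choose between the
// compiler intrinsics and that fallback.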

template <typename T>
struct machine_load_store<T,8> {
    static T load_with_acquire ( const volatile T& location ) {
        if( tbb::internal::is_aligned(&location,8)) {
            return __atomic_load_explicit(&location, memory_order_acquire);
        } else {
            return __TBB_machine_generic_load8acquire(&location);
        }
    }
    static void store_with_release ( volatile T &location, T value ) {
        if( tbb::internal::is_aligned(&location,8)) {
            __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_release);
        } else {
            return __TBB_machine_generic_store8release(&location,value);
        }
    }
};

template <typename T>
struct machine_load_store_relaxed<T,8> {
    static T load( const volatile T& location ) {
        if( tbb::internal::is_aligned(&location,8)) {
            return __atomic_load_explicit(&location, memory_order_relaxed);
        } else {
            return __TBB_machine_generic_load8relaxed(&location);
        }
    }
    static void store( volatile T &location, T value ) {
        if( tbb::internal::is_aligned(&location,8)) {
            __atomic_store_explicit(&location, icc_intrinsics_port::convert_argument(value), memory_order_relaxed);
        } else {
            return __TBB_machine_generic_store8relaxed(&location,value);
        }
    }
};

template <typename T>
struct machine_load_store_seq_cst<T,8> {
    static T load ( const volatile T& location ) {
        if( tbb::internal::is_aligned(&location,8)) {
            return __atomic_load_explicit(&location, memory_order_seq_cst);
        } else {
            return __TBB_machine_generic_load8full_fence(&location);
        }
    }
    static void store ( volatile T &location, T value ) {
        if( tbb::internal::is_aligned(&location,8)) {
            __atomic_store_explicit(&location, value, memory_order_seq_cst);
        } else {
            return __TBB_machine_generic_store8full_fence(&location,value);
        }
    }
};
#endif
}} // namespace tbb::internal

template <typename T>
inline void __TBB_machine_OR( T *operand, T addend ) {
    __atomic_fetch_or_explicit(operand, addend, tbb::internal::memory_order_seq_cst);
}

template <typename T>
inline void __TBB_machine_AND( T *operand, T addend ) {
    __atomic_fetch_and_explicit(operand, addend, tbb::internal::memory_order_seq_cst);
}
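// Usage sketch (hypothetical values; these templates back the machine layer's
// atomic bitwise operations):
//     tbb::internal::uintptr_t flags = 0;
//     __TBB_machine_OR ( &flags,  tbb::internal::uintptr_t(1) ); // atomically set bit 0
//     __TBB_machine_AND( &flags, ~tbb::internal::uintptr_t(1) ); // atomically clear bit 0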