//---------------------------------------------------------------------------//
// Copyright (c) 2013 Kyle Lutz <kyle.r.lutz@gmail.com>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//

#ifndef BOOST_COMPUTE_ALGORITHM_REDUCE_HPP
#define BOOST_COMPUTE_ALGORITHM_REDUCE_HPP

#include <cmath>    // for std::ceil, used when sizing the reduction blocks
#include <iterator>

#include <boost/compute/system.hpp>
#include <boost/compute/functional.hpp>
#include <boost/compute/detail/meta_kernel.hpp>
#include <boost/compute/command_queue.hpp>
#include <boost/compute/container/array.hpp>
#include <boost/compute/container/vector.hpp>
#include <boost/compute/algorithm/copy_n.hpp>
#include <boost/compute/algorithm/detail/inplace_reduce.hpp>
#include <boost/compute/algorithm/detail/reduce_on_gpu.hpp>
#include <boost/compute/algorithm/detail/serial_reduce.hpp>
#include <boost/compute/detail/iterator_range_size.hpp>
#include <boost/compute/memory/local_buffer.hpp>
#include <boost/compute/type_traits/result_of.hpp>

namespace boost {
namespace compute {
namespace detail {

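// First pass of the parallel reduction. Each work-group loads
// 2 * block_size consecutive input values, combines them pairwise into
// local memory, reduces them to a single partial result, and writes it to
// result[get_group_id(0)]. Trailing elements that do not fill a complete
// block are folded into one extra partial result by a small serial kernel.
// Returns the total number of partial results written.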
template<class InputIterator, class OutputIterator, class BinaryFunction>
size_t reduce(InputIterator first,
              size_t count,
              OutputIterator result,
              size_t block_size,
              BinaryFunction function,
              command_queue &queue)
{
    typedef typename
        std::iterator_traits<InputIterator>::value_type
        input_type;
    typedef typename
        boost::compute::result_of<BinaryFunction(input_type, input_type)>::type
        result_type;

    const context &context = queue.get_context();
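    // each block consumes 2 * block_size input values, so block_count is the
    // number of completely filled blocks and total_block_count also counts
    // the final, partially filled block (if any)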
    size_t block_count = count / 2 / block_size;
    size_t total_block_count =
        static_cast<size_t>(std::ceil(float(count) / 2.f / float(block_size)));

    if(block_count != 0){
        meta_kernel k("block_reduce");
        size_t output_arg = k.add_arg<result_type *>(memory_object::global_memory, "output");
        size_t block_arg = k.add_arg<input_type *>(memory_object::local_memory, "block");

        k <<
            "const uint gid = get_global_id(0);\n" <<
            "const uint lid = get_local_id(0);\n" <<

            // copy values to local memory
            "block[lid] = " <<
                function(first[k.make_var<uint_>("gid*2+0")],
                         first[k.make_var<uint_>("gid*2+1")]) << ";\n" <<

            // perform reduction
            "for(uint i = 1; i < " << uint_(block_size) << "; i <<= 1){\n" <<
            "    barrier(CLK_LOCAL_MEM_FENCE);\n" <<
            "    uint mask = (i << 1) - 1;\n" <<
            "    if((lid & mask) == 0){\n" <<
            "        block[lid] = " <<
                         function(k.expr<input_type>("block[lid]"),
                                  k.expr<input_type>("block[lid+i]")) << ";\n" <<
            "    }\n" <<
            "}\n" <<

            // write block result to global output
            "if(lid == 0)\n" <<
            "    output[get_group_id(0)] = block[0];\n";

        kernel kernel = k.compile(context);
        kernel.set_arg(output_arg, result.get_buffer());
        kernel.set_arg(block_arg, local_buffer<input_type>(block_size));

        queue.enqueue_1d_range_kernel(kernel,
                                      0,
                                      block_count * block_size,
                                      block_size);
    }

    // serially reduce any leftovers
    if(block_count * block_size * 2 < count){
        size_t last_block_start = block_count * block_size * 2;

        meta_kernel k("extra_serial_reduce");
        size_t count_arg = k.add_arg<uint_>("count");
        size_t offset_arg = k.add_arg<uint_>("offset");
        size_t output_arg = k.add_arg<result_type *>(memory_object::global_memory, "output");
        size_t output_offset_arg = k.add_arg<uint_>("output_offset");

        k <<
            k.decl<result_type>("result") << " = \n" <<
                first[k.expr<uint_>("offset")] << ";\n" <<
            "for(uint i = offset + 1; i < count; i++)\n" <<
            "    result = " <<
                     function(k.var<result_type>("result"),
                              first[k.var<uint_>("i")]) << ";\n" <<
            "output[output_offset] = result;\n";

        kernel kernel = k.compile(context);
        kernel.set_arg(count_arg, static_cast<uint_>(count));
        kernel.set_arg(offset_arg, static_cast<uint_>(last_block_start));
        kernel.set_arg(output_arg, result.get_buffer());
        kernel.set_arg(output_offset_arg, static_cast<uint_>(block_count));

        queue.enqueue_task(kernel);
    }

    return total_block_count;
}

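// Runs the first reduction pass over [first, first + count), returning a
// device vector that holds one partial result per block.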
template<class InputIterator, class BinaryFunction>
inline vector<
    typename boost::compute::result_of<
        BinaryFunction(
            typename std::iterator_traits<InputIterator>::value_type,
            typename std::iterator_traits<InputIterator>::value_type
        )
    >::type
>
block_reduce(InputIterator first,
             size_t count,
             size_t block_size,
             BinaryFunction function,
             command_queue &queue)
{
    typedef typename
        std::iterator_traits<InputIterator>::value_type
        input_type;
    typedef typename
        boost::compute::result_of<BinaryFunction(input_type, input_type)>::type
        result_type;

    const context &context = queue.get_context();
    size_t total_block_count =
        static_cast<size_t>(std::ceil(float(count) / 2.f / float(block_size)));
    vector<result_type> result_vector(total_block_count, context);

    reduce(first, count, result_vector.begin(), block_size, function, queue);

    return result_vector;
}

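// Reduction for arbitrary binary functions. On CPU devices the range is
// reduced by a single serial kernel; on other devices a block-wise first
// pass produces one partial result per work-group and the partial results
// are then reduced in place. The final value is copied to the result
// iterator, which may be a host or device iterator.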
template<class InputIterator, class OutputIterator, class BinaryFunction>
inline void generic_reduce(InputIterator first,
                           InputIterator last,
                           OutputIterator result,
                           BinaryFunction function,
                           command_queue &queue)
{
    typedef typename
        std::iterator_traits<InputIterator>::value_type
        input_type;
    typedef typename
        boost::compute::result_of<BinaryFunction(input_type, input_type)>::type
        result_type;

    const device &device = queue.get_device();
    const context &context = queue.get_context();

    size_t count = detail::iterator_range_size(first, last);

    if(device.type() & device::cpu){
        boost::compute::vector<result_type> value(1, context);
        detail::serial_reduce(first, last, value.begin(), function, queue);
        boost::compute::copy_n(value.begin(), 1, result, queue);
    }
    else {
        size_t block_size = 256;

        // first pass
        vector<result_type> results = detail::block_reduce(first,
                                                           count,
                                                           block_size,
                                                           function,
                                                           queue);

        if(results.size() > 1){
            detail::inplace_reduce(results.begin(),
                                   results.end(),
                                   function,
                                   queue);
        }

        boost::compute::copy_n(results.begin(), 1, result, queue);
    }
}

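// Overload selected when the reduction function is plus<T>. Addition is
// performed with the specialized reduce_on_gpu() kernel on GPU devices and
// with serial_reduce() on CPU devices; the single resulting value is then
// copied to the result iterator.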
template<class InputIterator, class OutputIterator, class T>
inline void dispatch_reduce(InputIterator first,
                            InputIterator last,
                            OutputIterator result,
                            const plus<T> &function,
                            command_queue &queue)
{
    const context &context = queue.get_context();
    const device &device = queue.get_device();

    // reduce to temporary buffer on device
    array<T, 1> tmp(context);
    if(device.type() & device::cpu){
        detail::serial_reduce(first, last, tmp.begin(), function, queue);
    }
    else {
        reduce_on_gpu(first, last, tmp.begin(), function, queue);
    }

    // copy to result iterator
    copy_n(tmp.begin(), 1, result, queue);
}

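// Fallback overload for arbitrary binary reduction functions; dispatches to
// generic_reduce().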
template<class InputIterator, class OutputIterator, class BinaryFunction>
inline void dispatch_reduce(InputIterator first,
                            InputIterator last,
                            OutputIterator result,
                            BinaryFunction function,
                            command_queue &queue)
{
    generic_reduce(first, last, result, function, queue);
}

} // end detail namespace

/// Returns the result of applying \p function to the elements in the
/// range [\p first, \p last).
///
/// If no function is specified, \c plus will be used.
///
/// \param first first element in the input range
/// \param last last element in the input range
/// \param result iterator pointing to the output
/// \param function binary reduction function
/// \param queue command queue to perform the operation
///
/// The \c reduce() algorithm assumes that the binary reduction function is
/// associative. When used with non-associative functions the result may
/// be non-deterministic and vary in precision. Notably this affects the
/// \c plus<float>() function as floating-point addition is not associative
/// and may produce slightly different results than a serial algorithm.
///
/// This algorithm supports both host and device iterators for the
/// result argument. This allows for values to be reduced and copied
/// to the host all with a single function call.
///
/// For example, to calculate the sum of the values in a device vector and
/// copy the result to a value on the host:
///
/// \snippet test/test_reduce.cpp sum_int
///
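/// As a further sketch (not taken from the test suite), a reduction with a
/// custom binary function might look like the following; the data values and
/// the use of the default queue are illustrative assumptions:
///
/// \code
/// boost::compute::command_queue queue = boost::compute::system::default_queue();
///
/// int data[] = { 2, 3, 4, 5 };
/// boost::compute::vector<int> vec(data, data + 4, queue);
///
/// int product = 0;
/// boost::compute::reduce(
///     vec.begin(), vec.end(), &product, boost::compute::multiplies<int>(), queue
/// );
/// // product == 120
/// \endcode
///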
/// Note that while the \c reduce() algorithm is conceptually identical to
/// the \c accumulate() algorithm, its implementation is substantially more
/// efficient on parallel hardware. For more information, see the documentation
/// on the \c accumulate() algorithm.
///
/// \see accumulate()
template<class InputIterator, class OutputIterator, class BinaryFunction>
inline void reduce(InputIterator first,
                   InputIterator last,
                   OutputIterator result,
                   BinaryFunction function,
                   command_queue &queue = system::default_queue())
{
    if(first == last){
        return;
    }

    detail::dispatch_reduce(first, last, result, function, queue);
}

/// \overload
template<class InputIterator, class OutputIterator>
inline void reduce(InputIterator first,
                   InputIterator last,
                   OutputIterator result,
                   command_queue &queue = system::default_queue())
{
    typedef typename std::iterator_traits<InputIterator>::value_type T;

    if(first == last){
        return;
    }

    detail::dispatch_reduce(first, last, result, plus<T>(), queue);
}

} // end compute namespace
} // end boost namespace

#endif // BOOST_COMPUTE_ALGORITHM_REDUCE_HPP