/*
    Copyright 2005-2013 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software
    library without restriction.  Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License.  This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/

#ifndef __TBB_parallel_reduce_H
#define __TBB_parallel_reduce_H

#include <new>
#include "task.h"
#include "aligned_space.h"
#include "partitioner.h"
#include "tbb_profiling.h"

namespace tbb {

namespace interface6 {
//! @cond INTERNAL
namespace internal {

    using namespace tbb::internal;

    /** Values for reduction_context. */
    enum {
        root_task, left_child, right_child
    };

    /** Represented as a char, not enum, for compactness. */
    typedef char reduction_context;

    //! Task type used to combine the partial results of parallel_reduce.
    /** @ingroup algorithms */
    template<typename Body>
    class finish_reduce: public flag_task {
        bool has_right_zombie;
        const reduction_context my_context;
        //! Pointer to body, or NULL if the left child has not yet finished.
        Body* my_body;
        aligned_space<Body,1> zombie_space;
        finish_reduce( reduction_context context_ ) :
            has_right_zombie(false), // TODO: substitute by flag_task::child_stolen?
            my_context(context_),
            my_body(NULL)
        {
        }
        task* execute() {
            if( has_right_zombie ) {
                // Right child was stolen.
                Body* s = zombie_space.begin();
                my_body->join( *s );
                s->~Body();
            }
            if( my_context==left_child )
                itt_store_word_with_release( static_cast<finish_reduce*>(parent())->my_body, my_body );
            return NULL;
        }
        template<typename Range,typename Body_, typename Partitioner>
        friend class start_reduce;
    };

    //! Task type used to split the work of parallel_reduce.
    /** @ingroup algorithms */
    template<typename Range, typename Body, typename Partitioner>
    class start_reduce: public task {
        typedef finish_reduce<Body> finish_type;
        Body* my_body;
        Range my_range;
        typename Partitioner::task_partition_type my_partition;
        reduction_context my_context; // TODO: factor out into start_reduce_base
        /*override*/ task* execute();
        template<typename Body_>
        friend class finish_reduce;
    public:
        //! Constructor used for root task
        start_reduce( const Range& range, Body* body, Partitioner& partitioner ) :
            my_body(body),
            my_range(range),
            my_partition(partitioner),
            my_context(root_task)
        {
        }
        //! Splitting constructor used to generate children.
        /** parent_ becomes left child.  Newly constructed object is right child. */
        start_reduce( start_reduce& parent_, split ) :
            my_body(parent_.my_body),
            my_range(parent_.my_range,split()),
            my_partition(parent_.my_partition,split()),
            my_context(right_child)
        {
            my_partition.set_affinity(*this);
            parent_.my_context = left_child;
        }
        //! Construct right child from the given range as response to the demand.
        /** parent_ remains left child.  Newly constructed object is right child. */
        start_reduce( start_reduce& parent_, const Range& r, depth_t d ) :
            my_body(parent_.my_body),
            my_range(r),
            my_partition(parent_.my_partition,split()),
            my_context(right_child)
        {
            my_partition.set_affinity(*this);
            my_partition.align_depth( d );
            parent_.my_context = left_child;
        }
        //! Update affinity info, if any
        /*override*/ void note_affinity( affinity_id id ) {
            my_partition.note_affinity( id );
        }
        static void run( const Range& range, Body& body, Partitioner& partitioner ) {
            if( !range.empty() ) {
#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP
                task::spawn_root_and_wait( *new(task::allocate_root()) start_reduce(range,&body,partitioner) );
#else
                // Bound context prevents exceptions thrown by the body from affecting nesting or sibling algorithms,
                // and allows users to handle exceptions safely by wrapping parallel_reduce in a try-block.
                task_group_context context;
                task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) );
#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */
            }
        }
#if __TBB_TASK_GROUP_CONTEXT
        static void run( const Range& range, Body& body, Partitioner& partitioner, task_group_context& context ) {
            if( !range.empty() )
                task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) );
        }
#endif /* __TBB_TASK_GROUP_CONTEXT */
        //! Create a continuation task; serves as a callback for the partitioner
        finish_type *create_continuation() {
            return new( allocate_continuation() ) finish_type(my_context);
        }
        //! Run body for range
        void run_body( Range &r ) { (*my_body)( r ); }
    };

    template<typename Range, typename Body, typename Partitioner>
    task* start_reduce<Range,Body,Partitioner>::execute() {
        my_partition.check_being_stolen( *this );
        if( my_context==right_child ) {
            finish_type* parent_ptr = static_cast<finish_type*>(parent());
            if( !itt_load_word_with_acquire(parent_ptr->my_body) ) { // TODO: replace by is_stolen_task() or by parent_ptr->ref_count() == 2???
                // The left sibling has not published its body yet, so this right child is
                // (most likely) a stolen task running concurrently with it: split off a
                // private copy of the body into the preallocated zombie_space, to be
                // joined later by the parent finish_reduce.
                my_body = new( parent_ptr->zombie_space.begin() ) Body(*my_body,split());
                parent_ptr->has_right_zombie = true;
            }
        } else __TBB_ASSERT(my_context==root_task,NULL); // because left leaf spawns right leaves without recycling
        my_partition.execute(*this, my_range);
        if( my_context==left_child ) {
            finish_type* parent_ptr = static_cast<finish_type*>(parent());
            __TBB_ASSERT(my_body!=parent_ptr->zombie_space.begin(),NULL);
            // Publish the left body with release semantics so that a concurrent right
            // child observes it and need not create a zombie copy.
            itt_store_word_with_release(parent_ptr->my_body, my_body );
        }
        return NULL;
    }

    //! Task type used to combine the partial results of parallel_deterministic_reduce.
    /** @ingroup algorithms */
    template<typename Body>
    class finish_deterministic_reduce: public task {
        Body &my_left_body;
        Body my_right_body;
        finish_deterministic_reduce( Body &body ) :
            my_left_body( body ),
            my_right_body( body, split() )
        {
        }
        task* execute() {
            my_left_body.join( my_right_body );
            return NULL;
        }
        template<typename Range,typename Body_>
        friend class start_deterministic_reduce;
    };

    //! Task type used to split the work of parallel_deterministic_reduce.
    /** @ingroup algorithms */
    template<typename Range, typename Body>
    class start_deterministic_reduce: public task {
        typedef finish_deterministic_reduce<Body> finish_type;
        Body &my_body;
        Range my_range;
        /*override*/ task* execute();
        //! Constructor used for root task
        start_deterministic_reduce( const Range& range, Body& body ) :
            my_body( body ),
            my_range( range )
        {
        }
        //! Splitting constructor used to generate children.
        /** parent_ becomes left child.  Newly constructed object is right child. */
        start_deterministic_reduce( start_deterministic_reduce& parent_, finish_type& c ) :
            my_body( c.my_right_body ),
            my_range( parent_.my_range, split() )
        {
        }
    public:
        static void run( const Range& range, Body& body ) {
            if( !range.empty() ) {
#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP
                task::spawn_root_and_wait( *new(task::allocate_root()) start_deterministic_reduce(range,body) );
#else
                // Bound context prevents exceptions thrown by the body from affecting nesting or sibling algorithms,
                // and allows users to handle exceptions safely by wrapping parallel_deterministic_reduce in a try-block.
                task_group_context context;
                task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body) );
#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */
            }
        }
#if __TBB_TASK_GROUP_CONTEXT
        static void run( const Range& range, Body& body, task_group_context& context ) {
            if( !range.empty() )
                task::spawn_root_and_wait( *new(task::allocate_root(context)) start_deterministic_reduce(range,body) );
        }
#endif /* __TBB_TASK_GROUP_CONTEXT */
    };

    template<typename Range, typename Body>
    task* start_deterministic_reduce<Range,Body>::execute() {
        if( !my_range.is_divisible() ) {
            my_body( my_range );
            return NULL;
        } else {
            // Divisible range: hand the right half to a new child task and recycle this
            // task as the continuation's left child; the continuation joins the halves.
            finish_type& c = *new( allocate_continuation() ) finish_type( my_body );
            recycle_as_child_of(c);
            c.set_ref_count(2);
            start_deterministic_reduce& b = *new( c.allocate_child() ) start_deterministic_reduce( *this, c );
            task::spawn(b);
            return this;
        }
    }
} // namespace internal
//! @endcond
} // namespace interface6

//! @cond INTERNAL
namespace internal {
    using interface6::internal::start_reduce;
    using interface6::internal::start_deterministic_reduce;

    //! Auxiliary class for parallel_reduce; for internal use only.
    /** The adaptor class that implements \ref parallel_reduce_body_req "parallel_reduce Body"
        using given \ref parallel_reduce_lambda_req "anonymous function objects".
     **/
    /** @ingroup algorithms */
    template<typename Range, typename Value, typename RealBody, typename Reduction>
    class lambda_reduce_body {
        //FIXME: decide if my_real_body, my_reduction, and identity_element should be copied or referenced
        //       (might require some performance measurements)
        const Value& identity_element;
        const RealBody& my_real_body;
        const Reduction& my_reduction;
        Value my_value;
        lambda_reduce_body& operator= ( const lambda_reduce_body& other );
    public:
        lambda_reduce_body( const Value& identity, const RealBody& body, const Reduction& reduction )
            : identity_element(identity)
            , my_real_body(body)
            , my_reduction(reduction)
            , my_value(identity)
        { }
        lambda_reduce_body( const lambda_reduce_body& other )
            : identity_element(other.identity_element)
            , my_real_body(other.my_real_body)
            , my_reduction(other.my_reduction)
            , my_value(other.my_value)
        { }
        lambda_reduce_body( lambda_reduce_body& other, tbb::split )
            : identity_element(other.identity_element)
            , my_real_body(other.my_real_body)
            , my_reduction(other.my_reduction)
            , my_value(other.identity_element)
        { }
        void operator()(Range& range) {
            my_value = my_real_body(range, const_cast<const Value&>(my_value));
        }
        void join( lambda_reduce_body& rhs ) {
            my_value = my_reduction(const_cast<const Value&>(my_value), const_cast<const Value&>(rhs.my_value));
        }
        Value result() const {
            return my_value;
        }
    };
} // namespace internal
//! @endcond

// Requirements on Range concept are documented in blocked_range.h

/** \page parallel_reduce_body_req Requirements on parallel_reduce body
    Class \c Body implementing the concept of parallel_reduce body must define:
    - \code Body::Body( Body&, split ); \endcode Splitting constructor.
      Must be able to run concurrently with \c operator() and method \c join
    - \code Body::~Body(); \endcode Destructor
    - \code void Body::operator()( Range& r ); \endcode Function call operator applying body to range \c r
      and accumulating the result
    - \code void Body::join( Body& b ); \endcode Join results.
      The result in \c b should be merged into the result of \c this
**/
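
/* A minimal sketch of a conforming body, for illustration only (SumFoo is a
   hypothetical name, not part of the library): it sums floats over a
   blocked_range<float*>.

       struct SumFoo {
           float my_sum;
           SumFoo() : my_sum(0) {}
           SumFoo( SumFoo& other, split ) : my_sum(0) {}       // fresh accumulator for the split-off subrange
           void operator()( const blocked_range<float*>& r ) {
               float sum = my_sum;
               for( float* p=r.begin(); p!=r.end(); ++p )
                   sum += *p;                                  // accumulate over this subrange
               my_sum = sum;
           }
           void join( SumFoo& rhs ) { my_sum += rhs.my_sum; }  // merge partial result computed by rhs
       };
*/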
/** \page parallel_reduce_lambda_req Requirements on parallel_reduce anonymous function objects (lambda functions)
    The functional overloads below take a \c RealBody and a \c Reduction
    (see \c internal::lambda_reduce_body above); the following expressions must be valid:
    - \code Value real_body( const Range& r, const Value& x ); \endcode Process range \c r starting
      from the partial result \c x, and return the updated partial result
    - \code Value reduction( const Value& left, const Value& right ); \endcode Combine two partial results
**/
/** \name parallel_reduce
    See also requirements on \ref range_req "Range" and \ref parallel_reduce_body_req "parallel_reduce Body". **/
//@{

//! Parallel iteration with reduction and default partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body ) {
    internal::start_reduce<Range,Body, const __TBB_DEFAULT_PARTITIONER>::run( range, body, __TBB_DEFAULT_PARTITIONER() );
}
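
/* Usage sketch for the overload above, reusing the hypothetical SumFoo body shown
   after the body requirements (assumes blocked_range.h is included):

       float ParallelSumFoo( float a[], size_t n ) {
           SumFoo sf;
           parallel_reduce( blocked_range<float*>(a,a+n), sf );
           return sf.my_sum;     // final result is accumulated in the body
       }
*/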

//! Parallel iteration with reduction and simple_partitioner
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner ) {
    internal::start_reduce<Range,Body,const simple_partitioner>::run( range, body, partitioner );
}

//! Parallel iteration with reduction and auto_partitioner
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner ) {
    internal::start_reduce<Range,Body,const auto_partitioner>::run( range, body, partitioner );
}

//! Parallel iteration with reduction and affinity_partitioner
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner ) {
    internal::start_reduce<Range,Body,affinity_partitioner>::run( range, body, partitioner );
}

#if __TBB_TASK_GROUP_CONTEXT
//! Parallel iteration with reduction, simple partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner, task_group_context& context ) {
    internal::start_reduce<Range,Body,const simple_partitioner>::run( range, body, partitioner, context );
}

//! Parallel iteration with reduction, auto_partitioner and user-supplied context
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner, task_group_context& context ) {
    internal::start_reduce<Range,Body,const auto_partitioner>::run( range, body, partitioner, context );
}

//! Parallel iteration with reduction, affinity_partitioner and user-supplied context
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner, task_group_context& context ) {
    internal::start_reduce<Range,Body,affinity_partitioner>::run( range, body, partitioner, context );
}
#endif /* __TBB_TASK_GROUP_CONTEXT */
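
/* Usage sketch for the context-aware overloads above: an explicit task_group_context
   lets callers cancel the reduction as a group (illustrative code; sf, a, n as in the
   earlier sketches):

       task_group_context ctx;
       parallel_reduce( blocked_range<float*>(a,a+n), sf, auto_partitioner(), ctx );
       // Elsewhere, ctx.cancel_group_execution() cancels this reduction without
       // affecting unrelated sibling algorithms.
*/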

/** parallel_reduce overloads that work with anonymous function objects
    (see also \ref parallel_reduce_lambda_req "requirements on parallel_reduce anonymous function objects"). **/

//! Parallel iteration with reduction and default partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const __TBB_DEFAULT_PARTITIONER>
                          ::run(range, body, __TBB_DEFAULT_PARTITIONER() );
    return body.result();
}
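
/* Usage sketch for the functional form above: summing an array with C++11 lambdas.
   The first lambda accumulates a subrange into init; the second combines two partial
   sums (illustrative code):

       float total = parallel_reduce(
           blocked_range<float*>(a, a+n),
           0.f,                                  // identity element for addition
           []( const blocked_range<float*>& r, float init ) -> float {
               for( float* p=r.begin(); p!=r.end(); ++p )
                   init += *p;
               return init;
           },
           []( float x, float y ) { return x+y; }
       );
*/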

//! Parallel iteration with reduction and simple_partitioner.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       const simple_partitioner& partitioner ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const simple_partitioner>
                          ::run(range, body, partitioner );
    return body.result();
}

//! Parallel iteration with reduction and auto_partitioner
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       const auto_partitioner& partitioner ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const auto_partitioner>
                          ::run( range, body, partitioner );
    return body.result();
}

//! Parallel iteration with reduction and affinity_partitioner
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       affinity_partitioner& partitioner ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,affinity_partitioner>
                          ::run( range, body, partitioner );
    return body.result();
}

#if __TBB_TASK_GROUP_CONTEXT
//! Parallel iteration with reduction, simple partitioner and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       const simple_partitioner& partitioner, task_group_context& context ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const simple_partitioner>
                          ::run( range, body, partitioner, context );
    return body.result();
}

//! Parallel iteration with reduction, auto_partitioner and user-supplied context
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       const auto_partitioner& partitioner, task_group_context& context ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,const auto_partitioner>
                          ::run( range, body, partitioner, context );
    return body.result();
}

//! Parallel iteration with reduction, affinity_partitioner and user-supplied context
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                       affinity_partitioner& partitioner, task_group_context& context ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction>,affinity_partitioner>
                          ::run( range, body, partitioner, context );
    return body.result();
}
#endif /* __TBB_TASK_GROUP_CONTEXT */

//! Parallel iteration with deterministic reduction.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_deterministic_reduce( const Range& range, Body& body ) {
    internal::start_deterministic_reduce<Range,Body>::run( range, body );
}

#if __TBB_TASK_GROUP_CONTEXT
//! Parallel iteration with deterministic reduction and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Body>
void parallel_deterministic_reduce( const Range& range, Body& body, task_group_context& context ) {
    internal::start_deterministic_reduce<Range,Body>::run( range, body, context );
}
#endif /* __TBB_TASK_GROUP_CONTEXT */

/** parallel_deterministic_reduce overloads that work with anonymous function objects
    (see also \ref parallel_reduce_lambda_req "requirements on parallel_reduce anonymous function objects"). **/

//! Parallel iteration with deterministic reduction.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_deterministic_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction> >
                          ::run(range, body);
    return body.result();
}
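
/* Usage sketch: unlike plain parallel_reduce, the deterministic form splits the range
   and joins partial results in a fixed order, so floating-point sums are reproducible
   from run to run (illustrative code):

       float total = parallel_deterministic_reduce(
           blocked_range<float*>(a, a+n),
           0.f,
           []( const blocked_range<float*>& r, float init ) -> float {
               for( float* p=r.begin(); p!=r.end(); ++p )
                   init += *p;
               return init;
           },
           []( float x, float y ) { return x+y; }
       );
*/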

#if __TBB_TASK_GROUP_CONTEXT
//! Parallel iteration with deterministic reduction and user-supplied context.
/** @ingroup algorithms **/
template<typename Range, typename Value, typename RealBody, typename Reduction>
Value parallel_deterministic_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction,
                                     task_group_context& context ) {
    internal::lambda_reduce_body<Range,Value,RealBody,Reduction> body(identity, real_body, reduction);
    internal::start_deterministic_reduce<Range,internal::lambda_reduce_body<Range,Value,RealBody,Reduction> >
                          ::run( range, body, context );
    return body.result();
}
#endif /* __TBB_TASK_GROUP_CONTEXT */
//@}

} // namespace tbb

#endif /* __TBB_parallel_reduce_H */