Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
task.h
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef __TBB_task_H
18 #define __TBB_task_H
19 
20 #include "tbb_stddef.h"
21 #include "tbb_machine.h"
22 #include "tbb_profiling.h"
23 #include <climits>
24 
25 typedef struct ___itt_caller *__itt_caller;
26 
27 namespace tbb {
28 
29 class task;
30 class task_list;
31 class task_group_context;
32 
33 // MSVC does not allow taking the address of a member that was defined
34 // privately in task_base and made public in class task via a using declaration.
35 #if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
36 #define __TBB_TASK_BASE_ACCESS public
37 #else
38 #define __TBB_TASK_BASE_ACCESS private
39 #endif
40 
41 namespace internal { //< @cond INTERNAL
42 
43  class allocate_additional_child_of_proxy: no_assign {
44  //! No longer used, but retained for binary layout compatibility. Always NULL.
45  task* self;
46  task& parent;
47  public:
48  explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {
49  suppress_unused_warning( self );
50  }
51  task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
52  void __TBB_EXPORTED_METHOD free( task& ) const;
53  };
54 
55  struct cpu_ctl_env_space { int space[sizeof(internal::uint64_t)/sizeof(int)]; };
56 } //< namespace internal @endcond
57 
58 namespace interface5 {
59  namespace internal {
61 
62  //! Base class for methods that became static in TBB 3.0.
66  class task_base: tbb::internal::no_copy {
67  __TBB_TASK_BASE_ACCESS:
68  friend class tbb::task;
69 
70  //! Schedule task for execution when a worker becomes available.
71  static void spawn( task& t );
72 
73  //! Spawn multiple tasks and clear list.
74  static void spawn( task_list& list );
75 
76  //! Like allocate_child, except that task's parent becomes "t", not this.
79  static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
80  return tbb::internal::allocate_additional_child_of_proxy(t);
81  }
82 
83  //! Destroy a task.
88  static void __TBB_EXPORTED_FUNC destroy( task& victim );
89  };
90  } // internal
91 } // interface5
92 
94 namespace internal {
95 
96  class scheduler: no_copy {
97  public:
99  virtual void spawn( task& first, task*& next ) = 0;
100 
102  virtual void wait_for_all( task& parent, task* child ) = 0;
103 
105  virtual void spawn_root_and_wait( task& first, task*& next ) = 0;
106 
108  // Have to have it just to shut up overzealous compilation warnings
109  virtual ~scheduler() = 0;
110 
112  virtual void enqueue( task& t, void* reserved ) = 0;
113  };
114 
116 
117  typedef intptr_t reference_count;
118 
119  //! An id as used for specifying affinity.
120  typedef unsigned short affinity_id;
121 
122 #if __TBB_TASK_ISOLATION
123  //! A tag for task isolation.
124  typedef intptr_t isolation_tag;
125  const isolation_tag no_isolation = 0;
126 #endif /* __TBB_TASK_ISOLATION */
127 
128 #if __TBB_TASK_GROUP_CONTEXT
129  class generic_scheduler;
130 
131  struct context_list_node_t {
132  context_list_node_t *my_prev,
133  *my_next;
134  };
135 
136  class allocate_root_with_context_proxy: no_assign {
137  task_group_context& my_context;
138  public:
139  allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
140  task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
141  void __TBB_EXPORTED_METHOD free( task& ) const;
142  };
143 #endif /* __TBB_TASK_GROUP_CONTEXT */
144 
145  class allocate_root_proxy: no_assign {
146  public:
147  static task& __TBB_EXPORTED_FUNC allocate( size_t size );
148  static void __TBB_EXPORTED_FUNC free( task& );
149  };
150 
151  class allocate_continuation_proxy: no_assign {
152  public:
153  task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
154  void __TBB_EXPORTED_METHOD free( task& ) const;
155  };
156 
157  class allocate_child_proxy: no_assign {
158  public:
159  task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
160  void __TBB_EXPORTED_METHOD free( task& ) const;
161  };
162 
163 #if __TBB_PREVIEW_CRITICAL_TASKS
164  // TODO: move to class methods when critical task API becomes public
165  void make_critical( task& t );
166  bool is_critical( task& t );
167 #endif
168 
170 
184  class task_prefix {
185  private:
186  friend class tbb::task;
187  friend class tbb::interface5::internal::task_base;
188  friend class tbb::task_list;
189  friend class internal::scheduler;
190  friend class internal::allocate_root_proxy;
191  friend class internal::allocate_child_proxy;
192  friend class internal::allocate_continuation_proxy;
193  friend class internal::allocate_additional_child_of_proxy;
194 #if __TBB_PREVIEW_CRITICAL_TASKS
195  friend void make_critical( task& );
196  friend bool is_critical( task& );
197 #endif
198 
199 #if __TBB_TASK_ISOLATION
200  //! The tag used for task isolation.
201  isolation_tag isolation;
202 #else
203  intptr_t reserved_space_for_task_isolation_tag;
204 #endif /* __TBB_TASK_ISOLATION */
205 
206 #if __TBB_TASK_GROUP_CONTEXT
207  //! Shared context that is used to communicate asynchronous state changes
211  task_group_context* context;
212 #endif /* __TBB_TASK_GROUP_CONTEXT */
213 
214  //! The scheduler that allocated the task, or NULL if the task is big.
220  scheduler* origin;
221 
222 #if __TBB_TASK_PRIORITY
223  union {
224 #endif /* __TBB_TASK_PRIORITY */
225 
227  //! Obsolete. The scheduler that owns the task.
228  scheduler* owner;
229 
230 #if __TBB_TASK_PRIORITY
232  //! Pointer to the next offloaded lower priority task.
233  task* next_offloaded;
234  };
235 #endif /* __TBB_TASK_PRIORITY */
236 
237  //! The task whose reference count includes me.
241  tbb::task* parent;
242 
243  //! Reference count used for synchronization.
248  __TBB_atomic reference_count ref_count;
249 
251  //! Obsolete. Used to be scheduling depth before TBB 2.2.
253  int depth;
254 
255  //! A task::state_type, stored as a byte for compactness.
257  unsigned char state;
258 
259  //! Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
266  unsigned char extra_state;
267 
268  affinity_id affinity;
269 
270  //! "next" field for list of task
271  tbb::task* next;
272 
273  //! The task corresponding to this task_prefix.
274  tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
275  };
276 
277 } // namespace internal
279 
280 #if __TBB_TASK_GROUP_CONTEXT
281 
282 #if __TBB_TASK_PRIORITY
283 namespace internal {
284  static const int priority_stride_v4 = INT_MAX / 4;
285 #if __TBB_PREVIEW_CRITICAL_TASKS
286  // TODO: move into priority_t enum when critical tasks become public feature
287  static const int priority_critical = priority_stride_v4 * 3 + priority_stride_v4 / 3 * 2;
288 #endif
289 }
290 
291 enum priority_t {
292  priority_normal = internal::priority_stride_v4 * 2,
293  priority_low = priority_normal - internal::priority_stride_v4,
294  priority_high = priority_normal + internal::priority_stride_v4
295 };
296 
297 #endif /* __TBB_TASK_PRIORITY */
298 
299 #if TBB_USE_CAPTURED_EXCEPTION
300  class tbb_exception;
301 #else
302  namespace internal {
303  class tbb_exception_ptr;
304  }
305 #endif /* !TBB_USE_CAPTURED_EXCEPTION */
306 
307 class task_scheduler_init;
308 namespace interface7 { class task_arena; }
309 using interface7::task_arena;
310 
312 
332 class task_group_context : internal::no_copy {
333 private:
334  friend class internal::generic_scheduler;
335  friend class task_scheduler_init;
336  friend class task_arena;
337 
338 #if TBB_USE_CAPTURED_EXCEPTION
339  typedef tbb_exception exception_container_type;
340 #else
341  typedef internal::tbb_exception_ptr exception_container_type;
342 #endif
343 
344  enum version_traits_word_layout {
345  traits_offset = 16,
346  version_mask = 0xFFFF,
347  traits_mask = 0xFFFFul << traits_offset
348  };
349 
350 public:
351  enum kind_type {
352  isolated,
353  bound
354  };
355 
356  enum traits_type {
357  exact_exception = 0x0001ul << traits_offset,
358 #if __TBB_FP_CONTEXT
359  fp_settings = 0x0002ul << traits_offset,
360 #endif
361  concurrent_wait = 0x0004ul << traits_offset,
362 #if TBB_USE_CAPTURED_EXCEPTION
363  default_traits = 0
364 #else
365  default_traits = exact_exception
366 #endif /* !TBB_USE_CAPTURED_EXCEPTION */
367  };
368 
369 private:
370  enum state {
371  may_have_children = 1,
372  // the following enumerations must be the last, new 2^x values must go above
373  next_state_value, low_unused_state_bit = (next_state_value-1)*2
374  };
375 
376  union {
377  //! Flavor of this context: bound or isolated.
378  // TODO: describe asynchronous use, and whether any memory semantics are needed
379  __TBB_atomic kind_type my_kind;
380  uintptr_t _my_kind_aligner;
381  };
382 
383  //! Pointer to the context of the parent cancellation group. NULL for isolated contexts.
384  task_group_context *my_parent;
385 
388  //! Used to form the thread specific list of contexts without additional memory allocation.
389  internal::context_list_node_t my_node;
390 
391  //! Used to set and maintain stack stitching point for Intel Performance Tools.
392  __itt_caller itt_caller;
393 
397  //! Leading padding protecting accesses to frequently used members from false sharing.
398  char _leading_padding[internal::NFS_MaxLineSize
399  - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)
400  - sizeof(__itt_caller)
401 #if __TBB_FP_CONTEXT
402  - sizeof(internal::cpu_ctl_env_space)
403 #endif
404  ];
405 
406 #if __TBB_FP_CONTEXT
407 
410  internal::cpu_ctl_env_space my_cpu_ctl_env;
411 #endif
412 
413  //! Specifies whether cancellation was requested for this task group.
414  uintptr_t my_cancellation_requested;
415 
419  //! Version for run-time checks and behavioral traits of the context.
420  uintptr_t my_version_and_traits;
421 
422  //! Pointer to the container storing exception being propagated across this task group.
423  exception_container_type *my_exception;
424 
425  //! Scheduler instance that registered this context in its thread specific list.
426  internal::generic_scheduler *my_owner;
427 
428  //! Internal state (combination of state flags, currently only may_have_children).
429  uintptr_t my_state;
430 
431 #if __TBB_TASK_PRIORITY
432  intptr_t my_priority;
434 #endif /* __TBB_TASK_PRIORITY */
435 
436  //! Description of algorithm for scheduler based instrumentation.
437  internal::string_index my_name;
438 
440 
441  char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*)
442 #if __TBB_TASK_PRIORITY
443  - sizeof(intptr_t)
444 #endif /* __TBB_TASK_PRIORITY */
445  - sizeof(internal::string_index)
446  ];
447 
448 public:
450 
478  task_group_context ( kind_type relation_with_parent = bound,
479  uintptr_t t = default_traits )
480  : my_kind(relation_with_parent)
481  , my_version_and_traits(3 | t)
482  , my_name(internal::CUSTOM_CTX)
483  {
484  init();
485  }
486 
487  // Custom constructor for instrumentation of tbb algorithm
488  task_group_context ( internal::string_index name )
489  : my_kind(bound)
490  , my_version_and_traits(3 | default_traits)
491  , my_name(name)
492  {
493  init();
494  }
495 
496  // Do not introduce standalone unbind method since it will break state propagation assumptions
497  //! Detaches and destroys the context.
498  __TBB_EXPORTED_METHOD ~task_group_context ();
499 
507  //! Forcefully reinitializes the context after the task tree it was associated with is completed.
508  void __TBB_EXPORTED_METHOD reset ();
509 
517  //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
518  bool __TBB_EXPORTED_METHOD cancel_group_execution ();
519 
520  //! Returns true if the context received cancellation request.
521  bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;
522 
529  //! Records the pending exception, and cancels the task group.
530  void __TBB_EXPORTED_METHOD register_pending_exception ();
531 #if __TBB_FP_CONTEXT
532  //! Captures the current FPU control settings to the context.
540  void __TBB_EXPORTED_METHOD capture_fp_settings ();
541 #endif
542 
543 #if __TBB_TASK_PRIORITY
544  void set_priority ( priority_t );
546 
548  priority_t priority () const;
549 #endif /* __TBB_TASK_PRIORITY */
550 
552  uintptr_t traits() const { return my_version_and_traits & traits_mask; }
553 
554 protected:
556 
557  void __TBB_EXPORTED_METHOD init ();
558 
559 private:
560  friend class task;
561  friend class internal::allocate_root_with_context_proxy;
562 
563  static const kind_type binding_required = bound;
564  static const kind_type binding_completed = kind_type(bound+1);
565  static const kind_type detached = kind_type(binding_completed+1);
566  static const kind_type dying = kind_type(detached+1);
567 
569  template <typename T>
570  void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );
571 
573  void bind_to ( internal::generic_scheduler *local_sched );
574 
576  void register_with ( internal::generic_scheduler *local_sched );
577 
578 #if __TBB_FP_CONTEXT
579  // TODO: Consider adding #else stub in order to omit #if sections in other code
581  void copy_fp_settings( const task_group_context &src );
582 #endif /* __TBB_FP_CONTEXT */
583 }; // class task_group_context
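The class just closed is the unit of cancellation and exception propagation: every task belongs to exactly one task_group_context, assigned when the root of its tree is allocated. A minimal usage sketch, assuming a user-defined RootTask derived from tbb::task (RootTask is a placeholder, not part of this header):

    #include "tbb/task.h"

    tbb::task_group_context ctx( tbb::task_group_context::isolated );

    // Thread A: run a task tree inside the context.
    tbb::task& root = *new( tbb::task::allocate_root(ctx) ) RootTask();
    tbb::task::spawn_root_and_wait( root );

    // Thread B (concurrently): request cancellation of everything in ctx.
    // Tasks in the tree can poll task::is_cancelled() and return early.
    ctx.cancel_group_execution();

spawn_root_and_wait() returns once the tree has finished or been cancelled; whether cancellation happened can be checked afterwards with ctx.is_group_execution_cancelled().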
584 
585 #endif /* __TBB_TASK_GROUP_CONTEXT */
586 
588 
589 class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base {
590 
591  //! Set reference count
592  void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );
593 
594  //! Decrement reference count and return its new value.
595  internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();
596 
597 protected:
599  task() {prefix().extra_state=1;}
600 
601 public:
603  virtual ~task() {}
604 
606  virtual task* execute() = 0;
607 
609  enum state_type {
610  //! task is running, and will be destroyed after method execute() completes.
611  executing,
612  //! task to be rescheduled.
613  reexecute,
614  //! task is in ready pool, or is going to be put there, or was just taken off.
615  ready,
616  //! task object is freshly allocated or recycled.
617  allocated,
618  //! task object is on free list, or is going to be put there, or was just taken off.
619  freed,
620  //! task to be recycled as continuation
621  recycle
622 #if __TBB_RECYCLE_TO_ENQUEUE
623  ,to_enqueue
625 #endif
626  };
627 
628  //------------------------------------------------------------------------
629  // Allocating tasks
630  //------------------------------------------------------------------------
631 
632  //! Returns proxy for overloaded new that allocates a root task.
633  static internal::allocate_root_proxy allocate_root() {
634  return internal::allocate_root_proxy();
635  }
636 
637 #if __TBB_TASK_GROUP_CONTEXT
638  static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
640  return internal::allocate_root_with_context_proxy(ctx);
641  }
642 #endif /* __TBB_TASK_GROUP_CONTEXT */
643 
644 
645  //! Returns proxy for overloaded new that allocates a continuation task of *this.
646  internal::allocate_continuation_proxy& allocate_continuation() {
647  return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
648  }
649 
650  //! Returns proxy for overloaded new that allocates a child task of *this.
651  internal::allocate_child_proxy& allocate_child() {
652  return *reinterpret_cast<internal::allocate_child_proxy*>(this);
653  }
654 
656  using task_base::allocate_additional_child_of;
657 
658 #if __TBB_DEPRECATED_TASK_INTERFACE
659 
664  void __TBB_EXPORTED_METHOD destroy( task& t );
665 #else /* !__TBB_DEPRECATED_TASK_INTERFACE */
666  using task_base::destroy;
668 #endif /* !__TBB_DEPRECATED_TASK_INTERFACE */
669 
670  //------------------------------------------------------------------------
671  // Recycling of tasks
672  //------------------------------------------------------------------------
673 
675 
680  //! Change this to be a continuation of its former self.
681  void recycle_as_continuation() {
682  __TBB_ASSERT( prefix().state==executing, "execute not running?" );
683  prefix().state = allocated;
684  }
685 
688  //! Recommended to use, safe variant of recycle_as_continuation.
689  void recycle_as_safe_continuation() {
690  __TBB_ASSERT( prefix().state==executing, "execute not running?" );
691  prefix().state = recycle;
692  }
693 
695  void recycle_as_child_of( task& new_parent ) {
696  internal::task_prefix& p = prefix();
697  __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
698  __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
699  __TBB_ASSERT( p.parent==NULL, "parent must be null" );
700  __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
701  __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
702  p.state = allocated;
703  p.parent = &new_parent;
704 #if __TBB_TASK_GROUP_CONTEXT
705  p.context = new_parent.prefix().context;
706 #endif /* __TBB_TASK_GROUP_CONTEXT */
707  }
708 
710  //! Schedule this for reexecution after current execute() returns.
711  void recycle_to_reexecute() {
712  __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
713  __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
714  prefix().state = reexecute;
715  }
716 
717 #if __TBB_RECYCLE_TO_ENQUEUE
718 
720  void recycle_to_enqueue() {
721  __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
722  prefix().state = to_enqueue;
723  }
724 #endif /* __TBB_RECYCLE_TO_ENQUEUE */
725 
726  //------------------------------------------------------------------------
727  // Spawning and blocking
728  //------------------------------------------------------------------------
729 
731  void set_ref_count( int count ) {
732 #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
733  internal_set_ref_count(count);
734 #else
735  prefix().ref_count = count;
736 #endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
737  }
738 
740  //! Atomically increment reference count.
741  void increment_ref_count() {
742  __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
743  }
744 
746 
747  int add_ref_count( int count ) {
748  internal::call_itt_notify( internal::releasing, &prefix().ref_count );
749  internal::reference_count k = count+__TBB_FetchAndAddW( &prefix().ref_count, count );
750  __TBB_ASSERT( k>=0, "task's reference count underflowed" );
751  if( k==0 )
752  internal::call_itt_notify( internal::acquired, &prefix().ref_count );
753  return int(k);
754  }
755 
757  //! Atomically decrement reference count and returns its new value.
758  int decrement_ref_count() {
759 #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
760  return int(internal_decrement_ref_count());
761 #else
762  return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
763 #endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
764  }
765 
767  using task_base::spawn;
768 
770  void spawn_and_wait_for_all( task& child ) {
771  prefix().owner->wait_for_all( *this, &child );
772  }
773 
776 
778  static void spawn_root_and_wait( task& root ) {
779  root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
780  }
781 
783 
785  static void spawn_root_and_wait( task_list& root_list );
786 
788 
789  void wait_for_all() {
790  prefix().owner->wait_for_all( *this, NULL );
791  }
792 
794 #if __TBB_TASK_PRIORITY
795 
805 #endif /* __TBB_TASK_PRIORITY */
806  static void enqueue( task& t ) {
807  t.prefix().owner->enqueue( t, NULL );
808  }
809 
810 #if __TBB_TASK_PRIORITY
811  static void enqueue( task& t, priority_t p ) {
813 #if __TBB_PREVIEW_CRITICAL_TASKS
814  __TBB_ASSERT( p == priority_low || p == priority_normal || p == priority_high
815  || p == internal::priority_critical, "Invalid priority level value");
816 #else
817  __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value");
818 #endif
819  t.prefix().owner->enqueue( t, (void*)p );
820  }
821 #endif /* __TBB_TASK_PRIORITY */
822 
825  inline static void enqueue( task& t, task_arena& arena
826 #if __TBB_TASK_PRIORITY
827  , priority_t p = priority_t(0)
828 #endif
829  );
830 
832  static task& __TBB_EXPORTED_FUNC self();
833 
835  task* parent() const {return prefix().parent;}
836 
838  void set_parent(task* p) {
839 #if __TBB_TASK_GROUP_CONTEXT
840  __TBB_ASSERT(!p || prefix().context == p->prefix().context, "The tasks must be in the same context");
841 #endif
842  prefix().parent = p;
843  }
844 
845 #if __TBB_TASK_GROUP_CONTEXT
846 
848  task_group_context* context() {return prefix().context;}
849 
851  task_group_context* group () { return prefix().context; }
852 #endif /* __TBB_TASK_GROUP_CONTEXT */
853 
855  bool is_stolen_task() const {
856  return (prefix().extra_state & 0x80)!=0;
857  }
858 
859  //------------------------------------------------------------------------
860  // Debugging
861  //------------------------------------------------------------------------
862 
864  state_type state() const {return state_type(prefix().state);}
865 
867  int ref_count() const {
868 #if TBB_USE_ASSERT
869  internal::reference_count ref_count_ = prefix().ref_count;
870  __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error");
871 #endif
872  return int(prefix().ref_count);
873  }
874 
875  //! Obsolete, and only retained for the sake of backward compatibility. Always returns true.
876  bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;
877 
878  //------------------------------------------------------------------------
879  // Affinity
880  //------------------------------------------------------------------------
881 
883  //! An id as used for specifying affinity.
884  typedef internal::affinity_id affinity_id;
885 
886  //! Set affinity for this task.
887  void set_affinity( affinity_id id ) {prefix().affinity = id;}
888 
889  //! Current affinity of this task.
890  affinity_id affinity() const {return prefix().affinity;}
891 
892  //! Invoked by scheduler to notify task that it ran on unexpected thread.
897  virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );
898 
899 #if __TBB_TASK_GROUP_CONTEXT
900 
910  //! Moves this task from its current group into another one.
911  void __TBB_EXPORTED_METHOD change_group ( task_group_context& ctx );
912 
914  //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
915  bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }
916 
918  bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
919 #else
920  bool is_cancelled () const { return false; }
921 #endif /* __TBB_TASK_GROUP_CONTEXT */
922 
923 #if __TBB_TASK_PRIORITY
924  void set_group_priority ( priority_t p ) { prefix().context->set_priority(p); }
926 
928  priority_t group_priority () const { return prefix().context->priority(); }
929 
930 #endif /* __TBB_TASK_PRIORITY */
931 
932 private:
933  friend class interface5::internal::task_base;
934  friend class task_list;
935  friend class internal::scheduler;
936  friend class internal::allocate_root_proxy;
937 #if __TBB_TASK_GROUP_CONTEXT
938  friend class internal::allocate_root_with_context_proxy;
939 #endif /* __TBB_TASK_GROUP_CONTEXT */
940  friend class internal::allocate_continuation_proxy;
941  friend class internal::allocate_child_proxy;
942  friend class internal::allocate_additional_child_of_proxy;
943 
945 
946  internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
947  return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
948  }
949 #if __TBB_PREVIEW_CRITICAL_TASKS
950  friend void internal::make_critical( task& );
951  friend bool internal::is_critical( task& );
952 #endif
953 }; // class task
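The class just closed is normally used in the blocking style shown in the TBB tutorials: a task allocates children with allocate_child(), sets the reference count to the number of children plus one for the wait, and blocks in spawn_and_wait_for_all(). The sketch below is illustrative only; FibTask and parallel_fib are our names, not part of the header.

    #include "tbb/task.h"

    class FibTask: public tbb::task {
        const long n;
        long* const sum;
    public:
        FibTask( long n_, long* sum_ ) : n(n_), sum(sum_) {}
        tbb::task* execute() __TBB_override {
            if( n < 2 ) {
                *sum = n;
            } else {
                long x, y;
                FibTask& a = *new( allocate_child() ) FibTask( n-1, &x );
                FibTask& b = *new( allocate_child() ) FibTask( n-2, &y );
                set_ref_count( 3 );              // two children + one for the wait
                spawn( b );                      // run b asynchronously
                spawn_and_wait_for_all( a );     // run a, then wait for both
                *sum = x + y;
            }
            return NULL;                         // no scheduler-bypass task
        }
    };

    long parallel_fib( long n ) {
        long sum;
        FibTask& root = *new( tbb::task::allocate_root() ) FibTask( n, &sum );
        tbb::task::spawn_root_and_wait( root ); // spawns, waits, and frees the root
        return sum;
    }

A continuation-passing variant would instead call recycle_as_continuation() (or the safer recycle_as_safe_continuation()) and return one child from execute() to bypass the scheduler.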
954 
955 #if __TBB_PREVIEW_CRITICAL_TASKS
956 namespace internal {
957 inline void make_critical( task& t ) { t.prefix().extra_state |= 0x8; }
958 inline bool is_critical( task& t ) { return bool((t.prefix().extra_state & 0x8) != 0); }
959 } // namespace internal
960 #endif /* __TBB_PREVIEW_CRITICAL_TASKS */
961 
962 //! task that does nothing. Useful for synchronization.
963 
964 class empty_task: public task {
965  task* execute() __TBB_override {
966  return NULL;
967  }
968 };
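empty_task is typically used as a dummy root or continuation whose only purpose is to carry a reference count that other tasks can be waited on through. A common waiting pattern, sketched here with a placeholder MyJob task type:

    tbb::empty_task* dummy = new( tbb::task::allocate_root() ) tbb::empty_task;
    dummy->set_ref_count( 2 );                    // one child + one for wait_for_all
    tbb::task& job = *new( dummy->allocate_child() ) MyJob();
    dummy->spawn( job );
    dummy->wait_for_all();                        // returns once MyJob has completed
    dummy->destroy( *dummy );                     // dummy roots must be freed explicitly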
969 
971 namespace internal {
972  template<typename F>
973  class function_task : public task {
974 #if __TBB_ALLOW_MUTABLE_FUNCTORS
975  F my_func;
976 #else
977  const F my_func;
978 #endif
979  task* execute() __TBB_override {
980  my_func();
981  return NULL;
982  }
983  public:
984  function_task( const F& f ) : my_func(f) {}
985 #if __TBB_CPP11_RVALUE_REF_PRESENT
986  function_task( F&& f ) : my_func( std::move(f) ) {}
987 #endif
988  };
989 } // namespace internal
991 
993 
995 class task_list: internal::no_copy {
996 private:
997  task* first;
998  task** next_ptr;
999  friend class task;
1000  friend class interface5::internal::task_base;
1001 public:
1002  //! Construct empty list
1003  task_list() : first(NULL), next_ptr(&first) {}
1004 
1005  //! Destroys the list, but does not destroy the task objects.
1006  ~task_list() {}
1007 
1009  bool empty() const {return !first;}
1010 
1012  void push_back( task& task ) {
1013  task.prefix().next = NULL;
1014  *next_ptr = &task;
1015  next_ptr = &task.prefix().next;
1016  }
1017 #if __TBB_TODO
1018  // TODO: add this method and implement&document the local execution ordering. See more in generic_scheduler::local_spawn
1020  void push_front( task& task ) {
1021  if( empty() ) {
1022  push_back(task);
1023  } else {
1024  task.prefix().next = first;
1025  first = &task;
1026  }
1027  }
1028 #endif
1029  task& pop_front() {
1031  __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
1032  task* result = first;
1033  first = result->prefix().next;
1034  if( !first ) next_ptr = &first;
1035  return *result;
1036  }
1037 
1039  void clear() {
1040  first=NULL;
1041  next_ptr=&first;
1042  }
1043 };
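A task_list only links tasks together through their prefix "next" fields; it does not own them. A short sketch of batching several root tasks, with MyJob again a placeholder type:

    tbb::task_list batch;
    for( int i = 0; i < 4; ++i )
        batch.push_back( *new( tbb::task::allocate_root() ) MyJob( i ) );
    tbb::task::spawn_root_and_wait( batch );   // spawns every task, waits, and clears the list

push_back() takes the task by reference and pop_front() asserts on an empty list, so check empty() before popping.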
1044 
1045 inline void interface5::internal::task_base::spawn( task& t ) {
1046  t.prefix().owner->spawn( t, t.prefix().next );
1047 }
1048 
1049 inline void interface5::internal::task_base::spawn( task_list& list ) {
1050  if( task* t = list.first ) {
1051  t->prefix().owner->spawn( *t, *list.next_ptr );
1052  list.clear();
1053  }
1054 }
1055 
1056 inline void task::spawn_root_and_wait( task_list& root_list ) {
1057  if( task* t = root_list.first ) {
1058  t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
1059  root_list.clear();
1060  }
1061 }
1062 
1063 } // namespace tbb
1064 
1065 inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
1066  return &tbb::internal::allocate_root_proxy::allocate(bytes);
1067 }
1068 
1069 inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
1070  tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
1071 }
1072 
1073 #if __TBB_TASK_GROUP_CONTEXT
1074 inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
1075  return &p.allocate(bytes);
1076 }
1077 
1078 inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
1079  p.free( *static_cast<tbb::task*>(task) );
1080 }
1081 #endif /* __TBB_TASK_GROUP_CONTEXT */
1082 
1083 inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
1084  return &p.allocate(bytes);
1085 }
1086 
1087 inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
1088  p.free( *static_cast<tbb::task*>(task) );
1089 }
1090 
1091 inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
1092  return &p.allocate(bytes);
1093 }
1094 
1095 inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
1096  p.free( *static_cast<tbb::task*>(task) );
1097 }
1098 
1099 inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
1100  return &p.allocate(bytes);
1101 }
1102 
1103 inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
1104  p.free( *static_cast<tbb::task*>(task) );
1105 }
1106 
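These file-scope overloads are what make the allocation idiom used in the examples above work: the proxy returned by allocate_root(), allocate_child(), allocate_continuation() or allocate_additional_child_of() is passed as the placement argument of new, so the memory comes from the proxy's allocate(), and the paired operator delete runs only if the constructor of the task being built throws. A hypothetical child allocation, for illustration (ChildTask is a placeholder type):

    // inside some task's execute()
    ChildTask& c = *new( allocate_child() ) ChildTask( /* args */ );
    // remember to set_ref_count() appropriately before spawning c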
1107 #endif /* __TBB_task_H */