Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
governor.cpp
Go to the documentation of this file.
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include "governor.h"
20 #include "tbb_main.h"
21 #include "scheduler.h"
22 #include "market.h"
23 #include "arena.h"
24 
26 
27 #include "dynamic_link.h"
28 
29 namespace tbb {
30 namespace internal {
31 
32 //------------------------------------------------------------------------
33 // governor
34 //------------------------------------------------------------------------
35 
36 #if __TBB_SURVIVE_THREAD_SWITCH
37 // Support for interoperability with Intel(R) Cilk(TM) Plus.
38 
39 #if _WIN32
40 #define CILKLIB_NAME "cilkrts20.dll"
41 #else
42 #define CILKLIB_NAME "libcilkrts.so"
43 #endif
44 
46 static __cilk_tbb_retcode (*watch_stack_handler)(struct __cilk_tbb_unwatch_thunk* u,
47  struct __cilk_tbb_stack_op_thunk o);
48 
50 static const dynamic_link_descriptor CilkLinkTable[] = {
51  DLD_NOWEAK(__cilkrts_watch_stack, watch_stack_handler)
52 };
53 
54 static atomic<do_once_state> cilkrts_load_state;
55 
56 bool initialize_cilk_interop() {
57  // Pinning can fail. This is a normal situation, and means that the current
58  // thread does not use cilkrts and consequently does not need interop.
59  return dynamic_link( CILKLIB_NAME, CilkLinkTable, 1, /*handle=*/0, DYNAMIC_LINK_GLOBAL );
60 }
61 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
62 
63 namespace rml {
64  tbb_server* make_private_server( tbb_client& client );
65 }
66 
68 #if USE_PTHREAD
69  int status = theTLS.create(auto_terminate);
70 #else
71  int status = theTLS.create();
72 #endif
73  if( status )
74  handle_perror(status, "TBB failed to initialize task scheduler TLS\n");
77 }
78 
80  theRMLServerFactory.close();
82 #if TBB_USE_ASSERT
84  runtime_warning( "TBB is unloaded while tbb::task_scheduler_init object is alive?" );
85 #endif
86  int status = theTLS.destroy();
87  if( status )
88  runtime_warning("failed to destroy task scheduler TLS: %s", strerror(status));
90 }
91 
92 rml::tbb_server* governor::create_rml_server ( rml::tbb_client& client ) {
93  rml::tbb_server* server = NULL;
94  if( !UsePrivateRML ) {
95  ::rml::factory::status_type status = theRMLServerFactory.make_server( server, client );
96  if( status != ::rml::factory::st_success ) {
97  UsePrivateRML = true;
98  runtime_warning( "rml::tbb_factory::make_server failed with status %x, falling back on private rml", status );
99  }
100  }
101  if ( !server ) {
102  __TBB_ASSERT( UsePrivateRML, NULL );
103  server = rml::make_private_server( client );
104  }
105  __TBB_ASSERT( server, "Failed to create RML server" );
106  return server;
107 }
108 
109 
111  __TBB_ASSERT( (uintptr_t(s)&1) == 0, "Bad pointer to the scheduler" );
112  // LSB marks the scheduler initialized with arena
113  return uintptr_t(s) | uintptr_t((s && (s->my_arena || s->is_worker()))? 1 : 0);
114 }
115 
117  theTLS.set( tls_value_of(s) );
118 }
119 
121  return theTLS.get() == tls_value_of(s);
122 }
123 
125  __TBB_ASSERT( is_set(NULL) && s, NULL );
126  assume_scheduler( s );
127 #if __TBB_SURVIVE_THREAD_SWITCH
128  if( watch_stack_handler ) {
130  o.routine = &stack_op_handler;
131  o.data = s;
132  if( (*watch_stack_handler)(&s->my_cilk_unwatch_thunk, o) ) {
133  // Failed to register with cilkrts, make sure we are clean
134  s->my_cilk_unwatch_thunk.routine = NULL;
135  }
136 #if TBB_USE_ASSERT
137  else
138  s->my_cilk_state = generic_scheduler::cs_running;
139 #endif /* TBB_USE_ASSERT */
140  }
141 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
142  __TBB_ASSERT( is_set(s), NULL );
143 }
144 
147  __TBB_ASSERT( is_set(s), "attempt to unregister a wrong scheduler instance" );
148  assume_scheduler(NULL);
149 #if __TBB_SURVIVE_THREAD_SWITCH
150  __cilk_tbb_unwatch_thunk &ut = s->my_cilk_unwatch_thunk;
151  if ( ut.routine )
152  (*ut.routine)(ut.data);
153 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
154 }
155 
159 #if __TBB_SURVIVE_THREAD_SWITCH
160  atomic_do_once( &initialize_cilk_interop, cilkrts_load_state );
161 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
162 }
163 
165  one_time_init();
166  __TBB_ASSERT( is_set(NULL), "TLS contains a scheduler?" );
167  generic_scheduler* s = generic_scheduler::create_master( NULL ); // without arena
168  s->my_auto_initialized = true;
169  return s;
170 }
171 
// Processes a (possibly nested) scheduler-initialization request for a master
// thread. Three paths:
//   1. TLS already holds a fully initialized scheduler (low bit set) - just
//      bump the refcount for explicit task_scheduler_init instances.
//   2. TLS holds a weakly (auto-)initialized scheduler without an arena - lazily
//      attach a default arena and mark it fully initialized.
//   3. TLS is empty - create a fresh arena and master scheduler.
// NOTE(review): this Doxygen dump elides original lines 175 and 196, which
// presumably declare `s` (tls_scheduler_of(v) and the create_master result
// respectively) - confirm against the real governor.cpp before editing.
172 generic_scheduler* governor::init_scheduler( int num_threads, stack_size_type stack_size, bool auto_init ) {
173  one_time_init();
174  if ( uintptr_t v = theTLS.get() ) {
176  if ( (v&1) == 0 ) { // TLS holds scheduler instance without arena
  // A weakly initialized scheduler must be pristine: exactly one reference,
  // no arena yet, and created through the auto-initialization path.
177  __TBB_ASSERT( s->my_ref_count == 1, "weakly initialized scheduler must have refcount equal to 1" );
178  __TBB_ASSERT( !s->my_arena, "weakly initialized scheduler must have no arena" );
179  __TBB_ASSERT( s->my_auto_initialized, "weakly initialized scheduler is supposed to be auto-initialized" );
180  s->attach_arena( market::create_arena( default_num_threads(), 1, 0 ), 0, /*is_master*/true );
181  __TBB_ASSERT( s->my_arena_index == 0, "Master thread must occupy the first slot in its arena" );
182  s->my_arena_slot->my_scheduler = s;
183  s->my_arena->my_default_ctx = s->default_context(); // it also transfers implied ownership
184  // Mark the scheduler as fully initialized
185  assume_scheduler( s );
186  }
187  // Increment refcount only for explicit instances of task_scheduler_init.
188  if ( !auto_init ) s->my_ref_count += 1;
189  __TBB_ASSERT( s->my_arena, "scheduler is not initialized fully" );
190  return s;
191  }
192  // Create new scheduler instance with arena
193  if( num_threads == task_scheduler_init::automatic )
194  num_threads = default_num_threads();
195  arena *a = market::create_arena( num_threads, 1, stack_size );
197  __TBB_ASSERT(s, "Somehow a local scheduler creation for a master thread failed");
198  __TBB_ASSERT( is_set(s), NULL );
199  s->my_auto_initialized = auto_init;
200  return s;
201 }
202 
204  bool ok = false;
205  __TBB_ASSERT( is_set(s), "Attempt to terminate non-local scheduler instance" );
206  if (0 == --(s->my_ref_count)) {
207  ok = s->cleanup_master( blocking );
208  __TBB_ASSERT( is_set(NULL), "cleanup_master has not cleared its TLS slot" );
209  }
210  return ok;
211 }
212 
// TLS destructor / interop callback that undoes automatic (lazy) scheduler
// initialization for the exiting thread. `arg` is the raw TLS word that was
// stored for the thread. Explicitly initialized schedulers (my_auto_initialized
// == false) are left for task_scheduler_init::terminate to clean up.
// NOTE(review): original line 220 is elided by this dump - presumably it
// restores the TLS value (assume_scheduler(s)) before cleanup; confirm against
// the real governor.cpp.
213 void governor::auto_terminate(void* arg){
214  generic_scheduler* s = tls_scheduler_of( uintptr_t(arg) ); // arg is equivalent to theTLS.get()
215  if( s && s->my_auto_initialized ) {
216  if( !--(s->my_ref_count) ) {
217  // If the TLS slot is already cleared by OS or underlying concurrency
218  // runtime, restore its value.
219  if( !is_set(s) )
  // Non-blocking: a thread being torn down cannot wait for workers to join.
221  s->cleanup_master( /*blocking_terminate=*/false );
222  __TBB_ASSERT( is_set(NULL), "cleanup_master has not cleared its TLS slot" );
223  }
224  }
225 }
226 
228  if ( UsePrivateRML )
229  PrintExtraVersionInfo( "RML", "private" );
230  else {
231  PrintExtraVersionInfo( "RML", "shared" );
232  theRMLServerFactory.call_with_server_info( PrintRMLVersionInfo, (void*)"" );
233  }
234 #if __TBB_SURVIVE_THREAD_SWITCH
235  if( watch_stack_handler )
236  PrintExtraVersionInfo( "CILK", CILKLIB_NAME );
237 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
238 }
239 
241  ::rml::factory::status_type res = theRMLServerFactory.open();
242  UsePrivateRML = res != ::rml::factory::st_success;
243 }
244 
245 #if __TBB_SURVIVE_THREAD_SWITCH
// Callback registered with cilkrts (via __cilkrts_watch_stack in sign_on) and
// invoked when a watched stack changes hands between the Cilk runtime and this
// thread. `data` is the generic_scheduler* captured at registration time.
// Always returns 0. The my_cilk_state bookkeeping exists only under
// TBB_USE_ASSERT and merely validates the ADOPT/ORPHAN/RELEASE protocol.
246 __cilk_tbb_retcode governor::stack_op_handler( __cilk_tbb_stack_op op, void* data ) {
247  __TBB_ASSERT(data,NULL);
248  generic_scheduler* s = static_cast<generic_scheduler*>(data);
249 #if TBB_USE_ASSERT
// Snapshot of the scheduler currently installed in this thread's TLS, plus a
// thread id used only in diagnostic messages.
250  void* current = local_scheduler_if_initialized();
251 #if _WIN32||_WIN64
252  uintptr_t thread_id = GetCurrentThreadId();
253 #else
254  uintptr_t thread_id = uintptr_t(pthread_self());
255 #endif
256 #endif /* TBB_USE_ASSERT */
257  switch( op ) {
// The thread (re)acquires the watched stack: publish `s` in TLS.
258  case CILK_TBB_STACK_ADOPT: {
259  __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo ||
260  current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid adoption" );
261 #if TBB_USE_ASSERT
262  if( current==s )
263  runtime_warning( "redundant adoption of %p by thread %p\n", s, (void*)thread_id );
264  s->my_cilk_state = generic_scheduler::cs_running;
265 #endif /* TBB_USE_ASSERT */
266  assume_scheduler( s );
267  break;
268  }
// The stack is leaving this thread: clear the TLS slot.
269  case CILK_TBB_STACK_ORPHAN: {
270  __TBB_ASSERT( current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid orphaning" );
271 #if TBB_USE_ASSERT
272  s->my_cilk_state = generic_scheduler::cs_limbo;
273 #endif /* TBB_USE_ASSERT */
274  assume_scheduler(NULL);
275  break;
276  }
// cilkrts is done with the stack for good: drop the unwatch thunk so sign_off
// will not call back into cilkrts, then run auto-termination for `s`.
277  case CILK_TBB_STACK_RELEASE: {
278  __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo ||
279  current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid release" );
280 #if TBB_USE_ASSERT
281  s->my_cilk_state = generic_scheduler::cs_freed;
282 #endif /* TBB_USE_ASSERT */
283  s->my_cilk_unwatch_thunk.routine = NULL;
284  auto_terminate( s );
285  break;
286  }
287  default:
288  __TBB_ASSERT(0, "invalid op");
289  }
290  return 0;
291 }
292 #endif /* __TBB_SURVIVE_THREAD_SWITCH */
293 
294 } // namespace internal
295 
296 //------------------------------------------------------------------------
297 // task_scheduler_init
298 //------------------------------------------------------------------------
299 
300 using namespace internal;
301 
303 void task_scheduler_init::initialize( int number_of_threads ) {
304  initialize( number_of_threads, 0 );
305 }
306 
// Full initialization entry point. The high bits of thread_stack_size may
// smuggle in exception-propagation mode flags (propagation_mode_mask); they are
// stripped off before the value is used as an actual stack size.
// number_of_threads == deferred means "do nothing yet" (see terminate()).
307 void task_scheduler_init::initialize( int number_of_threads, stack_size_type thread_stack_size ) {
308 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
// Requested propagation mode, extracted before the flags are masked away.
309  uintptr_t new_mode = thread_stack_size & propagation_mode_mask;
310 #endif
311  thread_stack_size &= ~(stack_size_type)propagation_mode_mask;
312  if( number_of_threads!=deferred ) {
313  __TBB_ASSERT_RELEASE( !my_scheduler, "task_scheduler_init already initialized" );
314  __TBB_ASSERT_RELEASE( number_of_threads==automatic || number_of_threads > 0,
315  "number_of_threads for task_scheduler_init must be automatic or positive" );
316  internal::generic_scheduler *s = governor::init_scheduler( number_of_threads, thread_stack_size, /*auto_init=*/false );
317 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
// Only the outermost task_scheduler_init on this thread may switch the default
// context's exact-exception trait; remember the previous setting so
// internal_terminate can restore it.
318  if ( s->master_outermost_level() ) {
319  uintptr_t &vt = s->default_context()->my_version_and_traits;
320  uintptr_t prev_mode = vt & task_group_context::exact_exception ? propagation_mode_exact : 0;
321  vt = new_mode & propagation_mode_exact ? vt | task_group_context::exact_exception
322  : new_mode & propagation_mode_captured ? vt & ~task_group_context::exact_exception : vt;
// Stash the previous propagation mode in the least significant bit of the
// stored scheduler pointer; internal_terminate masks it off and uses it to
// restore the trait. (The original comment here was truncated by the doc
// extraction; intent reconstructed from internal_terminate - verify.)
326  my_scheduler = static_cast<scheduler*>((generic_scheduler*)((uintptr_t)s | prev_mode));
327  }
328  else
329 #endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
330  my_scheduler = s;
331  } else {
332  __TBB_ASSERT_RELEASE( !thread_stack_size, "deferred initialization ignores stack size setting" );
333  }
334 }
335 
337 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
338  uintptr_t prev_mode = (uintptr_t)my_scheduler & propagation_mode_exact;
339  my_scheduler = (scheduler*)((uintptr_t)my_scheduler & ~(uintptr_t)propagation_mode_exact);
340 #endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
341  generic_scheduler* s = static_cast<generic_scheduler*>(my_scheduler);
342  my_scheduler = NULL;
343  __TBB_ASSERT_RELEASE( s, "task_scheduler_init::terminate without corresponding task_scheduler_init::initialize()");
344 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
345  if ( s->master_outermost_level() ) {
346  uintptr_t &vt = s->default_context()->my_version_and_traits;
347  vt = prev_mode & propagation_mode_exact ? vt | task_group_context::exact_exception
348  : vt & ~task_group_context::exact_exception;
349  }
350 #endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
351  return governor::terminate_scheduler(s, blocking);
352 }
353 
355  internal_terminate(/*blocking_terminate=*/false);
356 }
357 
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
//! Terminate the scheduler, waiting for worker threads to join.
/** Returns true when the blocking join succeeded. On failure, either throws
    eid_blocking_thread_join_impossible (when `throwing` is set and exceptions
    are enabled) or returns false. */
bool task_scheduler_init::internal_blocking_terminate( bool throwing ) {
    const bool joined = internal_terminate( /*blocking_terminate=*/true );
#if TBB_USE_EXCEPTIONS
    if( !joined && throwing )
        throw_exception( eid_blocking_thread_join_impossible );
#else
    suppress_unused_warning( throwing );
#endif
    return joined;
}
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
370 
373 }
374 
375 } // namespace tbb
void atomic_do_once(const F &initializer, atomic< do_once_state > &state)
One-time initialization function.
Definition: tbb_misc.h:206
static void auto_terminate(void *scheduler)
The internal routine to undo automatic initialization.
Definition: governor.cpp:213
static void sign_off(generic_scheduler *s)
Unregister TBB scheduler instance from thread-local storage.
Definition: governor.cpp:145
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
Definition: governor.cpp:116
static void release_resources()
Destroy the thread-local storage key and deinitialize RML.
Definition: governor.cpp:79
void destroy_process_mask()
Definition: tbb_misc.h:259
static void initialize_rml_factory()
Definition: governor.cpp:240
static bool UsePrivateRML
Definition: governor.h:61
static rml::tbb_factory theRMLServerFactory
Definition: governor.h:59
static bool terminate_scheduler(generic_scheduler *s, bool blocking)
Processes scheduler termination request (possibly nested) in a master thread.
Definition: governor.cpp:203
static void acquire_resources()
Create key for thread-local storage and initialize RML.
Definition: governor.cpp:67
OPEN_INTERNAL_NAMESPACE bool dynamic_link(const char *, const dynamic_link_descriptor *, size_t, dynamic_link_handle *handle, int)
void const char const char int ITT_FORMAT __itt_group_sync s
static int __TBB_EXPORTED_FUNC default_num_threads()
Returns the number of threads TBB scheduler would create if initialized by default.
Definition: governor.cpp:371
bool internal_terminate(bool blocking)
Definition: governor.cpp:336
__cilk_tbb_stack_op
static void sign_on(generic_scheduler *s)
Register TBB scheduler instance in thread-local storage.
Definition: governor.cpp:124
void __TBB_EXPORTED_FUNC handle_perror(int error_code, const char *aux_info)
Throws std::runtime_error with what() returning error_code description prefixed with aux_info.
Definition: tbb_misc.cpp:74
void dynamic_unlink_all()
The graph class.
static void one_time_init()
Definition: governor.cpp:156
bool gcc_rethrow_exception_broken()
Definition: tbb_misc.cpp:185
Used to form groups of tasks.
Definition: task.h:332
static generic_scheduler * init_scheduler_weak()
Automatic initialization of scheduler in a master thread with default settings without arena.
Definition: governor.cpp:164
bool cpu_has_speculation()
check for transaction support.
Definition: tbb_misc.cpp:217
static basic_tls< uintptr_t > theTLS
TLS for scheduler instances associated with individual threads.
Definition: governor.h:54
static arena * create_arena(int num_slots, int num_reserved_slots, size_t stack_size)
Creates an arena object.
Definition: market.cpp:296
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
Definition: tbb_stddef.h:377
static bool is_rethrow_broken
Definition: governor.h:65
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void * data
tbb_server * make_private_server(tbb_client &client)
Factory method called from task.cpp to create a private_server.
void __TBB_EXPORTED_FUNC runtime_warning(const char *format,...)
Report a runtime warning.
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
Definition: governor.cpp:120
void PrintExtraVersionInfo(const char *category, const char *format,...)
Prints arbitrary extra TBB version information on stderr.
Definition: tbb_misc.cpp:198
void __TBB_EXPORTED_METHOD terminate()
Inverse of method initialize.
Definition: governor.cpp:354
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
CILK_EXPORT __cilk_tbb_retcode __cilkrts_watch_stack(struct __cilk_tbb_unwatch_thunk *u, struct __cilk_tbb_stack_op_thunk o)
Work stealing task scheduler.
Definition: scheduler.h:120
Association between a handler name and location of pointer to it.
Definition: dynamic_link.h:60
static unsigned default_num_threads()
Definition: governor.h:81
static const int automatic
Typedef for number of threads that is automatic.
void __TBB_EXPORTED_METHOD initialize(int number_of_threads=automatic)
Ensure that scheduler exists for this thread.
Definition: governor.cpp:303
void set(T value)
Definition: tls.h:56
#define DLD_NOWEAK(s, h)
Definition: dynamic_link.h:57
__cilk_tbb_pfn_unwatch_stacks routine
static generic_scheduler * init_scheduler(int num_threads, stack_size_type stack_size, bool auto_init)
Processes scheduler initialization request (possibly nested) in a master thread.
Definition: governor.cpp:172
static bool initialization_done()
Definition: tbb_main.h:64
static generic_scheduler * create_master(arena *a)
Initialize a scheduler for a master thread.
Definition: scheduler.cpp:1248
const int DYNAMIC_LINK_GLOBAL
Definition: dynamic_link.h:77
static generic_scheduler * tls_scheduler_of(uintptr_t v)
Converts TLS value to the scheduler pointer.
Definition: governor.h:115
static void print_version_info()
Definition: governor.cpp:227
#define __TBB_ASSERT_RELEASE(predicate, message)
Definition: tbb_stddef.h:134
static rml::tbb_server * create_rml_server(rml::tbb_client &)
Definition: governor.cpp:92
static generic_scheduler * local_scheduler_if_initialized()
Definition: governor.h:132
void PrintRMLVersionInfo(void *arg, const char *server_info)
A callback routine to print RML version information on stderr.
Definition: tbb_misc.cpp:209
static uintptr_t tls_value_of(generic_scheduler *s)
Computes the value of the TLS.
Definition: governor.cpp:110
int __cilk_tbb_retcode
__cilk_tbb_pfn_stack_op routine
void throw_exception(exception_id eid)
Versionless convenience wrapper for throw_exception_v4()
static bool is_speculation_enabled
Definition: governor.h:64
void DoOneTimeInitializations()
Performs thread-safe lazy one-time general TBB initialization.
Definition: tbb_main.cpp:214
std::size_t stack_size_type

Copyright © 2005-2019 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.