LLVM OpenMP* Runtime Library
kmp_taskdeps.cpp
/*
 * kmp_taskdeps.cpp
 */


//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


//#define KMP_SUPPORT_GRAPH_OUTPUT 1

#include "kmp.h"
#include "kmp_io.h"
#include "kmp_wait_release.h"

#if OMP_40_ENABLED

// TODO: Improve memory allocation? Keep a list of pre-allocated structures? Allocate in blocks? Re-use finished list entries?
// TODO: don't use atomic ref counters for stack-allocated nodes.
// TODO: find an alternative to atomic refs for heap-allocated nodes?
// TODO: finish graph output support
// TODO: kmp_lock_t seems a tad too big (and heavyweight) for this. Check other runtime locks
// TODO: any ITT support needed?

#ifdef KMP_SUPPORT_GRAPH_OUTPUT
static kmp_int32 kmp_node_id_seed = 0;
#endif

static void
__kmp_init_node ( kmp_depnode_t *node )
{
    node->dn.task = NULL; // set to NULL initially; it will point to the right task once dependences have been processed
    node->dn.successors = NULL;
    __kmp_init_lock(&node->dn.lock);
    node->dn.nrefs = 1; // init creates the first reference to the node
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
    node->dn.id = KMP_TEST_THEN_INC32(&kmp_node_id_seed);
#endif
}

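// Reference counting: every live pointer to a depnode (from a dephash entry,
// a last_ins list, or a successor list) holds one reference. The counter is
// manipulated atomically because successor lists can be walked by releasing
// tasks concurrently with dependence processing; the node is freed once the
// last reference is dropped in __kmp_node_deref.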
static inline kmp_depnode_t *
__kmp_node_ref ( kmp_depnode_t *node )
{
    KMP_TEST_THEN_INC32(&node->dn.nrefs);
    return node;
}

static inline void
__kmp_node_deref ( kmp_info_t *thread, kmp_depnode_t *node )
{
    if (!node) return;

    kmp_int32 n = KMP_TEST_THEN_DEC32(&node->dn.nrefs) - 1;
    if ( n == 0 ) {
        KMP_ASSERT(node->dn.nrefs == 0);
#if USE_FAST_MEMORY
        __kmp_fast_free(thread,node);
#else
        __kmp_thread_free(thread,node);
#endif
    }
}

#define KMP_ACQUIRE_DEPNODE(gtid,n) __kmp_acquire_lock(&(n)->dn.lock,(gtid))
#define KMP_RELEASE_DEPNODE(gtid,n) __kmp_release_lock(&(n)->dn.lock,(gtid))

static void
__kmp_depnode_list_free ( kmp_info_t *thread, kmp_depnode_list *list );

static const kmp_int32 kmp_dephash_log2 = 6;
static const kmp_int32 kmp_dephash_size = (1 << kmp_dephash_log2);

static inline kmp_int32
__kmp_dephash_hash ( kmp_intptr_t addr )
{
    //TODO alternate to try: set = (((Addr64)(addrUsefulBits * 9.618)) % m_num_sets );
    return ((addr >> kmp_dephash_log2) ^ addr) % kmp_dephash_size;
}
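// With kmp_dephash_log2 == 6 the table has 64 buckets, and the hash folds
// bits [6..11] of the address into the low 6 bits, so dependence addresses
// that are close together tend to land in different buckets.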
85 
86 static kmp_dephash_t *
87 __kmp_dephash_create ( kmp_info_t *thread )
88 {
89  kmp_dephash_t *h;
90 
91  kmp_int32 size = kmp_dephash_size * sizeof(kmp_dephash_entry_t) + sizeof(kmp_dephash_t);
92 
93 #if USE_FAST_MEMORY
94  h = (kmp_dephash_t *) __kmp_fast_allocate( thread, size );
95 #else
96  h = (kmp_dephash_t *) __kmp_thread_malloc( thread, size );
97 #endif
98 
99 #ifdef KMP_DEBUG
100  h->nelements = 0;
101 #endif
102  h->buckets = (kmp_dephash_entry **)(h+1);
103 
104  for ( kmp_int32 i = 0; i < kmp_dephash_size; i++ )
105  h->buckets[i] = 0;
106 
107  return h;
108 }
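// Note that the bucket array is carved out of the same allocation as the
// header (it starts at h+1), so a dephash is created and destroyed with a
// single allocation. The size computation reserves sizeof(kmp_dephash_entry_t)
// per bucket even though each bucket only stores a pointer; this over-allocates
// but is harmless.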
109 
110 static void
111 __kmp_dephash_free ( kmp_info_t *thread, kmp_dephash_t *h )
112 {
113  for ( kmp_int32 i=0; i < kmp_dephash_size; i++ ) {
114  if ( h->buckets[i] ) {
115  kmp_dephash_entry_t *next;
116  for ( kmp_dephash_entry_t *entry = h->buckets[i]; entry; entry = next ) {
117  next = entry->next_in_bucket;
118  __kmp_depnode_list_free(thread,entry->last_ins);
119  __kmp_node_deref(thread,entry->last_out);
120 #if USE_FAST_MEMORY
121  __kmp_fast_free(thread,entry);
122 #else
123  __kmp_thread_free(thread,entry);
124 #endif
125  }
126  }
127  }
128 #if USE_FAST_MEMORY
129  __kmp_fast_free(thread,h);
130 #else
131  __kmp_thread_free(thread,h);
132 #endif
133 }
134 
135 static kmp_dephash_entry *
136 __kmp_dephash_find ( kmp_info_t *thread, kmp_dephash_t *h, kmp_intptr_t addr )
137 {
138  kmp_int32 bucket = __kmp_dephash_hash(addr);
139 
140  kmp_dephash_entry_t *entry;
141  for ( entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket )
142  if ( entry->addr == addr ) break;
143 
144  if ( entry == NULL ) {
145  // create entry. This is only done by one thread so no locking required
146 #if USE_FAST_MEMORY
147  entry = (kmp_dephash_entry_t *) __kmp_fast_allocate( thread, sizeof(kmp_dephash_entry_t) );
148 #else
149  entry = (kmp_dephash_entry_t *) __kmp_thread_malloc( thread, sizeof(kmp_dephash_entry_t) );
150 #endif
151  entry->addr = addr;
152  entry->last_out = NULL;
153  entry->last_ins = NULL;
154  entry->next_in_bucket = h->buckets[bucket];
155  h->buckets[bucket] = entry;
156 #ifdef KMP_DEBUG
157  h->nelements++;
158  if ( entry->next_in_bucket ) h->nconflicts++;
159 #endif
160  }
161  return entry;
162 }
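// Each parent task owns one dephash (td_dephash), and entries are only
// created or looked up while that task itself is creating children or waiting
// on dependences. Since a task is executed by at most one thread at a time,
// the hash needs no locking; concurrency only arises later, on the depnodes.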

static kmp_depnode_list_t *
__kmp_add_node ( kmp_info_t *thread, kmp_depnode_list_t *list, kmp_depnode_t *node )
{
    kmp_depnode_list_t *new_head;

#if USE_FAST_MEMORY
    new_head = (kmp_depnode_list_t *) __kmp_fast_allocate(thread,sizeof(kmp_depnode_list_t));
#else
    new_head = (kmp_depnode_list_t *) __kmp_thread_malloc(thread,sizeof(kmp_depnode_list_t));
#endif

    new_head->node = __kmp_node_ref(node);
    new_head->next = list;

    return new_head;
}

static void
__kmp_depnode_list_free ( kmp_info_t *thread, kmp_depnode_list *list )
{
    kmp_depnode_list *next;

    for ( ; list ; list = next ) {
        next = list->next;

        __kmp_node_deref(thread,list->node);
#if USE_FAST_MEMORY
        __kmp_fast_free(thread,list);
#else
        __kmp_thread_free(thread,list);
#endif
    }
}

static inline void
__kmp_track_dependence ( kmp_depnode_t *source, kmp_depnode_t *sink )
{
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
    kmp_taskdata_t * task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
    kmp_taskdata_t * task_sink = KMP_TASK_TO_TASKDATA(sink->dn.task); // this can be NULL when the task was created with if(0)

    __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id, task_source->td_ident->psource, sink->dn.id, task_sink->td_ident->psource);
#endif
}

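// Dependence processing implements the usual RAW/WAR/WAW ordering over the
// per-address hash entries: an "out" dependence must wait for every
// unfinished "in" predecessor in last_ins (or, if there are none, the last
// "out"), while an "in" dependence only waits for the last "out". The
// template parameter selects between the possibly aliased list (filter=true,
// where entries voided by __kmp_check_deps are skipped) and the noalias list.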
template< bool filter >
static inline kmp_int32
__kmp_process_deps ( kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t *hash,
                     bool dep_barrier, kmp_int32 ndeps, kmp_depend_info_t *dep_list)
{
    KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependencies : dep_barrier = %d\n", filter, gtid, ndeps, dep_barrier ) );

    kmp_info_t *thread = __kmp_threads[ gtid ];
    kmp_int32 npredecessors=0;
    for ( kmp_int32 i = 0; i < ndeps ; i++ ) {
        const kmp_depend_info_t * dep = &dep_list[i];

        KMP_DEBUG_ASSERT(dep->flags.in);

        if ( filter && dep->base_addr == 0 ) continue; // skip filtered entries

        kmp_dephash_entry_t *info = __kmp_dephash_find(thread,hash,dep->base_addr);
        kmp_depnode_t *last_out = info->last_out;

        if ( dep->flags.out && info->last_ins ) {
            for ( kmp_depnode_list_t * p = info->last_ins; p; p = p->next ) {
                kmp_depnode_t * indep = p->node;
                if ( indep->dn.task ) {
                    KMP_ACQUIRE_DEPNODE(gtid,indep);
                    if ( indep->dn.task ) {
                        __kmp_track_dependence(indep,node);
                        indep->dn.successors = __kmp_add_node(thread, indep->dn.successors, node);
                        KA_TRACE(40,("__kmp_process_deps<%d>: T#%d adding dependence from %p to %p\n",
                                     filter,gtid, KMP_TASK_TO_TASKDATA(indep->dn.task), KMP_TASK_TO_TASKDATA(node->dn.task)));
                        npredecessors++;
                    }
                    KMP_RELEASE_DEPNODE(gtid,indep);
                }
            }

            __kmp_depnode_list_free(thread,info->last_ins);
            info->last_ins = NULL;

        } else if ( last_out && last_out->dn.task ) {
            KMP_ACQUIRE_DEPNODE(gtid,last_out);
            if ( last_out->dn.task ) {
                __kmp_track_dependence(last_out,node);
                last_out->dn.successors = __kmp_add_node(thread, last_out->dn.successors, node);
                KA_TRACE(40,("__kmp_process_deps<%d>: T#%d adding dependence from %p to %p\n",
                             filter,gtid, KMP_TASK_TO_TASKDATA(last_out->dn.task), KMP_TASK_TO_TASKDATA(node->dn.task)));

                npredecessors++;
            }
            KMP_RELEASE_DEPNODE(gtid,last_out);
        }

        if ( dep_barrier ) {
            // if this is a sync point in the serial sequence, then the previous outputs are guaranteed to be completed after
            // the execution of this task, so the previous output nodes can be cleared.
            __kmp_node_deref(thread,last_out);
            info->last_out = NULL;
        } else {
            if ( dep->flags.out ) {
                __kmp_node_deref(thread,last_out);
                info->last_out = __kmp_node_ref(node);
            } else
                info->last_ins = __kmp_add_node(thread, info->last_ins, node);
        }

    }

    KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter, gtid, npredecessors ) );

    return npredecessors;
}

#define NO_DEP_BARRIER (false)
#define DEP_BARRIER (true)

// returns true if the task has any outstanding dependences
static bool
__kmp_check_deps ( kmp_int32 gtid, kmp_depnode_t *node, kmp_task_t *task, kmp_dephash_t *hash, bool dep_barrier,
                   kmp_int32 ndeps, kmp_depend_info_t *dep_list,
                   kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list )
{
    int i;
    kmp_taskdata_t * taskdata;

    taskdata = KMP_TASK_TO_TASKDATA(task);
    KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependencies for task %p : %d possibly aliased dependencies, %d non-aliased dependencies : dep_barrier=%d .\n", gtid, taskdata, ndeps, ndeps_noalias, dep_barrier ) );

    // Filter deps in dep_list: merge duplicate addresses into one entry
    // TODO: Use a different algorithm for large dep_list ( > 10 ? )
    for ( i = 0; i < ndeps; i ++ ) {
        if ( dep_list[i].base_addr != 0 )
            for ( int j = i+1; j < ndeps; j++ )
                if ( dep_list[i].base_addr == dep_list[j].base_addr ) {
                    dep_list[i].flags.in |= dep_list[j].flags.in;
                    dep_list[i].flags.out |= dep_list[j].flags.out;
                    dep_list[j].base_addr = 0; // mark the j element as void
                }
    }

    // This doesn't need to be atomic, as no other thread is going to be accessing this node just yet.
    // npredecessors is set to -1 to ensure that none of the releasing tasks queues this task before we have finished processing all the dependences.
    node->dn.npredecessors = -1;

    // used to pack all npredecessors additions into a single atomic operation at the end
    int npredecessors;

    npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier, ndeps, dep_list);
    npredecessors += __kmp_process_deps<false>(gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list);

    node->dn.task = task;
    KMP_MB();

    // Account for our initial fake value
    npredecessors++;

    // Update predecessors and obtain the current value to check if there are still any outstanding dependences (some tasks may have finished while we processed the dependences)
    npredecessors = KMP_TEST_THEN_ADD32(&node->dn.npredecessors, npredecessors) + npredecessors;
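    // KMP_TEST_THEN_ADD32 is a fetch-and-add that returns the previous value,
    // so adding npredecessors once more yields the counter's new value.
    // Starting from the fake -1 and adding (found + 1) leaves exactly the
    // number of predecessors that have not yet released this node.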

    KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n", gtid, npredecessors, taskdata ) );

    // beyond this point the task could be queued (and executed) by a releasing task...
    return npredecessors > 0 ? true : false;
}
331 
332 void
333 __kmp_release_deps ( kmp_int32 gtid, kmp_taskdata_t *task )
334 {
335  kmp_info_t *thread = __kmp_threads[ gtid ];
336  kmp_depnode_t *node = task->td_depnode;
337 
338  if ( task->td_dephash ) {
339  KA_TRACE(40, ("__kmp_realease_deps: T#%d freeing dependencies hash of task %p.\n", gtid, task ) );
340  __kmp_dephash_free(thread,task->td_dephash);
341  }
342 
343  if ( !node ) return;
344 
345  KA_TRACE(20, ("__kmp_realease_deps: T#%d notifying succesors of task %p.\n", gtid, task ) );
346 
347  KMP_ACQUIRE_DEPNODE(gtid,node);
348  node->dn.task = NULL; // mark this task as finished, so no new dependencies are generated
349  KMP_RELEASE_DEPNODE(gtid,node);
350 
351  kmp_depnode_list_t *next;
352  for ( kmp_depnode_list_t *p = node->dn.successors; p; p = next ) {
353  kmp_depnode_t *successor = p->node;
354  kmp_int32 npredecessors = KMP_TEST_THEN_DEC32(&successor->dn.npredecessors) - 1;
355 
356  // successor task can be NULL for wait_depends or because deps are still being processed
357  if ( npredecessors == 0 ) {
358  KMP_MB();
359  if ( successor->dn.task ) {
360  KA_TRACE(20, ("__kmp_realease_deps: T#%d successor %p of %p scheduled for execution.\n", gtid, successor->dn.task, task ) );
361  __kmp_omp_task(gtid,successor->dn.task,false);
362  }
363  }
364 
365  next = p->next;
366  __kmp_node_deref(thread,p->node);
367 #if USE_FAST_MEMORY
368  __kmp_fast_free(thread,p);
369 #else
370  __kmp_thread_free(thread,p);
371 #endif
372  }
373 
374  __kmp_node_deref(thread,node);
375 
376  KA_TRACE(20, ("__kmp_realease_deps: T#%d all successors of %p notified of completation\n", gtid, task ) );
377 }
378 
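/*
 Entry point for a deferred task with depend clauses. As a sketch (an
 annotation added here, not part of the original source, assuming the
 kmp_depend_info_t layout from kmp.h with base_addr/len/flags fields), the
 compiler lowering of

     #pragma omp task depend(in: x) depend(out: y)
     { ... }

 builds one kmp_depend_info_t per address and calls this function:

     kmp_depend_info_t deps[2];
     deps[0].base_addr = (kmp_intptr_t)&x; deps[0].len = sizeof(x);
     deps[0].flags.in = 1; deps[0].flags.out = 0;
     deps[1].base_addr = (kmp_intptr_t)&y; deps[1].len = sizeof(y);
     deps[1].flags.in = 1; deps[1].flags.out = 1; // out also sets in; see the flags.in assert above
     __kmpc_omp_task_with_deps(loc, gtid, task, 2, deps, 0, NULL);

 Returns TASK_CURRENT_NOT_QUEUED if the task had blocking dependences;
 otherwise it defers to __kmpc_omp_task.
*/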
kmp_int32
__kmpc_omp_task_with_deps( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task,
                           kmp_int32 ndeps, kmp_depend_info_t *dep_list,
                           kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list )
{

    kmp_taskdata_t * new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata ) );

    kmp_info_t *thread = __kmp_threads[ gtid ];
    kmp_taskdata_t * current_task = thread->th.th_current_task;

    bool serial = current_task->td_flags.team_serial || current_task->td_flags.tasking_ser || current_task->td_flags.final;
#if OMP_41_ENABLED
    serial = serial && !(new_taskdata->td_flags.proxy == TASK_PROXY);
#endif

    if ( !serial && ( ndeps > 0 || ndeps_noalias > 0 )) {
        /* if no dependences have been tracked yet, create the dependence hash */
        if ( current_task->td_dephash == NULL )
            current_task->td_dephash = __kmp_dephash_create(thread);

#if USE_FAST_MEMORY
        kmp_depnode_t *node = (kmp_depnode_t *) __kmp_fast_allocate(thread,sizeof(kmp_depnode_t));
#else
        kmp_depnode_t *node = (kmp_depnode_t *) __kmp_thread_malloc(thread,sizeof(kmp_depnode_t));
#endif

        __kmp_init_node(node);
        new_taskdata->td_depnode = node;

        if ( __kmp_check_deps( gtid, node, new_task, current_task->td_dephash, NO_DEP_BARRIER,
                               ndeps, dep_list, ndeps_noalias, noalias_dep_list ) ) {
            KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking dependencies: "
                          "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n", gtid, loc_ref,
                          new_taskdata ) );
            return TASK_CURRENT_NOT_QUEUED;
        }
    } else {
#if OMP_41_ENABLED
        kmp_task_team_t * task_team = thread->th.th_task_team;
        if ( task_team && task_team->tt.tt_found_proxy_tasks )
            __kmpc_omp_wait_deps ( loc_ref, gtid, ndeps, dep_list, ndeps_noalias, noalias_dep_list );
        else
#endif
            KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependencies for task (serialized) "
                          "loc=%p task=%p\n", gtid, loc_ref, new_taskdata ) );
    }

    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking dependencies : "
                  "loc=%p task=%p, transferring to __kmpc_omp_task\n", gtid, loc_ref,
                  new_taskdata ) );

    return __kmpc_omp_task(loc_ref,gtid,new_task);
}

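/*
 Entry point used for undeferred tasks with depend clauses (e.g. a task
 created with if(0), as the comments above note). It blocks the encountering
 thread until all previously generated sibling tasks with a dependence on the
 listed addresses have completed, reusing the same machinery as task creation
 but with a stack-allocated depnode and DEP_BARRIER semantics, and it executes
 other tasks while waiting rather than spinning idle.
*/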
void
__kmpc_omp_wait_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
                       kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list )
{
    KA_TRACE(10, ("__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref) );

    if ( ndeps == 0 && ndeps_noalias == 0 ) {
        KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no dependencies to wait upon : loc=%p\n", gtid, loc_ref) );
        return;
    }

    kmp_info_t *thread = __kmp_threads[ gtid ];
    kmp_taskdata_t * current_task = thread->th.th_current_task;

    // We can return immediately as:
    // - dependences are not computed in serial teams (except if we have proxy tasks)
    // - if the dephash is not yet created, it means we have nothing to wait for
    bool ignore = current_task->td_flags.team_serial || current_task->td_flags.tasking_ser || current_task->td_flags.final;
#if OMP_41_ENABLED
    ignore = ignore && thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
#endif
    ignore = ignore || current_task->td_dephash == NULL;

    if ( ignore ) {
        KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking dependencies : loc=%p\n", gtid, loc_ref) );
        return;
    }

    kmp_depnode_t node;
    __kmp_init_node(&node);

    if (!__kmp_check_deps( gtid, &node, NULL, current_task->td_dephash, DEP_BARRIER,
                           ndeps, dep_list, ndeps_noalias, noalias_dep_list )) {
        KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking dependencies : loc=%p\n", gtid, loc_ref) );
        return;
    }

    int thread_finished = FALSE;
    kmp_flag_32 flag((volatile kmp_uint32 *)&(node.dn.npredecessors), 0U);
    while ( node.dn.npredecessors > 0 ) {
        flag.execute_tasks(thread, gtid, FALSE, &thread_finished,
#if USE_ITT_BUILD
                           NULL,
#endif
                           __kmp_task_stealing_constraint );
    }

    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n", gtid, loc_ref) );
}

#endif /* OMP_40_ENABLED */