28 #include "kmp_error.h"
29 #include "kmp_stats.h"
33 #include "ompt-specific.h"
// i_maxmin< T >: compile-time numeric limits for the loop-index types used by
// the static-schedule calculators below. `mx` is the largest and `mn` the
// smallest representable value of each specialized type; the schedulers use
// them to clamp *pupper when chunk arithmetic over/underflows.
// NOTE(review): the extraction has dropped lines here (original numbering
// jumps 37->43->48->...): the primary template body, the `template<>` markers
// and the closing `};` of each specialization are missing from this view.
// NOTE(review): initializing a signed `mn` from 0x80000000 / 0x8000...LL relies
// on an implementation-defined unsigned->signed conversion — worth confirming
// against the project's supported compilers.
37 template<
typename T >
43 struct i_maxmin< int > {
44 static const int mx = 0x7fffffff;
45 static const int mn = 0x80000000;
48 struct i_maxmin< unsigned int > {
49 static const unsigned int mx = 0xffffffff;
50 static const unsigned int mn = 0x00000000;
53 struct i_maxmin< long long > {
54 static const long long mx = 0x7fffffffffffffffLL;
55 static const long long mn = 0x8000000000000000LL;
58 struct i_maxmin< unsigned long long > {
59 static const unsigned long long mx = 0xffffffffffffffffLL;
60 static const unsigned long long mn = 0x0000000000000000LL;
// Out-of-line definitions of traits_t< T >::spec — the printf length/format
// specifier used when the KD_TRACE diagnostics below format loop bounds of
// each supported index type ("d", "u", "lld", "llu").
// NOTE(review): the `template<>` header of each explicit specialization is
// missing from this extraction (numbering gap 60->66).
66 char const * traits_t< int >::spec =
"d";
67 char const * traits_t< unsigned int >::spec =
"u";
68 char const * traits_t< long long >::spec =
"lld";
69 char const * traits_t< unsigned long long >::spec =
"llu";
// __kmp_for_static_init< T >: template worker behind the __kmpc_for_static_init_*
// compiler entry points. Given a loop (*plower..*pupper step incr) and a static
// schedule type, it computes the calling thread's private iteration sub-range
// in place (*plower/*pupper) plus the stride (*pstride), and sets *plastiter
// when this thread executes the last iteration.
// NOTE(review): this extraction skips many original lines (numbering gaps);
// the function signature's first parameters, most braces, `else` branches and
// `#endif`/`return` lines are not visible here — do not edit structurally.
73 template<
typename T >
75 __kmp_for_static_init(
82 typename traits_t< T >::signed_t *pstride,
83 typename traits_t< T >::signed_t incr,
84 typename traits_t< T >::signed_t chunk
// UT/ST: unsigned/signed companions of the index type T, used for trip-count
// arithmetic (UT) and stride/chunk arithmetic (ST).
87 typedef typename traits_t< T >::unsigned_t UT;
88 typedef typename traits_t< T >::signed_t ST;
90 register kmp_int32 gtid = global_tid;
91 register kmp_uint32 tid;
92 register kmp_uint32 nth;
93 register UT trip_count;
94 register kmp_team_t *team;
95 register kmp_info_t *th = __kmp_threads[ gtid ];
// OMPT tracing: capture team/task info up front so the loop_begin callback
// below can report parallel_id/task_id/microtask.
97 #if OMPT_SUPPORT && OMPT_TRACE
98 ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
99 ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
102 KMP_DEBUG_ASSERT( plastiter && plower && pupper && pstride );
103 KE_TRACE( 10, (
"__kmpc_for_static_init called (%d)\n", global_tid));
// Debug trace of the incoming bounds; the format string is built at runtime
// because the %-specifiers depend on T (see traits_t::spec above).
108 buff = __kmp_str_format(
109 "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s," \
110 " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
111 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
112 traits_t< ST >::spec, traits_t< ST >::spec, traits_t< T >::spec );
113 KD_TRACE(100, ( buff, global_tid, schedtype, *plastiter,
114 *plower, *pupper, *pstride, incr, chunk ) );
115 __kmp_str_free( &buff );
// Consistency checking: register this workshare construct and reject a zero
// increment.
119 if ( __kmp_env_consistency_check ) {
120 __kmp_push_workshare( global_tid, ct_pdo, loc );
122 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
// Zero-trip loop: bounds are crossed for the given sign of incr — report and
// return early (the early-return itself is in lines missing from this view).
126 if ( incr > 0 ? (*pupper < *plower) : (*plower < *pupper) ) {
127 if( plastiter != NULL )
137 buff = __kmp_str_format(
138 "__kmpc_for_static_init:(ZERO TRIP) liter=%%d lower=%%%s upper=%%%s stride = %%%s signed?<%s>, loc = %%s\n",
139 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec, traits_t< T >::spec );
140 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride, loc->
psource ) );
141 __kmp_str_free( &buff );
144 KE_TRACE( 10, (
"__kmpc_for_static_init: T#%d return\n", global_tid ) );
// OMPT: fire the loop_begin event even on the zero-trip early exit.
146 #if OMPT_SUPPORT && OMPT_TRACE
147 if ((ompt_status == ompt_status_track_callback) &&
148 ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
149 ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
150 team_info->parallel_id, task_info->task_id,
151 team_info->microtask);
// Teams construct: presumably this branch runs when th_teams_microtask is set
// (the guarding condition is missing from this view) — the thread acts with
// its parent team's tid/team.  TODO confirm against the full source.
161 tid = th->th.th_team->t.t_master_tid;
162 team = th->th.th_team->t.t_parent;
166 tid = __kmp_tid_from_gtid( global_tid );
167 team = th->th.th_team;
// Serialized team: the single thread owns the whole range; stride is the full
// trip count (negative for a descending loop).
171 if ( team -> t.t_serialized ) {
173 if( plastiter != NULL )
176 *pstride = (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
182 buff = __kmp_str_format(
183 "__kmpc_for_static_init: (serial) liter=%%d lower=%%%s upper=%%%s stride = %%%s\n",
184 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec );
185 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
186 __kmp_str_free( &buff );
189 KE_TRACE( 10, (
"__kmpc_for_static_init: T#%d return\n", global_tid ) );
191 #if OMPT_SUPPORT && OMPT_TRACE
192 if ((ompt_status == ompt_status_track_callback) &&
193 ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
194 ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
195 team_info->parallel_id, task_info->task_id,
196 team_info->microtask);
201 nth = team->t.t_nproc;
// Single-thread team (nth == 1, guard missing from this view): same fast path
// as the serialized case.
203 if( plastiter != NULL )
205 *pstride = (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
210 buff = __kmp_str_format(
211 "__kmpc_for_static_init: (serial) liter=%%d lower=%%%s upper=%%%s stride = %%%s\n",
212 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec );
213 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
214 __kmp_str_free( &buff );
217 KE_TRACE( 10, (
"__kmpc_for_static_init: T#%d return\n", global_tid ) );
219 #if OMPT_SUPPORT && OMPT_TRACE
220 if ((ompt_status == ompt_status_track_callback) &&
221 ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
222 ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
223 team_info->parallel_id, task_info->task_id,
224 team_info->microtask);
// Trip count: special-case incr == +/-1 to avoid the division; the general
// cases divide in unsigned space after normalizing direction.
232 trip_count = *pupper - *plower + 1;
233 }
else if (incr == -1) {
234 trip_count = *plower - *pupper + 1;
237 trip_count = (*pupper - *plower) / incr + 1;
239 trip_count = (*plower - *pupper) / ( -incr ) + 1;
// trip_count == 0 with distinct bounds means the unsigned count overflowed.
243 if ( __kmp_env_consistency_check ) {
245 if ( trip_count == 0 && *pupper != *plower ) {
246 __kmp_error_construct( kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo, loc );
// Dispatch on schedule kind (case labels for kmp_sch_static are among the
// missing lines).
251 switch ( schedtype ) {
// Fewer iterations than threads: one iteration per low-numbered thread.
254 if ( trip_count < nth ) {
256 __kmp_static == kmp_sch_static_greedy || \
257 __kmp_static == kmp_sch_static_balanced
259 if ( tid < trip_count ) {
260 *pupper = *plower = *plower + tid * incr;
262 *plower = *pupper + incr;
264 if( plastiter != NULL )
265 *plastiter = ( tid == trip_count - 1 );
// Balanced static: trip_count/nth per thread, first `extras` threads get one
// extra iteration.
267 if ( __kmp_static == kmp_sch_static_balanced ) {
268 register UT small_chunk = trip_count / nth;
269 register UT extras = trip_count % nth;
270 *plower += incr * ( tid * small_chunk + ( tid < extras ? tid : extras ) );
271 *pupper = *plower + small_chunk * incr - ( tid < extras ? 0 : incr );
272 if( plastiter != NULL )
273 *plastiter = ( tid == nth - 1 );
// Greedy static: ceil(trip_count/nth)-sized chunks; *pupper is clamped to the
// type limit on overflow and then back to the original upper bound.
275 register T big_chunk_inc_count = ( trip_count/nth +
276 ( ( trip_count % nth ) ? 1 : 0) ) * incr;
277 register T old_upper = *pupper;
279 KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
282 *plower += tid * big_chunk_inc_count;
283 *pupper = *plower + big_chunk_inc_count - incr;
285 if( *pupper < *plower )
286 *pupper = i_maxmin< T >::mx;
287 if( plastiter != NULL )
288 *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
289 if ( *pupper > old_upper ) *pupper = old_upper;
// Descending-loop mirror of the clamp above.
291 if( *pupper > *plower )
292 *pupper = i_maxmin< T >::mn;
293 if( plastiter != NULL )
294 *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
295 if ( *pupper < old_upper ) *pupper = old_upper;
// Chunked static: each thread takes chunk-sized pieces with a team-wide
// stride of span * nth (span computation is in the missing lines).
301 case kmp_sch_static_chunked:
308 *pstride = span * nth;
309 *plower = *plower + (span * tid);
310 *pupper = *plower + span - incr;
311 if( plastiter != NULL )
312 *plastiter = (tid == ((trip_count - 1)/( UT )chunk) % nth);
316 KMP_ASSERT2( 0,
"__kmpc_for_static_init: unknown scheduling type" );
// ITT frame metadata: master thread reports the loop shape to the profiler
// when forkjoin frames mode 3 is active on an outermost active region.
322 if ( KMP_MASTER_TID(tid) && __itt_metadata_add_ptr && __kmp_forkjoin_frames_mode == 3 &&
324 th->th.th_teams_microtask == NULL &&
326 team->t.t_active_level == 1 )
328 kmp_uint64 cur_chunk = chunk;
331 cur_chunk = trip_count / nth + ( ( trip_count % nth ) ? 1 : 0);
334 __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
// Exit trace of the computed per-thread bounds.
341 buff = __kmp_str_format(
342 "__kmpc_for_static_init: liter=%%d lower=%%%s upper=%%%s stride = %%%s signed?<%s>\n",
343 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec, traits_t< T >::spec );
344 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
345 __kmp_str_free( &buff );
348 KE_TRACE( 10, (
"__kmpc_for_static_init: T#%d return\n", global_tid ) );
350 #if OMPT_SUPPORT && OMPT_TRACE
351 if ((ompt_status == ompt_status_track_callback) &&
352 ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
353 ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
354 team_info->parallel_id, task_info->task_id, team_info->microtask);
// __kmp_dist_for_static_init< T >: template worker behind the
// __kmpc_dist_for_static_init_* entry points (distribute parallel for).
// Performs a two-level split: first the global range across the league's
// teams (result in *pupperDist), then this team's sub-range across its
// threads (result in *plower/*pupper).
// NOTE(review): extraction gaps as above — signature head, braces, `else`
// branches and several guard conditions are missing from this view.
361 template<
typename T >
363 __kmp_dist_for_static_init(
367 kmp_int32 *plastiter,
371 typename traits_t< T >::signed_t *pstride,
372 typename traits_t< T >::signed_t incr,
373 typename traits_t< T >::signed_t chunk
376 typedef typename traits_t< T >::unsigned_t UT;
377 typedef typename traits_t< T >::signed_t ST;
378 register kmp_uint32 tid;
379 register kmp_uint32 nth;
380 register kmp_uint32 team_id;
381 register kmp_uint32 nteams;
382 register UT trip_count;
383 register kmp_team_t *team;
386 KMP_DEBUG_ASSERT( plastiter && plower && pupper && pupperDist && pstride );
387 KE_TRACE( 10, (
"__kmpc_dist_for_static_init called (%d)\n", gtid));
// Entry trace of the incoming global bounds.
392 buff = __kmp_str_format(
393 "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "\
394 "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
395 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
396 traits_t< ST >::spec, traits_t< T >::spec );
397 KD_TRACE(100, ( buff, gtid, schedule, *plastiter,
398 *plower, *pupper, incr, chunk ) );
399 __kmp_str_free( &buff );
// Consistency checks: zero increment, crossed bounds, illegal increment.
403 if( __kmp_env_consistency_check ) {
404 __kmp_push_workshare( gtid, ct_pdo, loc );
406 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
408 if( incr > 0 ? (*pupper < *plower) : (*plower < *pupper) ) {
418 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc );
// Identify this thread within its team and this team within the league;
// __kmp_dist_for_static_init is only valid inside a teams construct.
421 tid = __kmp_tid_from_gtid( gtid );
422 th = __kmp_threads[gtid];
423 KMP_DEBUG_ASSERT(th->th.th_teams_microtask);
424 nth = th->th.th_team_nproc;
425 team = th->th.th_team;
427 nteams = th->th.th_teams_size.nteams;
429 team_id = team->t.t_master_tid;
430 KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
// Global trip count (same +/-1 fast paths as __kmp_for_static_init).
434 trip_count = *pupper - *plower + 1;
435 }
else if(incr == -1) {
436 trip_count = *plower - *pupper + 1;
438 trip_count = (ST)(*pupper - *plower) / incr + 1;
440 *pstride = *pupper - *plower;
// Level 1 — split across teams.  Fewer iterations than teams: one iteration
// for thread 0 of each low-numbered team.
441 if( trip_count <= nteams ) {
443 __kmp_static == kmp_sch_static_greedy || \
444 __kmp_static == kmp_sch_static_balanced
447 if( team_id < trip_count && tid == 0 ) {
448 *pupper = *pupperDist = *plower = *plower + team_id * incr;
450 *pupperDist = *pupper;
451 *plower = *pupper + incr;
453 if( plastiter != NULL )
454 *plastiter = ( tid == 0 && team_id == trip_count - 1 );
// Balanced split across teams: first `extras` teams get one extra iteration.
457 if( __kmp_static == kmp_sch_static_balanced ) {
458 register UT chunkD = trip_count / nteams;
459 register UT extras = trip_count % nteams;
460 *plower += incr * ( team_id * chunkD + ( team_id < extras ? team_id : extras ) );
461 *pupperDist = *plower + chunkD * incr - ( team_id < extras ? 0 : incr );
462 if( plastiter != NULL )
463 *plastiter = ( team_id == nteams - 1 );
// Greedy split across teams with overflow clamping, ascending case…
465 register T chunk_inc_count =
466 ( trip_count / nteams + ( ( trip_count % nteams ) ? 1 : 0) ) * incr;
467 register T upper = *pupper;
468 KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
470 *plower += team_id * chunk_inc_count;
471 *pupperDist = *plower + chunk_inc_count - incr;
474 if( *pupperDist < *plower )
475 *pupperDist = i_maxmin< T >::mx;
476 if( plastiter != NULL )
477 *plastiter = *plower <= upper && *pupperDist > upper - incr;
478 if( *pupperDist > upper )
// An empty team range reports *pupper = *pupperDist (goto/exit lines missing
// from this view).
480 if( *plower > *pupperDist ) {
481 *pupper = *pupperDist;
// …and the descending mirror.
485 if( *pupperDist > *plower )
486 *pupperDist = i_maxmin< T >::mn;
487 if( plastiter != NULL )
488 *plastiter = *plower >= upper && *pupperDist < upper - incr;
489 if( *pupperDist < upper )
491 if( *plower < *pupperDist ) {
492 *pupper = *pupperDist;
// Level 2 — split this team's range [*plower, *pupperDist] across threads.
500 trip_count = *pupperDist - *plower + 1;
501 }
else if(incr == -1) {
502 trip_count = *plower - *pupperDist + 1;
504 trip_count = (ST)(*pupperDist - *plower) / incr + 1;
506 KMP_DEBUG_ASSERT( trip_count );
// Fewer team-local iterations than threads: one iteration per thread.
// Note *plastiter is only ever cleared here (set at level 1, narrowed at
// level 2).
510 if( trip_count <= nth ) {
512 __kmp_static == kmp_sch_static_greedy || \
513 __kmp_static == kmp_sch_static_balanced
515 if( tid < trip_count )
516 *pupper = *plower = *plower + tid * incr;
518 *plower = *pupper + incr;
519 if( plastiter != NULL )
520 if( *plastiter != 0 && !( tid == trip_count - 1 ) )
// Balanced split across threads.
523 if( __kmp_static == kmp_sch_static_balanced ) {
524 register UT chunkL = trip_count / nth;
525 register UT extras = trip_count % nth;
526 *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
527 *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
528 if( plastiter != NULL )
529 if( *plastiter != 0 && !( tid == nth - 1 ) )
// Greedy split across threads with overflow clamping against *pupperDist.
532 register T chunk_inc_count =
533 ( trip_count / nth + ( ( trip_count % nth ) ? 1 : 0) ) * incr;
534 register T upper = *pupperDist;
535 KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
537 *plower += tid * chunk_inc_count;
538 *pupper = *plower + chunk_inc_count - incr;
540 if( *pupper < *plower )
541 *pupper = i_maxmin< T >::mx;
542 if( plastiter != NULL )
543 if( *plastiter != 0 && !(*plower <= upper && *pupper > upper - incr) )
545 if( *pupper > upper )
548 if( *pupper > *plower )
549 *pupper = i_maxmin< T >::mn;
550 if( plastiter != NULL )
551 if( *plastiter != 0 && !(*plower >= upper && *pupper < upper - incr) )
553 if( *pupper < upper )
// Chunked static across threads.
560 case kmp_sch_static_chunked:
566 *pstride = span * nth;
567 *plower = *plower + (span * tid);
568 *pupper = *plower + span - incr;
569 if( plastiter != NULL )
570 if( *plastiter != 0 && !(tid == ((trip_count - 1) / ( UT )chunk) % nth) )
575 KMP_ASSERT2( 0,
"__kmpc_dist_for_static_init: unknown loop scheduling type" );
// Exit trace of both the team-level and thread-level bounds.
584 buff = __kmp_str_format(
585 "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "\
586 "stride=%%%s signed?<%s>\n",
587 traits_t< T >::spec, traits_t< T >::spec, traits_t< T >::spec,
588 traits_t< ST >::spec, traits_t< T >::spec );
589 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pupperDist, *pstride ) );
590 __kmp_str_free( &buff );
593 KE_TRACE( 10, (
"__kmpc_dist_for_static_init: T#%d return\n", gtid ) );
// __kmp_team_static_init< T >: template worker behind the
// __kmpc_team_static_init_* entry points.  Splits the loop across the teams
// of a league (not across threads) using a chunked schedule: each team takes
// chunk-sized pieces with a league-wide stride of span * nteams.
// NOTE(review): extraction gaps as above — signature head, local declarations
// (th, team, nteams, team_id, trip_count, span) and several braces are
// missing from this view.
597 template<
typename T >
599 __kmp_team_static_init(
605 typename traits_t< T >::signed_t *p_st,
606 typename traits_t< T >::signed_t incr,
607 typename traits_t< T >::signed_t chunk
614 typedef typename traits_t< T >::unsigned_t UT;
615 typedef typename traits_t< T >::signed_t ST;
625 KMP_DEBUG_ASSERT( p_last && p_lb && p_ub && p_st );
626 KE_TRACE( 10, (
"__kmp_team_static_init called (%d)\n", gtid));
// Entry trace of the incoming bounds/chunk.
631 buff = __kmp_str_format(
"__kmp_team_static_init enter: T#%%d liter=%%d "\
632 "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
633 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
634 traits_t< ST >::spec, traits_t< T >::spec );
635 KD_TRACE(100, ( buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk ) );
636 __kmp_str_free( &buff );
// Consistency checks: zero/illegal increment, crossed bounds.
642 if( __kmp_env_consistency_check ) {
644 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
646 if( incr > 0 ? (upper < lower) : (lower < upper) ) {
656 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc );
// Identify this team within the league; valid only inside a teams construct.
659 th = __kmp_threads[gtid];
660 KMP_DEBUG_ASSERT(th->th.th_teams_microtask);
661 team = th->th.th_team;
663 nteams = th->th.th_teams_size.nteams;
665 team_id = team->t.t_master_tid;
666 KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
// Trip count with the +/-1 fast paths.
670 trip_count = upper - lower + 1;
671 }
else if(incr == -1) {
672 trip_count = lower - upper + 1;
674 trip_count = (ST)(upper - lower) / incr + 1;
// Chunked assignment: team team_id starts at lower + span*team_id and strides
// by span * nteams; *p_last marks the team owning the final chunk.
679 *p_st = span * nteams;
680 *p_lb = lower + (span * team_id);
681 *p_ub = *p_lb + span - incr;
682 if ( p_last != NULL )
683 *p_last = (team_id == ((trip_count - 1)/(UT)chunk) % nteams);
// Clamp *p_ub to the type limits on over/underflow (guard conditions are in
// the missing lines).
687 *p_ub = i_maxmin< T >::mx;
692 *p_ub = i_maxmin< T >::mn;
// Exit trace of the computed per-team bounds.
700 buff = __kmp_str_format(
"__kmp_team_static_init exit: T#%%d team%%u liter=%%d "\
701 "iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
702 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
703 traits_t< ST >::spec );
704 KD_TRACE(100, ( buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk ) );
705 __kmp_str_free( &buff );
// C-linkage compiler entry points: thin wrappers that instantiate the three
// template workers above for each index width/signedness the compiler emits
// (_4 = kmp_int32, _4u = kmp_uint32, _8 = kmp_int64, _8u = kmp_uint64).
// NOTE(review): extraction gaps — each wrapper's `void __kmpc_...(` header
// line and closing brace are missing from this view; only parameter tails and
// the forwarding calls remain.  Do not edit structurally.
// __kmpc_for_static_init_4
735 kmp_int32 *plower, kmp_int32 *pupper,
736 kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
738 __kmp_for_static_init< kmp_int32 >(
739 loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
// __kmpc_for_static_init_4u
747 kmp_uint32 *plower, kmp_uint32 *pupper,
748 kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
750 __kmp_for_static_init< kmp_uint32 >(
751 loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
// __kmpc_for_static_init_8
759 kmp_int64 *plower, kmp_int64 *pupper,
760 kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
762 __kmp_for_static_init< kmp_int64 >(
763 loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
// __kmpc_for_static_init_8u
771 kmp_uint64 *plower, kmp_uint64 *pupper,
772 kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
774 __kmp_for_static_init< kmp_uint64 >(
775 loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
// __kmpc_dist_for_static_init_4
805 ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
806 kmp_int32 *plower, kmp_int32 *pupper, kmp_int32 *pupperD,
807 kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
809 __kmp_dist_for_static_init< kmp_int32 >(
810 loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
// __kmpc_dist_for_static_init_4u
818 ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
819 kmp_uint32 *plower, kmp_uint32 *pupper, kmp_uint32 *pupperD,
820 kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
822 __kmp_dist_for_static_init< kmp_uint32 >(
823 loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
// __kmpc_dist_for_static_init_8
831 ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
832 kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pupperD,
833 kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
835 __kmp_dist_for_static_init< kmp_int64 >(
836 loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
// __kmpc_dist_for_static_init_8u
844 ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
845 kmp_uint64 *plower, kmp_uint64 *pupper, kmp_uint64 *pupperD,
846 kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
848 __kmp_dist_for_static_init< kmp_uint64 >(
849 loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
// __kmpc_team_static_init_4 — asserts the serial runtime is initialized
// before forwarding.
882 ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
883 kmp_int32 *p_lb, kmp_int32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk )
885 KMP_DEBUG_ASSERT( __kmp_init_serial );
886 __kmp_team_static_init< kmp_int32 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
// __kmpc_team_static_init_4u
894 ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
895 kmp_uint32 *p_lb, kmp_uint32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk )
897 KMP_DEBUG_ASSERT( __kmp_init_serial );
898 __kmp_team_static_init< kmp_uint32 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
// __kmpc_team_static_init_8
906 ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
907 kmp_int64 *p_lb, kmp_int64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk )
909 KMP_DEBUG_ASSERT( __kmp_init_serial );
910 __kmp_team_static_init< kmp_int64 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
// __kmpc_team_static_init_8u
918 ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
919 kmp_uint64 *p_lb, kmp_uint64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk )
921 KMP_DEBUG_ASSERT( __kmp_init_serial );
922 __kmp_team_static_init< kmp_uint64 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_int64 *p_lb, kmp_int64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_uint32 *plower, kmp_uint32 *pupper, kmp_uint32 *pupperD, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_int32 *p_lb, kmp_int32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)
#define KMP_COUNT_BLOCK(name)
Increments the specified counter (name).
void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_uint32 *p_lb, kmp_uint32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_uint32 *plower, kmp_uint32 *pupper, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_int32 *plower, kmp_int32 *pupper, kmp_int32 *pupperD, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_uint64 *p_lb, kmp_uint64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_uint64 *plower, kmp_uint64 *pupper, kmp_uint64 *pupperD, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_int32 *plower, kmp_int32 *pupper, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pupperD, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_uint64 *plower, kmp_uint64 *pupper, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)