#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_str.h"
#include "kmp_error.h"

#define MIN_STACK 100   /* initial capacity of the per-thread construct stack */

/* Human-readable construct names, indexed by enum cons_type. */
static char const * cons_text_c[] = {
    /* ... entries preceding "ordered" work-sharing omitted ... */
    "\"ordered\" work-sharing",
    /* ... remaining entries omitted ... */
};

#define get_src( ident ) ( (ident) == NULL ? NULL : (ident)->psource )

#define PUSH_MSG( ct, ident ) \
    "\tpushing on stack: %s (%s)\n", cons_text_c[ (ct) ], get_src( (ident) )
#define POP_MSG( p )                                   \
    "\tpopping off stack: %s (%s)\n",                  \
    cons_text_c[ (p)->stack_data[ tos ].type ],        \
    get_src( (p)->stack_data[ tos ].ident )

static int const cons_text_c_num = sizeof( cons_text_c ) / sizeof( char const * );

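/*
 * Overview (descriptive note, inferred from the code below): each thread keeps a
 * construct-consistency stack (struct cons_header, reachable through th.th_cons).
 * Every entry (struct cons_data) records a construct type, the source ident, a
 * lock "name" for named critical sections, and a 'prev' index linking entries of
 * the same kind. Three indices in the header -- p_top (parallel), w_top
 * (worksharing), and s_top (sync) -- point at the most recent entry of each kind,
 * so the nesting checks can compare, e.g., w_top against p_top to decide whether a
 * worksharing construct is already open in the current parallel region.
 */
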
static void
__kmp_check_null_func( void )
{
    /* nothing to do */
}

static void
__kmp_expand_cons_stack( int gtid, struct cons_header *p )
{
    int i;
    struct cons_data *d;

    __kmp_check_null_func();

    KE_TRACE( 10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid() ) );

    /* Save the old buffer, grow the capacity, and copy the existing entries
       into the newly allocated buffer. */
    d = p->stack_data;

    p->stack_size = (p->stack_size * 2) + 100;

    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (p->stack_size+1) );

    for (i = p->stack_top; i >= 0; --i)
        p->stack_data[i] = d[i];

    /* Note: the old stack_data buffer is not freed here. */
}

/* NOTE: returns allocated memory; the caller must release it with KMP_INTERNAL_FREE. */
static char const *
__kmp_pragma( int ct, ident_t const * ident )
{
    char const * cons = NULL;   // Construct name.
    char * file = NULL;         // File name.
    char * func = NULL;         // Function (routine) name.
    char * line = NULL;         // Line number.
    kmp_str_buf_t buffer;
    kmp_msg_t prgm;

    __kmp_str_buf_init( & buffer );
    if ( 0 < ct && ct < cons_text_c_num ) {
        cons = cons_text_c[ ct ];
    } else {
        KMP_DEBUG_ASSERT( 0 );
    }
    if ( ident != NULL && ident->psource != NULL ) {
        char * tail = NULL;
        // Copy the source location string into the buffer, then split the
        // ';'-separated fields into file, function, and line.
        __kmp_str_buf_print( & buffer, "%s", ident->psource );
        tail = buffer.str;
        __kmp_str_split( tail, ';', NULL,   & tail );
        __kmp_str_split( tail, ';', & file, & tail );
        __kmp_str_split( tail, ';', & func, & tail );
        __kmp_str_split( tail, ';', & line, & tail );
    }
    prgm = __kmp_msg_format( kmp_i18n_fmt_Pragma, cons, file, func, line );
    __kmp_str_buf_free( & buffer );
    return prgm.str;
}

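/*
 * Note (descriptive): __kmp_pragma() formats a human-readable description of a
 * construct (via the kmp_i18n_fmt_Pragma format) from the construct name and the
 * file/function/line parsed out of ident->psource, whose leading field is skipped
 * by the first __kmp_str_split() call. The two error reporters below pass the
 * returned string(s) to __kmp_msg() and then release them with KMP_INTERNAL_FREE().
 */
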
void
__kmp_error_construct(
    kmp_i18n_id_t    id,     // Message identifier.
    enum cons_type   ct,     // Construct type.
    ident_t const *  ident   // Construct ident.
) {
    char const * construct = __kmp_pragma( ct, ident );
    __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct ), __kmp_msg_null );
    KMP_INTERNAL_FREE( (void *) construct );
}

void
__kmp_error_construct2(
    kmp_i18n_id_t            id,      // Message identifier.
    enum cons_type           ct,      // First construct type.
    ident_t const *          ident,   // First construct ident.
    struct cons_data const * cons     // Second construct.
) {
    char const * construct1 = __kmp_pragma( ct, ident );
    char const * construct2 = __kmp_pragma( cons->type, cons->ident );
    __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct1, construct2 ), __kmp_msg_null );
    KMP_INTERNAL_FREE( (void *) construct1 );
    KMP_INTERNAL_FREE( (void *) construct2 );
}

struct cons_header *
__kmp_allocate_cons_stack( int gtid )
{
    struct cons_header *p;

    __kmp_check_null_func();
    KE_TRACE( 10, ("allocate cons_stack (%d)\n", gtid ) );
    p = (struct cons_header *) __kmp_allocate( sizeof( struct cons_header ) );
    p->p_top = p->w_top = p->s_top = 0;
    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (MIN_STACK+1) );
    p->stack_size = MIN_STACK;
    p->stack_top  = 0;
    p->stack_data[ 0 ].type  = ct_none;
    p->stack_data[ 0 ].prev  = 0;
    p->stack_data[ 0 ].ident = NULL;
    return p;
}

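/*
 * Usage sketch (an assumption about callers that live outside this file): the
 * runtime typically allocates one construct stack per thread and stores it in the
 * thread descriptor, roughly:
 *
 *     __kmp_threads[ gtid ]->th.th_cons = __kmp_allocate_cons_stack( gtid );
 *     ...
 *     __kmp_free_cons_stack( __kmp_threads[ gtid ]->th.th_cons );
 *
 * The push/check routines below read the stack back through th.th_cons.
 */
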
void
__kmp_free_cons_stack( void * ptr )
{
    struct cons_header * p = (struct cons_header *) ptr;
    if ( p != NULL ) {
        if ( p->stack_data != NULL ) {
            __kmp_free( p->stack_data );
            p->stack_data = NULL;
        }
        __kmp_free( p );
    }
}

static void
dump_cons_stack( int gtid, struct cons_header * p )
{
    int i;
    int tos = p->stack_top;
    kmp_str_buf_t buffer;

    __kmp_str_buf_init( & buffer );
    __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
    __kmp_str_buf_print( & buffer, "Begin construct stack with %d items for thread %d\n", tos, gtid );
    __kmp_str_buf_print( & buffer, "    stack_top=%d { P=%d, W=%d, S=%d }\n", tos, p->p_top, p->w_top, p->s_top );
    for ( i = tos; i > 0; i-- ) {
        struct cons_data * c = & ( p->stack_data[ i ] );
        __kmp_str_buf_print( & buffer,
            "    stack_data[%2d] = { %s (%s) %d %p }\n",
            i, cons_text_c[ c->type ], get_src( c->ident ), c->prev, c->name );
    }
    __kmp_str_buf_print( & buffer, "End construct stack for thread %d\n", gtid );
    __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
    __kmp_debug_printf( "%s", buffer.str );
    __kmp_str_buf_free( & buffer );
}

void
__kmp_push_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    KE_TRACE( 100, ( PUSH_MSG( ct_parallel, ident ) ) );
    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }
    tos = ++p->stack_top;
    p->stack_data[ tos ].type  = ct_parallel;
    p->stack_data[ tos ].prev  = p->p_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = NULL;
    p->p_top = tos;   /* new top of the parallel chain */
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

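/*
 * Pairing sketch (an assumption about callers that live outside this file; gtid and
 * loc below are placeholders for the caller's global thread id and source ident):
 *
 *     __kmp_push_parallel( gtid, loc );            // entering a parallel region
 *     __kmp_push_workshare( gtid, ct_pdo, loc );   // entering a worksharing loop
 *     ...
 *     __kmp_pop_workshare( gtid, ct_pdo, loc );    // leaving the loop
 *     __kmp_pop_parallel( gtid, loc );             // leaving the parallel region
 *
 * Unmatched or misordered pairs are reported through __kmp_error_construct*().
 */
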
void
__kmp_check_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );

    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }
    if ( p->w_top > p->p_top &&
         !(IS_CONS_TYPE_TASKQ(p->stack_data[ p->w_top ].type) && IS_CONS_TYPE_TASKQ(ct))) {
        /* already inside a worksharing construct for this parallel region */
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->w_top ] );
    }
    if ( p->s_top > p->p_top ) {
        /* already inside a sync construct for this parallel region */
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->s_top ] );
    }
}

void
__kmp_push_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    __kmp_check_workshare( gtid, ct, ident );
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->w_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = NULL;
    p->w_top = tos;   /* new top of the worksharing chain */
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid() ) );

    if (p->stack_top >= p->stack_size)
        __kmp_expand_cons_stack( gtid, p );

    if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo || ct == ct_ordered_in_taskq ) {
        if (p->w_top <= p->p_top) {
            /* not inside a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
            /* do not report an error for PARALLEL ORDERED */
            KMP_ASSERT( ct == ct_ordered_in_parallel );
#else
            __kmp_error_construct( kmp_i18n_msg_CnsBoundToWorksharing, ct, ident );
#endif /* BUILD_PARALLEL_ORDERED */
        } else {
            /* inside a worksharing construct for this parallel region */
            if (!IS_CONS_TYPE_ORDERED(p->stack_data[ p->w_top ].type)) {
                if (p->stack_data[ p->w_top ].type == ct_taskq) {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNotInTaskConstruct,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                } else {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNoOrderedClause,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                }
            }
        }
        if (p->s_top > p->p_top && p->s_top > p->w_top) {
            /* inside a sync construct which is itself inside a worksharing construct */
            int index = p->s_top;
            enum cons_type stack_type;

            stack_type = p->stack_data[ index ].type;

            if (stack_type == ct_critical ||
                ( ( stack_type == ct_ordered_in_parallel ||
                    stack_type == ct_ordered_in_pdo      ||
                    stack_type == ct_ordered_in_taskq  ) &&
                  p->stack_data[ index ].ident != NULL &&
                  (p->stack_data[ index ].ident->flags & KMP_IDENT_KMPC) )) {
                /* "ordered" nested inside an "ordered" or "critical" construct */
                __kmp_error_construct2(
                    kmp_i18n_msg_CnsInvalidNesting,
                    ct, ident,
                    & p->stack_data[ index ]
                );
            }
        }
    } else if ( ct == ct_critical ) {
#if KMP_USE_DYNAMIC_LOCK
        if ( lck != NULL && __kmp_get_user_lock_owner( lck, seq ) == gtid ) {
#else
        if ( lck != NULL && __kmp_get_user_lock_owner( lck ) == gtid ) {
#endif
            /* this thread already owns the lock of this critical section */
            int index = p->s_top;
            struct cons_data cons = { NULL, ct_critical, 0, NULL };
            /* walk up the construct stack looking for a critical with the same name (lock) */
            while ( index != 0 && p->stack_data[ index ].name != lck ) {
                index = p->stack_data[ index ].prev;
            }
            if ( index != 0 ) {
                cons = p->stack_data[ index ];
            }
            __kmp_error_construct2( kmp_i18n_msg_CnsNestingSameName, ct, ident, & cons );
        }
    } else if ( ct == ct_master || ct == ct_reduce ) {
        if (p->w_top > p->p_top) {
            /* inside a worksharing construct for this parallel region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->w_top ]
            );
        }
        if (ct == ct_reduce && p->s_top > p->p_top) {
            /* inside another sync construct for this parallel region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->s_top ]
            );
        }
    }
}

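/*
 * Summary of the checks above (descriptive note): "ordered" must be bound to a
 * worksharing construct that carries an ordered clause and may not be nested inside
 * a "critical" or (for compiler-generated idents) another "ordered" region;
 * "critical" is rejected when the current thread already owns the lock of a
 * critical section with the same name, which would self-deadlock; "master" and
 * "reduce" may not appear inside a worksharing construct, and "reduce" also may not
 * appear inside another sync construct of the same parallel region.
 */
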
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_ASSERT( gtid == __kmp_get_gtid() );
    KE_TRACE( 10, ("__kmp_push_sync (gtid=%d)\n", gtid ) );
#if KMP_USE_DYNAMIC_LOCK
    __kmp_check_sync( gtid, ct, ident, lck, seq );
#else
    __kmp_check_sync( gtid, ct, ident, lck );
#endif
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->s_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = lck;
    p->s_top = tos;   /* new top of the sync chain */
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

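/*
 * Note (descriptive): __kmp_push_sync() records the critical-section lock in the
 * entry's 'name' field; __kmp_check_sync() later walks the s_top/prev chain and
 * compares 'name' against the lock of a newly entered "critical" to diagnose a
 * thread re-entering a critical section of the same name. __kmp_pop_sync() must be
 * called with the same construct type to unwind the entry.
 */
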
void
__kmp_pop_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->p_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident );
    }
    if ( tos != p->p_top || p->stack_data[ tos ].type != ct_parallel ) {
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct_parallel, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->p_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

enum cons_type
__kmp_pop_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->w_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    }
    if ( tos != p->w_top ||
         ( p->stack_data[ tos ].type != ct &&
           /* below are two exceptions to the rule that construct types must match */
           ! ( p->stack_data[ tos ].type == ct_pdo_ordered && ct == ct_pdo ) &&
           ! ( p->stack_data[ tos ].type == ct_task_ordered && ct == ct_task )
         )
       ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->w_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
    return p->stack_data[ p->w_top ].type;
}

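/*
 * Note (descriptive): unlike the other pop routines, __kmp_pop_workshare() returns
 * the type of the worksharing construct now on top of the w_top chain, so the
 * caller can tell which worksharing construct (if any) still encloses it.
 */
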
void
__kmp_pop_sync( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->s_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    }
    if ( tos != p->s_top || p->stack_data[ tos ].type != ct ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    }
    __kmp_check_null_func();
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->s_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

void
__kmp_check_barrier( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid, __kmp_get_gtid() ) );
    __kmp_check_null_func();
    if ( p->w_top > p->p_top ) {
        /* already inside a worksharing construct for this parallel region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->w_top ]
        );
    }
    if (p->s_top > p->p_top) {
        /* already inside a sync construct for this parallel region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->s_top ]
        );
    }
}