#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_str.h"
#include "kmp_error.h"

#define MIN_STACK 100

static char const *cons_text_c[] = {
    "(none)",
    "\"parallel\"",
    "work-sharing", /* not called "for" because of lowering of "sections" */
    "\"ordered\" work-sharing", /* not called "for ordered" for same reason */
    "\"sections\"",
    "work-sharing", /* not called "single" because of lowering of "sections" */
    "\"taskq\"",
    "\"taskq\"",
    "\"taskq ordered\"",
    "\"critical\"",
    "\"ordered\"", /* in PARALLEL */
    "\"ordered\"", /* in PDO */
    "\"ordered\"", /* in TASKQ */
    "\"master\"",
    "\"reduce\"",
    "\"barrier\""};

#define get_src(ident) ((ident) == NULL ? NULL : (ident)->psource)

#define PUSH_MSG(ct, ident)                                                    \
  "\tpushing on stack: %s (%s)\n", cons_text_c[(ct)], get_src((ident))
#define POP_MSG(p)                                                             \
  "\tpopping off stack: %s (%s)\n", cons_text_c[(p)->stack_data[tos].type],    \
      get_src((p)->stack_data[tos].ident)

static int const cons_text_c_num = sizeof(cons_text_c) / sizeof(char const *);
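/* Overview (summary comment, not in the upstream sources): each thread keeps
   its construct nesting in a cons_header. stack_data is a growable array of
   cons_data entries, stack_top is the index of the current top, and p_top /
   w_top / s_top are the stack indices of the innermost parallel, work-sharing,
   and sync construct still open. Every pushed entry records the previous index
   of its class in "prev", so the pop routines can restore the matching *_top. */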
static void __kmp_check_null_func(void) { /* nothing to do */
}
static void __kmp_expand_cons_stack(int gtid, struct cons_header *p) {
  int i;
  struct cons_data *d;

  /* TODO for monitor perhaps? */
  if (gtid < 0)
    __kmp_check_null_func();

  KE_TRACE(10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid()));

  d = p->stack_data;

  p->stack_size = (p->stack_size * 2) + 100;

  /* TODO free the old data */
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (p->stack_size + 1));

  for (i = p->stack_top; i >= 0; --i)
    p->stack_data[i] = d[i];

  /* NOTE: we do not free the old stack_data */
}
// NOTE: Function returns allocated memory, caller must free it!
static char const *__kmp_pragma(int ct, ident_t const *ident) {
  char const *cons = NULL; // Construct name.
  char *file = NULL; // File name.
  char *func = NULL; // Function (routine) name.
  char *line = NULL; // Line number.
  kmp_str_buf_t buffer;
  kmp_msg_t prgm;
  __kmp_str_buf_init(&buffer);
  if (0 < ct && ct < cons_text_c_num) {
    cons = cons_text_c[ct];
  } else {
    KMP_DEBUG_ASSERT(0);
  }
  if (ident != NULL && ident->psource != NULL) {
    char *tail = NULL;
    __kmp_str_buf_print(&buffer, "%s", ident->psource); // Copy source to buf.
    // Split the string in the buffer into file, func, and line.
    tail = buffer.str;
    __kmp_str_split(tail, ';', NULL, &tail);
    __kmp_str_split(tail, ';', &file, &tail);
    __kmp_str_split(tail, ';', &func, &tail);
    __kmp_str_split(tail, ';', &line, &tail);
  }
  prgm = __kmp_msg_format(kmp_i18n_fmt_Pragma, cons, file, func, line);
  __kmp_str_buf_free(&buffer);
  return prgm.str;
} // __kmp_pragma
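/* For reference: ident->psource is a semicolon-separated location string,
   typically of the form ";file;routine;line;column;;" (the exact example here
   is illustrative). The splits above discard the leading empty field and pull
   out the file, routine, and line used in the formatted error message. */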
void __kmp_error_construct(kmp_i18n_id_t id, // Message identifier.
                           enum cons_type ct, // Construct type.
                           ident_t const *ident // Construct ident.
) {
  char const *construct = __kmp_pragma(ct, ident);
  __kmp_msg(kmp_ms_fatal, __kmp_msg_format(id, construct), __kmp_msg_null);
  KMP_INTERNAL_FREE(CCAST(char *, construct));
}
void __kmp_error_construct2(kmp_i18n_id_t id, // Message identifier.
                            enum cons_type ct, // First construct type.
                            ident_t const *ident, // First construct ident.
                            struct cons_data const *cons // Second construct.
) {
  char const *construct1 = __kmp_pragma(ct, ident);
  char const *construct2 = __kmp_pragma(cons->type, cons->ident);
  __kmp_msg(kmp_ms_fatal, __kmp_msg_format(id, construct1, construct2),
            __kmp_msg_null);
  KMP_INTERNAL_FREE(CCAST(char *, construct1));
  KMP_INTERNAL_FREE(CCAST(char *, construct2));
}
struct cons_header *__kmp_allocate_cons_stack(int gtid) {
  struct cons_header *p;

  /* TODO for monitor perhaps? */
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(10, ("allocate cons_stack (%d)\n", gtid));
  p = (struct cons_header *)__kmp_allocate(sizeof(struct cons_header));
  p->p_top = p->w_top = p->s_top = 0;
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (MIN_STACK + 1));
  p->stack_size = MIN_STACK;
  p->stack_top = 0;
  p->stack_data[0].type = ct_none;
  p->stack_data[0].prev = 0;
  p->stack_data[0].ident = NULL;
  return p;
}
void __kmp_free_cons_stack(void *ptr) {
  struct cons_header *p = (struct cons_header *)ptr;
  if (p != NULL) {
    if (p->stack_data != NULL) {
      __kmp_free(p->stack_data);
      p->stack_data = NULL;
    }
    __kmp_free(p);
  }
}
#if KMP_DEBUG
static void dump_cons_stack(int gtid, struct cons_header *p) {
  int i;
  int tos = p->stack_top;
  kmp_str_buf_t buffer;
  __kmp_str_buf_init(&buffer);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_str_buf_print(&buffer,
                      "Begin construct stack with %d items for thread %d\n",
                      tos, gtid);
  __kmp_str_buf_print(&buffer, "     stack_top=%d { P=%d, W=%d, S=%d }\n", tos,
                      p->p_top, p->w_top, p->s_top);
  for (i = tos; i > 0; i--) {
    struct cons_data *c = &(p->stack_data[i]);
    __kmp_str_buf_print(
        &buffer, "        stack_data[%2d] = { %s (%s) %d %p }\n", i,
        cons_text_c[c->type], get_src(c->ident), c->prev, c->name);
  }
  __kmp_str_buf_print(&buffer, "End construct stack for thread %d\n", gtid);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_debug_printf("%s", buffer.str);
  __kmp_str_buf_free(&buffer);
}
#endif
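/* Illustrative sketch (not verbatim output) of the dump above for a thread
   that pushed "parallel" and then "critical":
       Begin construct stack with 2 items for thread 0
            stack_top=2 { P=1, W=0, S=2 }
               stack_data[ 2] = { "critical" (;file.c;foo;10;1;;) 0 0x... }
               stack_data[ 1] = { "parallel" (;file.c;foo;5;1;;) 0 0x0 }
       End construct stack for thread 0
   P/W/S are the stack indices of the innermost parallel, work-sharing, and
   sync constructs, respectively. */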
void __kmp_push_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  KE_TRACE(100, (PUSH_MSG(ct_parallel, ident)));
  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct_parallel;
  p->stack_data[tos].prev = p->p_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->p_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
void __kmp_check_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  if (p->w_top > p->p_top &&
      !(IS_CONS_TYPE_TASKQ(p->stack_data[p->w_top].type) &&
        IS_CONS_TYPE_TASKQ(ct))) {
    // We are already in a WORKSHARE construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    // We are already in a SYNC construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}
void __kmp_push_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  __kmp_check_workshare(gtid, ct, ident);
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->w_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->w_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync(int gtid, enum cons_type ct, ident_t const *ident,
                 kmp_user_lock_p lck, kmp_uint32 seq)
#else
__kmp_check_sync(int gtid, enum cons_type ct, ident_t const *ident,
                 kmp_user_lock_p lck)
#endif
{
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KE_TRACE(10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size)
    __kmp_expand_cons_stack(gtid, p);

  if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo ||
      ct == ct_ordered_in_taskq) {
    if (p->w_top <= p->p_top) {
/* we are not in a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
      /* do not report error messages for PARALLEL ORDERED */
      KMP_ASSERT(ct == ct_ordered_in_parallel);
#else
      __kmp_error_construct(kmp_i18n_msg_CnsBoundToWorksharing, ct, ident);
#endif /* BUILD_PARALLEL_ORDERED */
    } else {
      /* inside a WORKSHARING construct for this PARALLEL region */
      if (!IS_CONS_TYPE_ORDERED(p->stack_data[p->w_top].type)) {
        if (p->stack_data[p->w_top].type == ct_taskq) {
          __kmp_error_construct2(kmp_i18n_msg_CnsNotInTaskConstruct, ct, ident,
                                 &p->stack_data[p->w_top]);
        } else {
          __kmp_error_construct2(kmp_i18n_msg_CnsNoOrderedClause, ct, ident,
                                 &p->stack_data[p->w_top]);
        }
      }
    }
    if (p->s_top > p->p_top && p->s_top > p->w_top) {
      /* inside a sync construct which is inside a worksharing construct */
      int index = p->s_top;
      enum cons_type stack_type;

      stack_type = p->stack_data[index].type;

      if (stack_type == ct_critical ||
          ((stack_type == ct_ordered_in_parallel ||
            stack_type == ct_ordered_in_pdo ||
            stack_type == ct_ordered_in_taskq) &&
           p->stack_data[index].ident != NULL &&
           (p->stack_data[index].ident->flags & KMP_IDENT_KMPC))) {
        /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
        __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                               &p->stack_data[index]);
      }
    }
  } else if (ct == ct_critical) {
#if KMP_USE_DYNAMIC_LOCK
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck, seq) ==
            gtid) { /* this thread already owns the lock for this critical */
#else
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck) ==
            gtid) { /* this thread already owns the lock for this critical */
#endif
      int index = p->s_top;
      struct cons_data cons = {NULL, ct_critical, 0, NULL};
      /* walk up construct stack and try to find critical with matching name */
      while (index != 0 && p->stack_data[index].name != lck) {
        index = p->stack_data[index].prev;
      }
      if (index != 0) {
        /* found a match on the stack (may not always happen because of
           interleaved critical for Fortran) */
        cons = p->stack_data[index];
      }
      /* we are in CRITICAL which is inside a CRITICAL construct of same name */
      __kmp_error_construct2(kmp_i18n_msg_CnsNestingSameName, ct, ident, &cons);
    }
  } else if (ct == ct_master || ct == ct_reduce) {
    if (p->w_top > p->p_top) {
      /* inside a WORKSHARING construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->w_top]);
    }
    if (ct == ct_reduce && p->s_top > p->p_top) {
      /* inside another SYNC construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->s_top]);
    }
  }
}
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync(int gtid, enum cons_type ct, ident_t const *ident,
                kmp_user_lock_p lck, kmp_uint32 seq)
#else
__kmp_push_sync(int gtid, enum cons_type ct, ident_t const *ident,
                kmp_user_lock_p lck)
#endif
{
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_ASSERT(gtid == __kmp_get_gtid());
  KE_TRACE(10, ("__kmp_push_sync (gtid=%d)\n", gtid));
#if KMP_USE_DYNAMIC_LOCK
  __kmp_check_sync(gtid, ct, ident, lck, seq);
#else
  __kmp_check_sync(gtid, ct, ident, lck);
#endif
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->s_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = lck;
  p->s_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
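/* The __kmp_pop_* routines below undo the corresponding pushes: each checks
   that the entry on top of the stack is really the construct being closed
   (raising CnsDetectedEnd / CnsExpectedEnd otherwise), then restores the saved
   "prev" index for its construct class. (Summary comment added here.) */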
void __kmp_pop_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->p_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident);
  }
  if (tos != p->p_top || p->stack_data[tos].type != ct_parallel) {
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct_parallel, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->p_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
enum cons_type __kmp_pop_workshare(int gtid, enum cons_type ct,
                                   ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->w_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }

  if (tos != p->w_top ||
      (p->stack_data[tos].type != ct &&
       // an "ordered" variant on the stack may be closed by its plain form
       !(p->stack_data[tos].type == ct_pdo_ordered && ct == ct_pdo) &&
       !(p->stack_data[tos].type == ct_task_ordered && ct == ct_task))) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->w_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
  return p->stack_data[p->w_top].type;
}
void __kmp_pop_sync(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->s_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }
  if (tos != p->s_top || p->stack_data[tos].type != ct) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->s_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}
void __kmp_check_barrier(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid,
                __kmp_get_gtid()));
  if (ident != 0) {
    __kmp_check_null_func();
  }
  if (p->w_top > p->p_top) {
    /* we are already in a WORKSHARING construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    /* we are already in a SYNC construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}