LLVM OpenMP* Runtime Library
kmp_error.cpp
/*
 * kmp_error.cpp -- KPTS functions for error checking at runtime
 */

//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_str.h"

/* ------------------------------------------------------------------------ */

#define MIN_STACK 100

static char const *cons_text_c[] = {
    "(none)", "\"parallel\"", "work-sharing", /* this is not called "for"
                                                 because of lowering of
                                                 "sections" pragmas */
    "\"ordered\" work-sharing", /* this is not called "for ordered" because of
                                   lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing", /* this is not called "single" because of lowering of
                       "sections" pragmas */
    "\"taskq\"", "\"taskq\"", "\"taskq ordered\"", "\"critical\"",
    "\"ordered\"", /* in PARALLEL */
    "\"ordered\"", /* in PDO */
    "\"ordered\"", /* in TASKQ */
    "\"master\"", "\"reduce\"", "\"barrier\""};

#define get_src(ident) ((ident) == NULL ? NULL : (ident)->psource)

#define PUSH_MSG(ct, ident)                                                    \
  "\tpushing on stack: %s (%s)\n", cons_text_c[(ct)], get_src((ident))
#define POP_MSG(p)                                                             \
  "\tpopping off stack: %s (%s)\n", cons_text_c[(p)->stack_data[tos].type],    \
      get_src((p)->stack_data[tos].ident)

static int const cons_text_c_num = sizeof(cons_text_c) / sizeof(char const *);

/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */

static void __kmp_check_null_func(void) { /* nothing to do */ }

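/* Grow a thread's construct stack: the new capacity is twice the old plus 100
   entries, and the live entries [0..stack_top] are copied across. */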
static void __kmp_expand_cons_stack(int gtid, struct cons_header *p) {
  int i;
  struct cons_data *d;

  /* TODO for monitor perhaps? */
  if (gtid < 0)
    __kmp_check_null_func();

  KE_TRACE(10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid()));

  d = p->stack_data;

  p->stack_size = (p->stack_size * 2) + 100;

  /* TODO free the old data */
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (p->stack_size + 1));

  for (i = p->stack_top; i >= 0; --i)
    p->stack_data[i] = d[i];

  /* NOTE: we do not free the old stack_data */
}

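/* Build a human-readable description of a construct (its name plus source
   location) for an error message. ident->psource is a ';'-separated string
   that is split below into file, routine, and line fields. */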
// NOTE: Function returns allocated memory, caller must free it!
static char const *__kmp_pragma(int ct, ident_t const *ident) {
  char const *cons = NULL; // Construct name.
  char *file = NULL; // File name.
  char *func = NULL; // Function (routine) name.
  char *line = NULL; // Line number.
  kmp_str_buf_t buffer;
  kmp_msg_t prgm;
  __kmp_str_buf_init(&buffer);
  if (0 < ct && ct < cons_text_c_num) {
    cons = cons_text_c[ct];
  } else {
    KMP_DEBUG_ASSERT(0);
  }
  if (ident != NULL && ident->psource != NULL) {
    char *tail = NULL;
    __kmp_str_buf_print(&buffer, "%s",
                        ident->psource); // Copy source to buffer.
    // Split string in buffer to file, func, and line.
    tail = buffer.str;
    __kmp_str_split(tail, ';', NULL, &tail);
    __kmp_str_split(tail, ';', &file, &tail);
    __kmp_str_split(tail, ';', &func, &tail);
    __kmp_str_split(tail, ';', &line, &tail);
  } // if
  prgm = __kmp_msg_format(kmp_i18n_fmt_Pragma, cons, file, func, line);
  __kmp_str_buf_free(&buffer);
  return prgm.str;
} // __kmp_pragma

/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */

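/* Report a fatal error naming the offending construct. */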
void __kmp_error_construct(kmp_i18n_id_t id, // Message identifier.
                           enum cons_type ct, // Construct type.
                           ident_t const *ident // Construct ident.
                           ) {
  char const *construct = __kmp_pragma(ct, ident);
  __kmp_msg(kmp_ms_fatal, __kmp_msg_format(id, construct), __kmp_msg_null);
  KMP_INTERNAL_FREE(CCAST(char *, construct));
}

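/* Report a fatal error naming two constructs, e.g. an illegally nested pair. */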
void __kmp_error_construct2(kmp_i18n_id_t id, // Message identifier.
                            enum cons_type ct, // First construct type.
                            ident_t const *ident, // First construct ident.
                            struct cons_data const *cons // Second construct.
                            ) {
  char const *construct1 = __kmp_pragma(ct, ident);
  char const *construct2 = __kmp_pragma(cons->type, cons->ident);
  __kmp_msg(kmp_ms_fatal, __kmp_msg_format(id, construct1, construct2),
            __kmp_msg_null);
  KMP_INTERNAL_FREE(CCAST(char *, construct1));
  KMP_INTERNAL_FREE(CCAST(char *, construct2));
}

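/* Allocate and initialize a construct stack for one thread. Slot 0 is a
   ct_none sentinel, so a p_top/w_top/s_top value of 0 means "no enclosing
   construct of that kind". */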
struct cons_header *__kmp_allocate_cons_stack(int gtid) {
  struct cons_header *p;

  /* TODO for monitor perhaps? */
  if (gtid < 0) {
    __kmp_check_null_func();
  } // if
  KE_TRACE(10, ("allocate cons_stack (%d)\n", gtid));
  p = (struct cons_header *)__kmp_allocate(sizeof(struct cons_header));
  p->p_top = p->w_top = p->s_top = 0;
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (MIN_STACK + 1));
  p->stack_size = MIN_STACK;
  p->stack_top = 0;
  p->stack_data[0].type = ct_none;
  p->stack_data[0].prev = 0;
  p->stack_data[0].ident = NULL;
  return p;
}

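/* Free a construct stack previously created by __kmp_allocate_cons_stack. */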
void __kmp_free_cons_stack(void *ptr) {
  struct cons_header *p = (struct cons_header *)ptr;
  if (p != NULL) {
    if (p->stack_data != NULL) {
      __kmp_free(p->stack_data);
      p->stack_data = NULL;
    } // if
    __kmp_free(p);
  } // if
}

#if KMP_DEBUG
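/* Debug-only helper: print the entire construct stack of a thread. */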
static void dump_cons_stack(int gtid, struct cons_header *p) {
  int i;
  int tos = p->stack_top;
  kmp_str_buf_t buffer;
  __kmp_str_buf_init(&buffer);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_str_buf_print(&buffer,
                      "Begin construct stack with %d items for thread %d\n",
                      tos, gtid);
  __kmp_str_buf_print(&buffer, " stack_top=%d { P=%d, W=%d, S=%d }\n", tos,
                      p->p_top, p->w_top, p->s_top);
  for (i = tos; i > 0; i--) {
    struct cons_data *c = &(p->stack_data[i]);
    __kmp_str_buf_print(
        &buffer, " stack_data[%2d] = { %s (%s) %d %p }\n", i,
        cons_text_c[c->type], get_src(c->ident), c->prev, c->name);
  } // for i
  __kmp_str_buf_print(&buffer, "End construct stack for thread %d\n", gtid);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_debug_printf("%s", buffer.str);
  __kmp_str_buf_free(&buffer);
}
#endif

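/* Push a "parallel" region; p_top tracks the innermost parallel entry. */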
void __kmp_push_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  KE_TRACE(100, (PUSH_MSG(ct_parallel, ident)));
  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  } // if
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct_parallel;
  p->stack_data[tos].prev = p->p_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->p_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

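/* Verify that entering worksharing construct ct is legal: it must not be
   nested inside another worksharing or sync construct of the same parallel
   region (nested taskq being the one tolerated combination). */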
void __kmp_check_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  } // if
  if (p->w_top > p->p_top &&
      !(IS_CONS_TYPE_TASKQ(p->stack_data[p->w_top].type) &&
        IS_CONS_TYPE_TASKQ(ct))) {
    // We are already in a WORKSHARE construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  } // if
  if (p->s_top > p->p_top) {
    // We are already in a SYNC construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  } // if
}

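/* Validate, then push, worksharing construct ct; w_top tracks the innermost
   worksharing entry. */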
void __kmp_push_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  __kmp_check_workshare(gtid, ct, ident);
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->w_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->w_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

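/* Verify that entering sync construct ct (ordered, critical, master, or
   reduce) is legal here. For "ordered" this requires an enclosing worksharing
   construct with an ordered clause; for "critical" it detects a thread
   re-entering a critical section whose lock it already owns. */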
#if KMP_USE_DYNAMIC_LOCK
void __kmp_check_sync(int gtid, enum cons_type ct, ident_t const *ident,
                      kmp_user_lock_p lck, kmp_uint32 seq)
#else
void __kmp_check_sync(int gtid, enum cons_type ct, ident_t const *ident,
                      kmp_user_lock_p lck)
#endif
{
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KE_TRACE(10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size)
    __kmp_expand_cons_stack(gtid, p);

  if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo ||
      ct == ct_ordered_in_taskq) {
    if (p->w_top <= p->p_top) {
/* we are not in a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
      /* do not report error messages for PARALLEL ORDERED */
      KMP_ASSERT(ct == ct_ordered_in_parallel);
#else
      __kmp_error_construct(kmp_i18n_msg_CnsBoundToWorksharing, ct, ident);
#endif /* BUILD_PARALLEL_ORDERED */
    } else {
      /* inside a WORKSHARING construct for this PARALLEL region */
      if (!IS_CONS_TYPE_ORDERED(p->stack_data[p->w_top].type)) {
        if (p->stack_data[p->w_top].type == ct_taskq) {
          __kmp_error_construct2(kmp_i18n_msg_CnsNotInTaskConstruct, ct, ident,
                                 &p->stack_data[p->w_top]);
        } else {
          __kmp_error_construct2(kmp_i18n_msg_CnsNoOrderedClause, ct, ident,
                                 &p->stack_data[p->w_top]);
        }
      }
    }
    if (p->s_top > p->p_top && p->s_top > p->w_top) {
      /* inside a sync construct which is inside a worksharing construct */
      int index = p->s_top;
      enum cons_type stack_type;

      stack_type = p->stack_data[index].type;

      if (stack_type == ct_critical ||
          ((stack_type == ct_ordered_in_parallel ||
            stack_type == ct_ordered_in_pdo ||
            stack_type == ct_ordered_in_taskq) && /* C doesn't allow named
                                                     ordered; ordered in
                                                     ordered gets error */
           p->stack_data[index].ident != NULL &&
           (p->stack_data[index].ident->flags & KMP_IDENT_KMPC))) {
        /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
        __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                               &p->stack_data[index]);
      }
    }
  } else if (ct == ct_critical) {
#if KMP_USE_DYNAMIC_LOCK
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck, seq) ==
            gtid) { /* this thread already has lock for this critical section */
#else
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck) ==
            gtid) { /* this thread already has lock for this critical section */
#endif
      int index = p->s_top;
      struct cons_data cons = {NULL, ct_critical, 0, NULL};
      /* walk up construct stack and try to find critical with matching name */
      while (index != 0 && p->stack_data[index].name != lck) {
        index = p->stack_data[index].prev;
      }
      if (index != 0) {
        /* found match on the stack (may not always because of interleaved
         * critical for Fortran) */
        cons = p->stack_data[index];
      }
      /* we are in CRITICAL which is inside a CRITICAL construct of same name */
      __kmp_error_construct2(kmp_i18n_msg_CnsNestingSameName, ct, ident, &cons);
    }
  } else if (ct == ct_master || ct == ct_reduce) {
    if (p->w_top > p->p_top) {
      /* inside a WORKSHARING construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->w_top]);
    }
    if (ct == ct_reduce && p->s_top > p->p_top) {
      /* inside another SYNC construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->s_top]);
    } // if
  } // if
}

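/* Validate, then push, sync construct ct; for critical constructs, name holds
   the lock that identifies the critical section. */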
#if KMP_USE_DYNAMIC_LOCK
void __kmp_push_sync(int gtid, enum cons_type ct, ident_t const *ident,
                     kmp_user_lock_p lck, kmp_uint32 seq)
#else
void __kmp_push_sync(int gtid, enum cons_type ct, ident_t const *ident,
                     kmp_user_lock_p lck)
#endif
{
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_ASSERT(gtid == __kmp_get_gtid());
  KE_TRACE(10, ("__kmp_push_sync (gtid=%d)\n", gtid));
#if KMP_USE_DYNAMIC_LOCK
  __kmp_check_sync(gtid, ct, ident, lck, seq);
#else
  __kmp_check_sync(gtid, ct, ident, lck);
#endif
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->s_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = lck;
  p->s_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

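/* Pop a "parallel" region, checking that it is the innermost open construct. */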
void __kmp_pop_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->p_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident);
  }
  if (tos != p->p_top || p->stack_data[tos].type != ct_parallel) {
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct_parallel, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->p_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

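/* Pop worksharing construct ct and return the type of the worksharing
   construct that becomes innermost (ct_none if there is none). */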
enum cons_type __kmp_pop_workshare(int gtid, enum cons_type ct,
                                   ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->w_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }

  if (tos != p->w_top ||
      (p->stack_data[tos].type != ct &&
       // below are two exceptions to the rule that construct types must match
       !(p->stack_data[tos].type == ct_pdo_ordered && ct == ct_pdo) &&
       !(p->stack_data[tos].type == ct_task_ordered && ct == ct_task))) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->w_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
  return p->stack_data[p->w_top].type;
}

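/* Pop sync construct ct, checking that it matches the innermost open sync
   construct. */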
void __kmp_pop_sync(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->s_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }
  if (tos != p->s_top || p->stack_data[tos].type != ct) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->s_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

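/* Verify that a barrier is legal here: it must not appear inside a
   worksharing or sync construct of the current parallel region. */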
void __kmp_check_barrier(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid,
                __kmp_get_gtid()));
  if (ident != 0) {
    __kmp_check_null_func();
  }
  if (p->w_top > p->p_top) {
    /* we are already in a WORKSHARING construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    /* we are already in a SYNC construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}