1 /*
2  * kmp_lock.cpp -- lock-related functions
3  */
4 
5 
6 //===----------------------------------------------------------------------===//
7 //
8 // The LLVM Compiler Infrastructure
9 //
10 // This file is dual licensed under the MIT and the University of Illinois Open
11 // Source Licenses. See LICENSE.txt for details.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 
16 #include <stddef.h>
17 #include <atomic>
18 
19 #include "kmp.h"
20 #include "kmp_i18n.h"
21 #include "kmp_io.h"
22 #include "kmp_itt.h"
23 #include "kmp_lock.h"
24 
25 #include "tsan_annotations.h"
26 
27 #if KMP_USE_FUTEX
28 #include <sys/syscall.h>
29 #include <unistd.h>
30 // We should really include <futex.h>, but that causes compatibility problems on
31 // different Linux* OS distributions that either require you to include (or
32 // break when you try to include) <pci/types.h>. Since all we need is the two
33 // macros below (which are part of the kernel ABI, so cannot change), we just
34 // define the constants here and don't include <futex.h>.
35 #ifndef FUTEX_WAIT
36 #define FUTEX_WAIT 0
37 #endif
38 #ifndef FUTEX_WAKE
39 #define FUTEX_WAKE 1
40 #endif
41 #endif
42 
43 /* Implement spin locks for internal library use. */
44 /* The algorithm implemented is Lamport's bakery lock [1974]. */
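// Illustrative sketch only (excluded from the build; the type name is
// hypothetical) of the bakery/ticket scheme that the ticket-lock routines
// below implement with the same atomics: take a ticket with an atomic
// increment, spin until it is served, and bump now_serving to release.
#if 0
struct example_ticket_lock {
  std::atomic<unsigned> next_ticket{0};
  std::atomic<unsigned> now_serving{0};
  void acquire() {
    unsigned my_ticket = next_ticket.fetch_add(1, std::memory_order_relaxed);
    while (now_serving.load(std::memory_order_acquire) != my_ticket) {
      // spin; the runtime versions below yield/back off instead of burning CPU
    }
  }
  void release() { now_serving.fetch_add(1, std::memory_order_release); }
};
#endif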
45 
46 void __kmp_validate_locks(void) {
47  int i;
48  kmp_uint32 x, y;
49 
50  /* Check that unsigned arithmetic wraps properly (x - y must stay 2 across the wrap) */
51  x = ~((kmp_uint32)0) - 2;
52  y = x - 2;
53 
54  for (i = 0; i < 8; ++i, ++x, ++y) {
55  kmp_uint32 z = (x - y);
56  KMP_ASSERT(z == 2);
57  }
58 
59  KMP_ASSERT(offsetof(kmp_base_queuing_lock, tail_id) % 8 == 0);
60 }
61 
62 /* ------------------------------------------------------------------------ */
63 /* test and set locks */
64 
65 // For the non-nested locks, we can only assume that the first 4 bytes were
66 // allocated, since gcc only allocates 4 bytes for omp_lock_t, and the Intel
67 // compiler only allocates a 4 byte pointer on IA-32 architecture. On
68 // Windows* OS on Intel(R) 64, we can assume that all 8 bytes were allocated.
69 //
70 // gcc reserves >= 8 bytes for nested locks, so we can assume that the
71 // entire 8 bytes were allocated for nested locks on all 64-bit platforms.
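// Presumably this is why the *_with_checks routines below guard the nestable
// check with sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE: when the structure
// does not fit in the user's omp_lock_t, depth_locked may lie outside the
// 4 bytes that were actually allocated, so it cannot safely be read.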
72 
73 static kmp_int32 __kmp_get_tas_lock_owner(kmp_tas_lock_t *lck) {
74  return KMP_LOCK_STRIP(TCR_4(lck->lk.poll)) - 1;
75 }
76 
77 static inline bool __kmp_is_tas_lock_nestable(kmp_tas_lock_t *lck) {
78  return lck->lk.depth_locked != -1;
79 }
80 
81 __forceinline static int
82 __kmp_acquire_tas_lock_timed_template(kmp_tas_lock_t *lck, kmp_int32 gtid) {
83  KMP_MB();
84 
85 #ifdef USE_LOCK_PROFILE
86  kmp_uint32 curr = KMP_LOCK_STRIP(TCR_4(lck->lk.poll));
87  if ((curr != 0) && (curr != gtid + 1))
88  __kmp_printf("LOCK CONTENTION: %p\n", lck);
89 /* else __kmp_printf( "." );*/
90 #endif /* USE_LOCK_PROFILE */
91 
92  if ((lck->lk.poll == KMP_LOCK_FREE(tas)) &&
93  KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(tas),
94  KMP_LOCK_BUSY(gtid + 1, tas))) {
95  KMP_FSYNC_ACQUIRED(lck);
96  return KMP_LOCK_ACQUIRED_FIRST;
97  }
98 
99  kmp_uint32 spins;
100  KMP_FSYNC_PREPARE(lck);
101  KMP_INIT_YIELD(spins);
102  if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
103  KMP_YIELD(TRUE);
104  } else {
105  KMP_YIELD_SPIN(spins);
106  }
107 
108  kmp_backoff_t backoff = __kmp_spin_backoff_params;
109  while ((lck->lk.poll != KMP_LOCK_FREE(tas)) ||
110  (!KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(tas),
111  KMP_LOCK_BUSY(gtid + 1, tas)))) {
112 
113  __kmp_spin_backoff(&backoff);
114  if (TCR_4(__kmp_nth) >
115  (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
116  KMP_YIELD(TRUE);
117  } else {
118  KMP_YIELD_SPIN(spins);
119  }
120  }
121  KMP_FSYNC_ACQUIRED(lck);
122  return KMP_LOCK_ACQUIRED_FIRST;
123 }
124 
125 int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
126  int retval = __kmp_acquire_tas_lock_timed_template(lck, gtid);
127  ANNOTATE_TAS_ACQUIRED(lck);
128  return retval;
129 }
130 
131 static int __kmp_acquire_tas_lock_with_checks(kmp_tas_lock_t *lck,
132  kmp_int32 gtid) {
133  char const *const func = "omp_set_lock";
134  if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
135  __kmp_is_tas_lock_nestable(lck)) {
136  KMP_FATAL(LockNestableUsedAsSimple, func);
137  }
138  if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) == gtid)) {
139  KMP_FATAL(LockIsAlreadyOwned, func);
140  }
141  return __kmp_acquire_tas_lock(lck, gtid);
142 }
143 
144 int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
145  if ((lck->lk.poll == KMP_LOCK_FREE(tas)) &&
146  KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(tas),
147  KMP_LOCK_BUSY(gtid + 1, tas))) {
148  KMP_FSYNC_ACQUIRED(lck);
149  return TRUE;
150  }
151  return FALSE;
152 }
153 
154 static int __kmp_test_tas_lock_with_checks(kmp_tas_lock_t *lck,
155  kmp_int32 gtid) {
156  char const *const func = "omp_test_lock";
157  if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
158  __kmp_is_tas_lock_nestable(lck)) {
159  KMP_FATAL(LockNestableUsedAsSimple, func);
160  }
161  return __kmp_test_tas_lock(lck, gtid);
162 }
163 
164 int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
165  KMP_MB(); /* Flush all pending memory write invalidates. */
166 
167  KMP_FSYNC_RELEASING(lck);
168  ANNOTATE_TAS_RELEASED(lck);
169  KMP_ST_REL32(&(lck->lk.poll), KMP_LOCK_FREE(tas));
170  KMP_MB(); /* Flush all pending memory write invalidates. */
171 
172  KMP_YIELD(TCR_4(__kmp_nth) >
173  (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
174  return KMP_LOCK_RELEASED;
175 }
176 
177 static int __kmp_release_tas_lock_with_checks(kmp_tas_lock_t *lck,
178  kmp_int32 gtid) {
179  char const *const func = "omp_unset_lock";
180  KMP_MB(); /* in case another processor initialized lock */
181  if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
182  __kmp_is_tas_lock_nestable(lck)) {
183  KMP_FATAL(LockNestableUsedAsSimple, func);
184  }
185  if (__kmp_get_tas_lock_owner(lck) == -1) {
186  KMP_FATAL(LockUnsettingFree, func);
187  }
188  if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) >= 0) &&
189  (__kmp_get_tas_lock_owner(lck) != gtid)) {
190  KMP_FATAL(LockUnsettingSetByAnother, func);
191  }
192  return __kmp_release_tas_lock(lck, gtid);
193 }
194 
195 void __kmp_init_tas_lock(kmp_tas_lock_t *lck) {
196  TCW_4(lck->lk.poll, KMP_LOCK_FREE(tas));
197 }
198 
199 static void __kmp_init_tas_lock_with_checks(kmp_tas_lock_t *lck) {
200  __kmp_init_tas_lock(lck);
201 }
202 
203 void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck) { lck->lk.poll = 0; }
204 
205 static void __kmp_destroy_tas_lock_with_checks(kmp_tas_lock_t *lck) {
206  char const *const func = "omp_destroy_lock";
207  if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
208  __kmp_is_tas_lock_nestable(lck)) {
209  KMP_FATAL(LockNestableUsedAsSimple, func);
210  }
211  if (__kmp_get_tas_lock_owner(lck) != -1) {
212  KMP_FATAL(LockStillOwned, func);
213  }
214  __kmp_destroy_tas_lock(lck);
215 }
216 
217 // nested test and set locks
218 
219 int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
220  KMP_DEBUG_ASSERT(gtid >= 0);
221 
222  if (__kmp_get_tas_lock_owner(lck) == gtid) {
223  lck->lk.depth_locked += 1;
224  return KMP_LOCK_ACQUIRED_NEXT;
225  } else {
226  __kmp_acquire_tas_lock_timed_template(lck, gtid);
227  ANNOTATE_TAS_ACQUIRED(lck);
228  lck->lk.depth_locked = 1;
229  return KMP_LOCK_ACQUIRED_FIRST;
230  }
231 }
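// Worked example: a thread that already owns the lock and calls
// omp_set_nest_lock again takes the first branch and just bumps depth_locked
// from 1 to 2; each matching omp_unset_nest_lock counts it back down, and only
// the call that brings it to 0 releases the underlying test-and-set lock.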
232 
233 static int __kmp_acquire_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
234  kmp_int32 gtid) {
235  char const *const func = "omp_set_nest_lock";
236  if (!__kmp_is_tas_lock_nestable(lck)) {
237  KMP_FATAL(LockSimpleUsedAsNestable, func);
238  }
239  return __kmp_acquire_nested_tas_lock(lck, gtid);
240 }
241 
242 int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
243  int retval;
244 
245  KMP_DEBUG_ASSERT(gtid >= 0);
246 
247  if (__kmp_get_tas_lock_owner(lck) == gtid) {
248  retval = ++lck->lk.depth_locked;
249  } else if (!__kmp_test_tas_lock(lck, gtid)) {
250  retval = 0;
251  } else {
252  KMP_MB();
253  retval = lck->lk.depth_locked = 1;
254  }
255  return retval;
256 }
257 
258 static int __kmp_test_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
259  kmp_int32 gtid) {
260  char const *const func = "omp_test_nest_lock";
261  if (!__kmp_is_tas_lock_nestable(lck)) {
262  KMP_FATAL(LockSimpleUsedAsNestable, func);
263  }
264  return __kmp_test_nested_tas_lock(lck, gtid);
265 }
266 
267 int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
268  KMP_DEBUG_ASSERT(gtid >= 0);
269 
270  KMP_MB();
271  if (--(lck->lk.depth_locked) == 0) {
272  __kmp_release_tas_lock(lck, gtid);
273  return KMP_LOCK_RELEASED;
274  }
275  return KMP_LOCK_STILL_HELD;
276 }
277 
278 static int __kmp_release_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
279  kmp_int32 gtid) {
280  char const *const func = "omp_unset_nest_lock";
281  KMP_MB(); /* in case another processor initialized lock */
282  if (!__kmp_is_tas_lock_nestable(lck)) {
283  KMP_FATAL(LockSimpleUsedAsNestable, func);
284  }
285  if (__kmp_get_tas_lock_owner(lck) == -1) {
286  KMP_FATAL(LockUnsettingFree, func);
287  }
288  if (__kmp_get_tas_lock_owner(lck) != gtid) {
289  KMP_FATAL(LockUnsettingSetByAnother, func);
290  }
291  return __kmp_release_nested_tas_lock(lck, gtid);
292 }
293 
294 void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck) {
295  __kmp_init_tas_lock(lck);
296  lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
297 }
298 
299 static void __kmp_init_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
300  __kmp_init_nested_tas_lock(lck);
301 }
302 
303 void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck) {
304  __kmp_destroy_tas_lock(lck);
305  lck->lk.depth_locked = 0;
306 }
307 
308 static void __kmp_destroy_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
309  char const *const func = "omp_destroy_nest_lock";
310  if (!__kmp_is_tas_lock_nestable(lck)) {
311  KMP_FATAL(LockSimpleUsedAsNestable, func);
312  }
313  if (__kmp_get_tas_lock_owner(lck) != -1) {
314  KMP_FATAL(LockStillOwned, func);
315  }
316  __kmp_destroy_nested_tas_lock(lck);
317 }
318 
319 #if KMP_USE_FUTEX
320 
321 /* ------------------------------------------------------------------------ */
322 /* futex locks */
323 
324 // futex locks are really just test and set locks, with a different method
325 // of handling contention. They take the same amount of space as test and
326 // set locks, and are allocated the same way (i.e. use the area allocated by
327 // the compiler for non-nested locks / allocate nested locks on the heap).
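// Hedged sketch (excluded from the build) of how the acquire and release paths
// below pair futex_wait with futex_wake; 'poll' and 'observed' are
// hypothetical names. In the real code the poll word holds
// KMP_LOCK_FREE(futex) when free, KMP_LOCK_BUSY((gtid + 1) << 1, futex) when
// held, and the waiter bit (KMP_LOCK_BUSY(1, futex)) once a thread has slept.
#if 0
static void example_block_until_released(volatile kmp_int32 *poll,
                                          kmp_int32 observed) {
  // Sleeps only while *poll still equals 'observed'; a stale value means the
  // owner already released, so the caller simply retries its CAS.
  syscall(__NR_futex, poll, FUTEX_WAIT, observed, NULL, NULL, 0);
}
static void example_wake_one_waiter(volatile kmp_int32 *poll) {
  // Called by the releasing thread when it observed the waiter bit set.
  syscall(__NR_futex, poll, FUTEX_WAKE, 1, NULL, NULL, 0);
}
#endif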
328 
329 static kmp_int32 __kmp_get_futex_lock_owner(kmp_futex_lock_t *lck) {
330  return KMP_LOCK_STRIP((TCR_4(lck->lk.poll) >> 1)) - 1;
331 }
332 
333 static inline bool __kmp_is_futex_lock_nestable(kmp_futex_lock_t *lck) {
334  return lck->lk.depth_locked != -1;
335 }
336 
337 __forceinline static int
338 __kmp_acquire_futex_lock_timed_template(kmp_futex_lock_t *lck, kmp_int32 gtid) {
339  kmp_int32 gtid_code = (gtid + 1) << 1;
340 
341  KMP_MB();
342 
343 #ifdef USE_LOCK_PROFILE
344  kmp_uint32 curr = KMP_LOCK_STRIP(TCR_4(lck->lk.poll));
345  if ((curr != 0) && (curr != gtid_code))
346  __kmp_printf("LOCK CONTENTION: %p\n", lck);
347 /* else __kmp_printf( "." );*/
348 #endif /* USE_LOCK_PROFILE */
349 
350  KMP_FSYNC_PREPARE(lck);
351  KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d entering\n",
352  lck, lck->lk.poll, gtid));
353 
354  kmp_int32 poll_val;
355 
356  while ((poll_val = KMP_COMPARE_AND_STORE_RET32(
357  &(lck->lk.poll), KMP_LOCK_FREE(futex),
358  KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) {
359 
360  kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1;
361  KA_TRACE(
362  1000,
363  ("__kmp_acquire_futex_lock: lck:%p, T#%d poll_val = 0x%x cond = 0x%x\n",
364  lck, gtid, poll_val, cond));
365 
366  // NOTE: if you try to use the following condition for this branch,
367  //
368  // if ( poll_val & 1 == 0 )
369  //
370  // then the block below is always skipped, regardless of the value of the LSB
371  // of poll_val: == binds tighter than &, so the test reduces to poll_val & 0.
372  if (!cond) {
373  // Try to set the lsb in the poll to indicate to the owner
374  // thread that they need to wake this thread up.
375  if (!KMP_COMPARE_AND_STORE_REL32(&(lck->lk.poll), poll_val,
376  poll_val | KMP_LOCK_BUSY(1, futex))) {
377  KA_TRACE(
378  1000,
379  ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d can't set bit 0\n",
380  lck, lck->lk.poll, gtid));
381  continue;
382  }
383  poll_val |= KMP_LOCK_BUSY(1, futex);
384 
385  KA_TRACE(1000,
386  ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d bit 0 set\n", lck,
387  lck->lk.poll, gtid));
388  }
389 
390  KA_TRACE(
391  1000,
392  ("__kmp_acquire_futex_lock: lck:%p, T#%d before futex_wait(0x%x)\n",
393  lck, gtid, poll_val));
394 
395  kmp_int32 rc;
396  if ((rc = syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAIT, poll_val, NULL,
397  NULL, 0)) != 0) {
398  KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d futex_wait(0x%x) "
399  "failed (rc=%d errno=%d)\n",
400  lck, gtid, poll_val, rc, errno));
401  continue;
402  }
403 
404  KA_TRACE(1000,
405  ("__kmp_acquire_futex_lock: lck:%p, T#%d after futex_wait(0x%x)\n",
406  lck, gtid, poll_val));
407  // This thread has now completed a successful futex_wait call and has been
408  // placed on the OS futex queue. We must perform a futex_wake call when
409  // releasing the lock, as we have no idea how many other threads are queued.
410  gtid_code |= 1;
411  }
412 
413  KMP_FSYNC_ACQUIRED(lck);
414  KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d exiting\n", lck,
415  lck->lk.poll, gtid));
416  return KMP_LOCK_ACQUIRED_FIRST;
417 }
418 
419 int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
420  int retval = __kmp_acquire_futex_lock_timed_template(lck, gtid);
421  ANNOTATE_FUTEX_ACQUIRED(lck);
422  return retval;
423 }
424 
425 static int __kmp_acquire_futex_lock_with_checks(kmp_futex_lock_t *lck,
426  kmp_int32 gtid) {
427  char const *const func = "omp_set_lock";
428  if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
429  __kmp_is_futex_lock_nestable(lck)) {
430  KMP_FATAL(LockNestableUsedAsSimple, func);
431  }
432  if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) == gtid)) {
433  KMP_FATAL(LockIsAlreadyOwned, func);
434  }
435  return __kmp_acquire_futex_lock(lck, gtid);
436 }
437 
438 int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
439  if (KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(futex),
440  KMP_LOCK_BUSY((gtid + 1) << 1, futex))) {
441  KMP_FSYNC_ACQUIRED(lck);
442  return TRUE;
443  }
444  return FALSE;
445 }
446 
447 static int __kmp_test_futex_lock_with_checks(kmp_futex_lock_t *lck,
448  kmp_int32 gtid) {
449  char const *const func = "omp_test_lock";
450  if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
451  __kmp_is_futex_lock_nestable(lck)) {
452  KMP_FATAL(LockNestableUsedAsSimple, func);
453  }
454  return __kmp_test_futex_lock(lck, gtid);
455 }
456 
457 int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
458  KMP_MB(); /* Flush all pending memory write invalidates. */
459 
460  KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d entering\n",
461  lck, lck->lk.poll, gtid));
462 
463  KMP_FSYNC_RELEASING(lck);
464  ANNOTATE_FUTEX_RELEASED(lck);
465 
466  kmp_int32 poll_val = KMP_XCHG_FIXED32(&(lck->lk.poll), KMP_LOCK_FREE(futex));
467 
468  KA_TRACE(1000,
469  ("__kmp_release_futex_lock: lck:%p, T#%d released poll_val = 0x%x\n",
470  lck, gtid, poll_val));
471 
472  if (KMP_LOCK_STRIP(poll_val) & 1) {
473  KA_TRACE(1000,
474  ("__kmp_release_futex_lock: lck:%p, T#%d futex_wake 1 thread\n",
475  lck, gtid));
476  syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex),
477  NULL, NULL, 0);
478  }
479 
480  KMP_MB(); /* Flush all pending memory write invalidates. */
481 
482  KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d exiting\n", lck,
483  lck->lk.poll, gtid));
484 
485  KMP_YIELD(TCR_4(__kmp_nth) >
486  (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
487  return KMP_LOCK_RELEASED;
488 }
489 
490 static int __kmp_release_futex_lock_with_checks(kmp_futex_lock_t *lck,
491  kmp_int32 gtid) {
492  char const *const func = "omp_unset_lock";
493  KMP_MB(); /* in case another processor initialized lock */
494  if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
495  __kmp_is_futex_lock_nestable(lck)) {
496  KMP_FATAL(LockNestableUsedAsSimple, func);
497  }
498  if (__kmp_get_futex_lock_owner(lck) == -1) {
499  KMP_FATAL(LockUnsettingFree, func);
500  }
501  if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) >= 0) &&
502  (__kmp_get_futex_lock_owner(lck) != gtid)) {
503  KMP_FATAL(LockUnsettingSetByAnother, func);
504  }
505  return __kmp_release_futex_lock(lck, gtid);
506 }
507 
508 void __kmp_init_futex_lock(kmp_futex_lock_t *lck) {
509  TCW_4(lck->lk.poll, KMP_LOCK_FREE(futex));
510 }
511 
512 static void __kmp_init_futex_lock_with_checks(kmp_futex_lock_t *lck) {
513  __kmp_init_futex_lock(lck);
514 }
515 
516 void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck) { lck->lk.poll = 0; }
517 
518 static void __kmp_destroy_futex_lock_with_checks(kmp_futex_lock_t *lck) {
519  char const *const func = "omp_destroy_lock";
520  if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
521  __kmp_is_futex_lock_nestable(lck)) {
522  KMP_FATAL(LockNestableUsedAsSimple, func);
523  }
524  if (__kmp_get_futex_lock_owner(lck) != -1) {
525  KMP_FATAL(LockStillOwned, func);
526  }
527  __kmp_destroy_futex_lock(lck);
528 }
529 
530 // nested futex locks
531 
532 int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
533  KMP_DEBUG_ASSERT(gtid >= 0);
534 
535  if (__kmp_get_futex_lock_owner(lck) == gtid) {
536  lck->lk.depth_locked += 1;
537  return KMP_LOCK_ACQUIRED_NEXT;
538  } else {
539  __kmp_acquire_futex_lock_timed_template(lck, gtid);
540  ANNOTATE_FUTEX_ACQUIRED(lck);
541  lck->lk.depth_locked = 1;
542  return KMP_LOCK_ACQUIRED_FIRST;
543  }
544 }
545 
546 static int __kmp_acquire_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
547  kmp_int32 gtid) {
548  char const *const func = "omp_set_nest_lock";
549  if (!__kmp_is_futex_lock_nestable(lck)) {
550  KMP_FATAL(LockSimpleUsedAsNestable, func);
551  }
552  return __kmp_acquire_nested_futex_lock(lck, gtid);
553 }
554 
555 int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
556  int retval;
557 
558  KMP_DEBUG_ASSERT(gtid >= 0);
559 
560  if (__kmp_get_futex_lock_owner(lck) == gtid) {
561  retval = ++lck->lk.depth_locked;
562  } else if (!__kmp_test_futex_lock(lck, gtid)) {
563  retval = 0;
564  } else {
565  KMP_MB();
566  retval = lck->lk.depth_locked = 1;
567  }
568  return retval;
569 }
570 
571 static int __kmp_test_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
572  kmp_int32 gtid) {
573  char const *const func = "omp_test_nest_lock";
574  if (!__kmp_is_futex_lock_nestable(lck)) {
575  KMP_FATAL(LockSimpleUsedAsNestable, func);
576  }
577  return __kmp_test_nested_futex_lock(lck, gtid);
578 }
579 
580 int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
581  KMP_DEBUG_ASSERT(gtid >= 0);
582 
583  KMP_MB();
584  if (--(lck->lk.depth_locked) == 0) {
585  __kmp_release_futex_lock(lck, gtid);
586  return KMP_LOCK_RELEASED;
587  }
588  return KMP_LOCK_STILL_HELD;
589 }
590 
591 static int __kmp_release_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
592  kmp_int32 gtid) {
593  char const *const func = "omp_unset_nest_lock";
594  KMP_MB(); /* in case another processor initialized lock */
595  if (!__kmp_is_futex_lock_nestable(lck)) {
596  KMP_FATAL(LockSimpleUsedAsNestable, func);
597  }
598  if (__kmp_get_futex_lock_owner(lck) == -1) {
599  KMP_FATAL(LockUnsettingFree, func);
600  }
601  if (__kmp_get_futex_lock_owner(lck) != gtid) {
602  KMP_FATAL(LockUnsettingSetByAnother, func);
603  }
604  return __kmp_release_nested_futex_lock(lck, gtid);
605 }
606 
607 void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck) {
608  __kmp_init_futex_lock(lck);
609  lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
610 }
611 
612 static void __kmp_init_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
613  __kmp_init_nested_futex_lock(lck);
614 }
615 
616 void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck) {
617  __kmp_destroy_futex_lock(lck);
618  lck->lk.depth_locked = 0;
619 }
620 
621 static void __kmp_destroy_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
622  char const *const func = "omp_destroy_nest_lock";
623  if (!__kmp_is_futex_lock_nestable(lck)) {
624  KMP_FATAL(LockSimpleUsedAsNestable, func);
625  }
626  if (__kmp_get_futex_lock_owner(lck) != -1) {
627  KMP_FATAL(LockStillOwned, func);
628  }
629  __kmp_destroy_nested_futex_lock(lck);
630 }
631 
632 #endif // KMP_USE_FUTEX
633 
634 /* ------------------------------------------------------------------------ */
635 /* ticket (bakery) locks */
636 
637 static kmp_int32 __kmp_get_ticket_lock_owner(kmp_ticket_lock_t *lck) {
638  return std::atomic_load_explicit(&lck->lk.owner_id,
639  std::memory_order_relaxed) -
640  1;
641 }
642 
643 static inline bool __kmp_is_ticket_lock_nestable(kmp_ticket_lock_t *lck) {
644  return std::atomic_load_explicit(&lck->lk.depth_locked,
645  std::memory_order_relaxed) != -1;
646 }
647 
648 static kmp_uint32 __kmp_bakery_check(void *now_serving, kmp_uint32 my_ticket) {
649  return std::atomic_load_explicit((std::atomic<unsigned> *)now_serving,
650  std::memory_order_acquire) == my_ticket;
651 }
652 
653 __forceinline static int
654 __kmp_acquire_ticket_lock_timed_template(kmp_ticket_lock_t *lck,
655  kmp_int32 gtid) {
656  kmp_uint32 my_ticket = std::atomic_fetch_add_explicit(
657  &lck->lk.next_ticket, 1U, std::memory_order_relaxed);
658 
659 #ifdef USE_LOCK_PROFILE
660  if (std::atomic_load_explicit(&lck->lk.now_serving,
661  std::memory_order_relaxed) != my_ticket)
662  __kmp_printf("LOCK CONTENTION: %p\n", lck);
663 /* else __kmp_printf( "." );*/
664 #endif /* USE_LOCK_PROFILE */
665 
666  if (std::atomic_load_explicit(&lck->lk.now_serving,
667  std::memory_order_acquire) == my_ticket) {
668  return KMP_LOCK_ACQUIRED_FIRST;
669  }
670  KMP_WAIT_YIELD_PTR(&lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck);
671  return KMP_LOCK_ACQUIRED_FIRST;
672 }
673 
674 int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
675  int retval = __kmp_acquire_ticket_lock_timed_template(lck, gtid);
676  ANNOTATE_TICKET_ACQUIRED(lck);
677  return retval;
678 }
679 
680 static int __kmp_acquire_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
681  kmp_int32 gtid) {
682  char const *const func = "omp_set_lock";
683 
684  if (!std::atomic_load_explicit(&lck->lk.initialized,
685  std::memory_order_relaxed)) {
686  KMP_FATAL(LockIsUninitialized, func);
687  }
688  if (lck->lk.self != lck) {
689  KMP_FATAL(LockIsUninitialized, func);
690  }
691  if (__kmp_is_ticket_lock_nestable(lck)) {
692  KMP_FATAL(LockNestableUsedAsSimple, func);
693  }
694  if ((gtid >= 0) && (__kmp_get_ticket_lock_owner(lck) == gtid)) {
695  KMP_FATAL(LockIsAlreadyOwned, func);
696  }
697 
698  __kmp_acquire_ticket_lock(lck, gtid);
699 
700  std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
701  std::memory_order_relaxed);
702  return KMP_LOCK_ACQUIRED_FIRST;
703 }
704 
705 int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
706  kmp_uint32 my_ticket = std::atomic_load_explicit(&lck->lk.next_ticket,
707  std::memory_order_relaxed);
708 
709  if (std::atomic_load_explicit(&lck->lk.now_serving,
710  std::memory_order_relaxed) == my_ticket) {
711  kmp_uint32 next_ticket = my_ticket + 1;
712  if (std::atomic_compare_exchange_strong_explicit(
713  &lck->lk.next_ticket, &my_ticket, next_ticket,
714  std::memory_order_acquire, std::memory_order_acquire)) {
715  return TRUE;
716  }
717  }
718  return FALSE;
719 }
720 
721 static int __kmp_test_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
722  kmp_int32 gtid) {
723  char const *const func = "omp_test_lock";
724 
725  if (!std::atomic_load_explicit(&lck->lk.initialized,
726  std::memory_order_relaxed)) {
727  KMP_FATAL(LockIsUninitialized, func);
728  }
729  if (lck->lk.self != lck) {
730  KMP_FATAL(LockIsUninitialized, func);
731  }
732  if (__kmp_is_ticket_lock_nestable(lck)) {
733  KMP_FATAL(LockNestableUsedAsSimple, func);
734  }
735 
736  int retval = __kmp_test_ticket_lock(lck, gtid);
737 
738  if (retval) {
739  std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
740  std::memory_order_relaxed);
741  }
742  return retval;
743 }
744 
745 int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
746  kmp_uint32 distance = std::atomic_load_explicit(&lck->lk.next_ticket,
747  std::memory_order_relaxed) -
748  std::atomic_load_explicit(&lck->lk.now_serving,
749  std::memory_order_relaxed);
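  // next_ticket - now_serving counts the threads currently holding or waiting
  // for this lock; the KMP_YIELD below yields only when that count exceeds the
  // number of available processors.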
750 
751  ANNOTATE_TICKET_RELEASED(lck);
752  std::atomic_fetch_add_explicit(&lck->lk.now_serving, 1U,
753  std::memory_order_release);
754 
755  KMP_YIELD(distance >
756  (kmp_uint32)(__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
757  return KMP_LOCK_RELEASED;
758 }
759 
760 static int __kmp_release_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
761  kmp_int32 gtid) {
762  char const *const func = "omp_unset_lock";
763 
764  if (!std::atomic_load_explicit(&lck->lk.initialized,
765  std::memory_order_relaxed)) {
766  KMP_FATAL(LockIsUninitialized, func);
767  }
768  if (lck->lk.self != lck) {
769  KMP_FATAL(LockIsUninitialized, func);
770  }
771  if (__kmp_is_ticket_lock_nestable(lck)) {
772  KMP_FATAL(LockNestableUsedAsSimple, func);
773  }
774  if (__kmp_get_ticket_lock_owner(lck) == -1) {
775  KMP_FATAL(LockUnsettingFree, func);
776  }
777  if ((gtid >= 0) && (__kmp_get_ticket_lock_owner(lck) >= 0) &&
778  (__kmp_get_ticket_lock_owner(lck) != gtid)) {
779  KMP_FATAL(LockUnsettingSetByAnother, func);
780  }
781  std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
782  return __kmp_release_ticket_lock(lck, gtid);
783 }
784 
785 void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck) {
786  lck->lk.location = NULL;
787  lck->lk.self = lck;
788  std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
789  std::memory_order_relaxed);
790  std::atomic_store_explicit(&lck->lk.now_serving, 0U,
791  std::memory_order_relaxed);
792  std::atomic_store_explicit(
793  &lck->lk.owner_id, 0,
794  std::memory_order_relaxed); // no thread owns the lock.
795  std::atomic_store_explicit(
796  &lck->lk.depth_locked, -1,
797  std::memory_order_relaxed); // -1 => not a nested lock.
798  std::atomic_store_explicit(&lck->lk.initialized, true,
799  std::memory_order_release);
800 }
801 
802 static void __kmp_init_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
803  __kmp_init_ticket_lock(lck);
804 }
805 
806 void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck) {
807  std::atomic_store_explicit(&lck->lk.initialized, false,
808  std::memory_order_release);
809  lck->lk.self = NULL;
810  lck->lk.location = NULL;
811  std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
812  std::memory_order_relaxed);
813  std::atomic_store_explicit(&lck->lk.now_serving, 0U,
814  std::memory_order_relaxed);
815  std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
816  std::atomic_store_explicit(&lck->lk.depth_locked, -1,
817  std::memory_order_relaxed);
818 }
819 
820 static void __kmp_destroy_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
821  char const *const func = "omp_destroy_lock";
822 
823  if (!std::atomic_load_explicit(&lck->lk.initialized,
824  std::memory_order_relaxed)) {
825  KMP_FATAL(LockIsUninitialized, func);
826  }
827  if (lck->lk.self != lck) {
828  KMP_FATAL(LockIsUninitialized, func);
829  }
830  if (__kmp_is_ticket_lock_nestable(lck)) {
831  KMP_FATAL(LockNestableUsedAsSimple, func);
832  }
833  if (__kmp_get_ticket_lock_owner(lck) != -1) {
834  KMP_FATAL(LockStillOwned, func);
835  }
836  __kmp_destroy_ticket_lock(lck);
837 }
838 
839 // nested ticket locks
840 
841 int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
842  KMP_DEBUG_ASSERT(gtid >= 0);
843 
844  if (__kmp_get_ticket_lock_owner(lck) == gtid) {
845  std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
846  std::memory_order_relaxed);
847  return KMP_LOCK_ACQUIRED_NEXT;
848  } else {
849  __kmp_acquire_ticket_lock_timed_template(lck, gtid);
850  ANNOTATE_TICKET_ACQUIRED(lck);
851  std::atomic_store_explicit(&lck->lk.depth_locked, 1,
852  std::memory_order_relaxed);
853  std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
854  std::memory_order_relaxed);
855  return KMP_LOCK_ACQUIRED_FIRST;
856  }
857 }
858 
859 static int __kmp_acquire_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
860  kmp_int32 gtid) {
861  char const *const func = "omp_set_nest_lock";
862 
863  if (!std::atomic_load_explicit(&lck->lk.initialized,
864  std::memory_order_relaxed)) {
865  KMP_FATAL(LockIsUninitialized, func);
866  }
867  if (lck->lk.self != lck) {
868  KMP_FATAL(LockIsUninitialized, func);
869  }
870  if (!__kmp_is_ticket_lock_nestable(lck)) {
871  KMP_FATAL(LockSimpleUsedAsNestable, func);
872  }
873  return __kmp_acquire_nested_ticket_lock(lck, gtid);
874 }
875 
876 int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
877  int retval;
878 
879  KMP_DEBUG_ASSERT(gtid >= 0);
880 
881  if (__kmp_get_ticket_lock_owner(lck) == gtid) {
882  retval = std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
883  std::memory_order_relaxed) +
884  1;
885  } else if (!__kmp_test_ticket_lock(lck, gtid)) {
886  retval = 0;
887  } else {
888  std::atomic_store_explicit(&lck->lk.depth_locked, 1,
889  std::memory_order_relaxed);
890  std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
891  std::memory_order_relaxed);
892  retval = 1;
893  }
894  return retval;
895 }
896 
897 static int __kmp_test_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
898  kmp_int32 gtid) {
899  char const *const func = "omp_test_nest_lock";
900 
901  if (!std::atomic_load_explicit(&lck->lk.initialized,
902  std::memory_order_relaxed)) {
903  KMP_FATAL(LockIsUninitialized, func);
904  }
905  if (lck->lk.self != lck) {
906  KMP_FATAL(LockIsUninitialized, func);
907  }
908  if (!__kmp_is_ticket_lock_nestable(lck)) {
909  KMP_FATAL(LockSimpleUsedAsNestable, func);
910  }
911  return __kmp_test_nested_ticket_lock(lck, gtid);
912 }
913 
914 int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
915  KMP_DEBUG_ASSERT(gtid >= 0);
916 
917  if ((std::atomic_fetch_add_explicit(&lck->lk.depth_locked, -1,
918  std::memory_order_relaxed) -
919  1) == 0) {
920  std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
921  __kmp_release_ticket_lock(lck, gtid);
922  return KMP_LOCK_RELEASED;
923  }
924  return KMP_LOCK_STILL_HELD;
925 }
926 
927 static int __kmp_release_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
928  kmp_int32 gtid) {
929  char const *const func = "omp_unset_nest_lock";
930 
931  if (!std::atomic_load_explicit(&lck->lk.initialized,
932  std::memory_order_relaxed)) {
933  KMP_FATAL(LockIsUninitialized, func);
934  }
935  if (lck->lk.self != lck) {
936  KMP_FATAL(LockIsUninitialized, func);
937  }
938  if (!__kmp_is_ticket_lock_nestable(lck)) {
939  KMP_FATAL(LockSimpleUsedAsNestable, func);
940  }
941  if (__kmp_get_ticket_lock_owner(lck) == -1) {
942  KMP_FATAL(LockUnsettingFree, func);
943  }
944  if (__kmp_get_ticket_lock_owner(lck) != gtid) {
945  KMP_FATAL(LockUnsettingSetByAnother, func);
946  }
947  return __kmp_release_nested_ticket_lock(lck, gtid);
948 }
949 
950 void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck) {
951  __kmp_init_ticket_lock(lck);
952  std::atomic_store_explicit(&lck->lk.depth_locked, 0,
953  std::memory_order_relaxed);
954  // >= 0 for nestable locks, -1 for simple locks
955 }
956 
957 static void __kmp_init_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
958  __kmp_init_nested_ticket_lock(lck);
959 }
960 
961 void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck) {
962  __kmp_destroy_ticket_lock(lck);
963  std::atomic_store_explicit(&lck->lk.depth_locked, 0,
964  std::memory_order_relaxed);
965 }
966 
967 static void
968 __kmp_destroy_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
969  char const *const func = "omp_destroy_nest_lock";
970 
971  if (!std::atomic_load_explicit(&lck->lk.initialized,
972  std::memory_order_relaxed)) {
973  KMP_FATAL(LockIsUninitialized, func);
974  }
975  if (lck->lk.self != lck) {
976  KMP_FATAL(LockIsUninitialized, func);
977  }
978  if (!__kmp_is_ticket_lock_nestable(lck)) {
979  KMP_FATAL(LockSimpleUsedAsNestable, func);
980  }
981  if (__kmp_get_ticket_lock_owner(lck) != -1) {
982  KMP_FATAL(LockStillOwned, func);
983  }
984  __kmp_destroy_nested_ticket_lock(lck);
985 }
986 
987 // access functions to fields which don't exist for all lock kinds.
988 
989 static int __kmp_is_ticket_lock_initialized(kmp_ticket_lock_t *lck) {
990  return std::atomic_load_explicit(&lck->lk.initialized,
991  std::memory_order_relaxed) &&
992  (lck->lk.self == lck);
993 }
994 
995 static const ident_t *__kmp_get_ticket_lock_location(kmp_ticket_lock_t *lck) {
996  return lck->lk.location;
997 }
998 
999 static void __kmp_set_ticket_lock_location(kmp_ticket_lock_t *lck,
1000  const ident_t *loc) {
1001  lck->lk.location = loc;
1002 }
1003 
1004 static kmp_lock_flags_t __kmp_get_ticket_lock_flags(kmp_ticket_lock_t *lck) {
1005  return lck->lk.flags;
1006 }
1007 
1008 static void __kmp_set_ticket_lock_flags(kmp_ticket_lock_t *lck,
1009  kmp_lock_flags_t flags) {
1010  lck->lk.flags = flags;
1011 }
1012 
1013 /* ------------------------------------------------------------------------ */
1014 /* queuing locks */
1015 
1016 /* First the states
1017    (head,tail) =               0, 0  means lock is unheld, nobody on queue
1018                  UINT_MAX or -1, 0  means lock is held, nobody on queue
1019                                h, h  means lock held or about to transition,
1020                                      1 element on queue
1021                                h, t  h <> t, means lock is held or about to
1022                                      transition, >1 elements on queue
1023
1024    Now the transitions
1025       Acquire(0,0)  = -1 ,0
1026       Release(0,0)  = Error
1027       Acquire(-1,0) =  h ,h    h > 0
1028       Release(-1,0) =  0 ,0
1029       Acquire(h,h)  =  h ,t    h > 0, t > 0, h <> t
1030       Release(h,h)  = -1 ,0    h > 0
1031       Acquire(h,t)  =  h ,t'   h > 0, t > 0, t' > 0, h <> t, h <> t', t <> t'
1032       Release(h,t)  = h',t     h > 0, t > 0, h <> t, h <> h', h' maybe = t
1033
1034    And pictorially
1035
1036             +-----+
1037             | 0, 0|------- release -------> Error
1038             +-----+
1039               |  ^
1040        acquire|  |release
1041               |  |
1042               |  |
1043               v  |
1044             +-----+
1045             |-1, 0|
1046             +-----+
1047               |  ^
1048        acquire|  |release
1049               |  |
1050               |  |
1051               v  |
1052             +-----+
1053             | h, h|
1054             +-----+
1055               |  ^
1056        acquire|  |release
1057               |  |
1058               |  |
1059               v  |
1060             +-----+
1061             | h, t|----- acquire, release loopback ---+
1062             +-----+                                   |
1063                 ^                                     |
1064                 |                                     |
1065                 +-------------------------------------+
1066 */
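// Worked example with gtids 0 and 1: T0 acquires an idle lock, (0,0) -> (-1,0).
// T1 then enqueues itself, (-1,0) -> (2,2). When T0 releases, it dequeues T1
// and hands the lock over, (2,2) -> (-1,0), and T1's eventual release returns
// the lock to (0,0).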
1067 
1068 #ifdef DEBUG_QUEUING_LOCKS
1069 
1070 /* Stuff for circular trace buffer */
1071 #define TRACE_BUF_ELE 1024
1072 static char traces[TRACE_BUF_ELE][128] = {0};
1073 static int tc = 0;
1074 #define TRACE_LOCK(X, Y) \
1075  KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s\n", X, Y);
1076 #define TRACE_LOCK_T(X, Y, Z) \
1077  KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s%d\n", X, Y, Z);
1078 #define TRACE_LOCK_HT(X, Y, Z, Q) \
1079  KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s %d,%d\n", X, Y, \
1080  Z, Q);
1081 
1082 static void __kmp_dump_queuing_lock(kmp_info_t *this_thr, kmp_int32 gtid,
1083  kmp_queuing_lock_t *lck, kmp_int32 head_id,
1084  kmp_int32 tail_id) {
1085  kmp_int32 t, i;
1086 
1087  __kmp_printf_no_lock("\n__kmp_dump_queuing_lock: TRACE BEGINS HERE! \n");
1088 
1089  i = tc % TRACE_BUF_ELE;
1090  __kmp_printf_no_lock("%s\n", traces[i]);
1091  i = (i + 1) % TRACE_BUF_ELE;
1092  while (i != (tc % TRACE_BUF_ELE)) {
1093  __kmp_printf_no_lock("%s", traces[i]);
1094  i = (i + 1) % TRACE_BUF_ELE;
1095  }
1096  __kmp_printf_no_lock("\n");
1097 
1098  __kmp_printf_no_lock("\n__kmp_dump_queuing_lock: gtid+1:%d, spin_here:%d, "
1099  "next_wait:%d, head_id:%d, tail_id:%d\n",
1100  gtid + 1, this_thr->th.th_spin_here,
1101  this_thr->th.th_next_waiting, head_id, tail_id);
1102 
1103  __kmp_printf_no_lock("\t\thead: %d ", lck->lk.head_id);
1104 
1105  if (lck->lk.head_id >= 1) {
1106  t = __kmp_threads[lck->lk.head_id - 1]->th.th_next_waiting;
1107  while (t > 0) {
1108  __kmp_printf_no_lock("-> %d ", t);
1109  t = __kmp_threads[t - 1]->th.th_next_waiting;
1110  }
1111  }
1112  __kmp_printf_no_lock("; tail: %d ", lck->lk.tail_id);
1113  __kmp_printf_no_lock("\n\n");
1114 }
1115 
1116 #endif /* DEBUG_QUEUING_LOCKS */
1117 
1118 static kmp_int32 __kmp_get_queuing_lock_owner(kmp_queuing_lock_t *lck) {
1119  return TCR_4(lck->lk.owner_id) - 1;
1120 }
1121 
1122 static inline bool __kmp_is_queuing_lock_nestable(kmp_queuing_lock_t *lck) {
1123  return lck->lk.depth_locked != -1;
1124 }
1125 
1126 /* Acquire a lock using the queuing lock implementation. */
1127 template <bool takeTime>
1128 /* [TLW] The unused template above is left behind because of what BEB believes
1129  is a potential compiler problem with __forceinline. */
1130 __forceinline static int
1131 __kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck,
1132  kmp_int32 gtid) {
1133  kmp_info_t *this_thr = __kmp_thread_from_gtid(gtid);
1134  volatile kmp_int32 *head_id_p = &lck->lk.head_id;
1135  volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
1136  volatile kmp_uint32 *spin_here_p;
1137  kmp_int32 need_mf = 1;
1138 
1139 #if OMPT_SUPPORT
1140  ompt_state_t prev_state = ompt_state_undefined;
1141 #endif
1142 
1143  KA_TRACE(1000,
1144  ("__kmp_acquire_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));
1145 
1146  KMP_FSYNC_PREPARE(lck);
1147  KMP_DEBUG_ASSERT(this_thr != NULL);
1148  spin_here_p = &this_thr->th.th_spin_here;
1149 
1150 #ifdef DEBUG_QUEUING_LOCKS
1151  TRACE_LOCK(gtid + 1, "acq ent");
1152  if (*spin_here_p)
1153  __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
1154  if (this_thr->th.th_next_waiting != 0)
1155  __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
1156 #endif
1157  KMP_DEBUG_ASSERT(!*spin_here_p);
1158  KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
1159 
1160  /* The following st.rel to spin_here_p needs to precede the cmpxchg.acq to
1161  head_id_p that may follow, not just in execution order, but also in
1162  visibility order. This way, when a releasing thread observes the changes to
1163  the queue by this thread, it can rightly assume that spin_here_p has
1164  already been set to TRUE, so that when it sets spin_here_p to FALSE, it is
1165  not premature. If the releasing thread sets spin_here_p to FALSE before
1166  this thread sets it to TRUE, this thread will hang. */
1167  *spin_here_p = TRUE; /* before enqueuing to prevent race */
1168 
1169  while (1) {
1170  kmp_int32 enqueued;
1171  kmp_int32 head;
1172  kmp_int32 tail;
1173 
1174  head = *head_id_p;
1175 
1176  switch (head) {
1177 
1178  case -1: {
1179 #ifdef DEBUG_QUEUING_LOCKS
1180  tail = *tail_id_p;
1181  TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
1182 #endif
1183  tail = 0; /* to make sure next link asynchronously read is not set
1184  accidentally; this assignment prevents us from entering the
1185  if ( t > 0 ) condition in the enqueued case below, which is not
1186  necessary for this state transition */
1187 
1188  need_mf = 0;
1189  /* try (-1,0)->(tid,tid) */
1190  enqueued = KMP_COMPARE_AND_STORE_ACQ64((volatile kmp_int64 *)tail_id_p,
1191  KMP_PACK_64(-1, 0),
1192  KMP_PACK_64(gtid + 1, gtid + 1));
1193 #ifdef DEBUG_QUEUING_LOCKS
1194  if (enqueued)
1195  TRACE_LOCK(gtid + 1, "acq enq: (-1,0)->(tid,tid)");
1196 #endif
1197  } break;
1198 
1199  default: {
1200  tail = *tail_id_p;
1201  KMP_DEBUG_ASSERT(tail != gtid + 1);
1202 
1203 #ifdef DEBUG_QUEUING_LOCKS
1204  TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
1205 #endif
1206 
1207  if (tail == 0) {
1208  enqueued = FALSE;
1209  } else {
1210  need_mf = 0;
1211  /* try (h,t) or (h,h)->(h,tid) */
1212  enqueued = KMP_COMPARE_AND_STORE_ACQ32(tail_id_p, tail, gtid + 1);
1213 
1214 #ifdef DEBUG_QUEUING_LOCKS
1215  if (enqueued)
1216  TRACE_LOCK(gtid + 1, "acq enq: (h,t)->(h,tid)");
1217 #endif
1218  }
1219  } break;
1220 
1221  case 0: /* empty queue */
1222  {
1223  kmp_int32 grabbed_lock;
1224 
1225 #ifdef DEBUG_QUEUING_LOCKS
1226  tail = *tail_id_p;
1227  TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
1228 #endif
1229  /* try (0,0)->(-1,0) */
1230 
1231  /* only legal transition out of head = 0 is head = -1 with no change to
1232  * tail */
1233  grabbed_lock = KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1);
1234 
1235  if (grabbed_lock) {
1236 
1237  *spin_here_p = FALSE;
1238 
1239  KA_TRACE(
1240  1000,
1241  ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: no queuing\n",
1242  lck, gtid));
1243 #ifdef DEBUG_QUEUING_LOCKS
1244  TRACE_LOCK_HT(gtid + 1, "acq exit: ", head, 0);
1245 #endif
1246 
1247 #if OMPT_SUPPORT
1248  if (ompt_enabled && prev_state != ompt_state_undefined) {
1249  /* change the state before clearing wait_id */
1250  this_thr->th.ompt_thread_info.state = prev_state;
1251  this_thr->th.ompt_thread_info.wait_id = 0;
1252  }
1253 #endif
1254 
1255  KMP_FSYNC_ACQUIRED(lck);
1256  return KMP_LOCK_ACQUIRED_FIRST; /* lock holder cannot be on queue */
1257  }
1258  enqueued = FALSE;
1259  } break;
1260  }
1261 
1262 #if OMPT_SUPPORT
1263  if (ompt_enabled && prev_state == ompt_state_undefined) {
1264  /* this thread will spin; set wait_id before entering wait state */
1265  prev_state = this_thr->th.ompt_thread_info.state;
1266  this_thr->th.ompt_thread_info.wait_id = (uint64_t)lck;
1267  this_thr->th.ompt_thread_info.state = ompt_state_wait_lock;
1268  }
1269 #endif
1270 
1271  if (enqueued) {
1272  if (tail > 0) {
1273  kmp_info_t *tail_thr = __kmp_thread_from_gtid(tail - 1);
1274  KMP_ASSERT(tail_thr != NULL);
1275  tail_thr->th.th_next_waiting = gtid + 1;
1276  /* corresponding wait for this write in release code */
1277  }
1278  KA_TRACE(1000,
1279  ("__kmp_acquire_queuing_lock: lck:%p, T#%d waiting for lock\n",
1280  lck, gtid));
1281 
1282  /* ToDo: May want to consider using __kmp_wait_sleep or something that
1283  sleeps for throughput only here. */
1284  KMP_MB();
1285  KMP_WAIT_YIELD(spin_here_p, FALSE, KMP_EQ, lck);
1286 
1287 #ifdef DEBUG_QUEUING_LOCKS
1288  TRACE_LOCK(gtid + 1, "acq spin");
1289 
1290  if (this_thr->th.th_next_waiting != 0)
1291  __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
1292 #endif
1293  KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
1294  KA_TRACE(1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: after "
1295  "waiting on queue\n",
1296  lck, gtid));
1297 
1298 #ifdef DEBUG_QUEUING_LOCKS
1299  TRACE_LOCK(gtid + 1, "acq exit 2");
1300 #endif
1301 
1302 #if OMPT_SUPPORT
1303  /* change the state before clearing wait_id */
1304  this_thr->th.ompt_thread_info.state = prev_state;
1305  this_thr->th.ompt_thread_info.wait_id = 0;
1306 #endif
1307 
1308  /* got lock, we were dequeued by the thread that released lock */
1309  return KMP_LOCK_ACQUIRED_FIRST;
1310  }
1311 
1312  /* Yield if number of threads > number of logical processors */
1313  /* ToDo: Not sure why this should only be in oversubscription case,
1314  maybe should be traditional YIELD_INIT/YIELD_WHEN loop */
1315  KMP_YIELD(TCR_4(__kmp_nth) >
1316  (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
1317 #ifdef DEBUG_QUEUING_LOCKS
1318  TRACE_LOCK(gtid + 1, "acq retry");
1319 #endif
1320  }
1321  KMP_ASSERT2(0, "should not get here");
1322  return KMP_LOCK_ACQUIRED_FIRST;
1323 }
1324 
1325 int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1326  KMP_DEBUG_ASSERT(gtid >= 0);
1327 
1328  int retval = __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
1329  ANNOTATE_QUEUING_ACQUIRED(lck);
1330  return retval;
1331 }
1332 
1333 static int __kmp_acquire_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1334  kmp_int32 gtid) {
1335  char const *const func = "omp_set_lock";
1336  if (lck->lk.initialized != lck) {
1337  KMP_FATAL(LockIsUninitialized, func);
1338  }
1339  if (__kmp_is_queuing_lock_nestable(lck)) {
1340  KMP_FATAL(LockNestableUsedAsSimple, func);
1341  }
1342  if (__kmp_get_queuing_lock_owner(lck) == gtid) {
1343  KMP_FATAL(LockIsAlreadyOwned, func);
1344  }
1345 
1346  __kmp_acquire_queuing_lock(lck, gtid);
1347 
1348  lck->lk.owner_id = gtid + 1;
1349  return KMP_LOCK_ACQUIRED_FIRST;
1350 }
1351 
1352 int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1353  volatile kmp_int32 *head_id_p = &lck->lk.head_id;
1354  kmp_int32 head;
1355 #ifdef KMP_DEBUG
1356  kmp_info_t *this_thr;
1357 #endif
1358 
1359  KA_TRACE(1000, ("__kmp_test_queuing_lock: T#%d entering\n", gtid));
1360  KMP_DEBUG_ASSERT(gtid >= 0);
1361 #ifdef KMP_DEBUG
1362  this_thr = __kmp_thread_from_gtid(gtid);
1363  KMP_DEBUG_ASSERT(this_thr != NULL);
1364  KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
1365 #endif
1366 
1367  head = *head_id_p;
1368 
1369  if (head == 0) { /* nobody on queue, nobody holding */
1370  /* try (0,0)->(-1,0) */
1371  if (KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1)) {
1372  KA_TRACE(1000,
1373  ("__kmp_test_queuing_lock: T#%d exiting: holding lock\n", gtid));
1374  KMP_FSYNC_ACQUIRED(lck);
1375  ANNOTATE_QUEUING_ACQUIRED(lck);
1376  return TRUE;
1377  }
1378  }
1379 
1380  KA_TRACE(1000,
1381  ("__kmp_test_queuing_lock: T#%d exiting: without lock\n", gtid));
1382  return FALSE;
1383 }
1384 
1385 static int __kmp_test_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1386  kmp_int32 gtid) {
1387  char const *const func = "omp_test_lock";
1388  if (lck->lk.initialized != lck) {
1389  KMP_FATAL(LockIsUninitialized, func);
1390  }
1391  if (__kmp_is_queuing_lock_nestable(lck)) {
1392  KMP_FATAL(LockNestableUsedAsSimple, func);
1393  }
1394 
1395  int retval = __kmp_test_queuing_lock(lck, gtid);
1396 
1397  if (retval) {
1398  lck->lk.owner_id = gtid + 1;
1399  }
1400  return retval;
1401 }
1402 
1403 int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1404  kmp_info_t *this_thr;
1405  volatile kmp_int32 *head_id_p = &lck->lk.head_id;
1406  volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
1407 
1408  KA_TRACE(1000,
1409  ("__kmp_release_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));
1410  KMP_DEBUG_ASSERT(gtid >= 0);
1411  this_thr = __kmp_thread_from_gtid(gtid);
1412  KMP_DEBUG_ASSERT(this_thr != NULL);
1413 #ifdef DEBUG_QUEUING_LOCKS
1414  TRACE_LOCK(gtid + 1, "rel ent");
1415 
1416  if (this_thr->th.th_spin_here)
1417  __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
1418  if (this_thr->th.th_next_waiting != 0)
1419  __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
1420 #endif
1421  KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
1422  KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
1423 
1424  KMP_FSYNC_RELEASING(lck);
1425  ANNOTATE_QUEUING_RELEASED(lck);
1426 
1427  while (1) {
1428  kmp_int32 dequeued;
1429  kmp_int32 head;
1430  kmp_int32 tail;
1431 
1432  head = *head_id_p;
1433 
1434 #ifdef DEBUG_QUEUING_LOCKS
1435  tail = *tail_id_p;
1436  TRACE_LOCK_HT(gtid + 1, "rel read: ", head, tail);
1437  if (head == 0)
1438  __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
1439 #endif
1440  KMP_DEBUG_ASSERT(head !=
1441  0); /* holding the lock, head must be -1 or queue head */
1442 
1443  if (head == -1) { /* nobody on queue */
1444  /* try (-1,0)->(0,0) */
1445  if (KMP_COMPARE_AND_STORE_REL32(head_id_p, -1, 0)) {
1446  KA_TRACE(
1447  1000,
1448  ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: queue empty\n",
1449  lck, gtid));
1450 #ifdef DEBUG_QUEUING_LOCKS
1451  TRACE_LOCK_HT(gtid + 1, "rel exit: ", 0, 0);
1452 #endif
1453 
1454 #if OMPT_SUPPORT
1455 /* nothing to do - no other thread is trying to shift blame */
1456 #endif
1457  return KMP_LOCK_RELEASED;
1458  }
1459  dequeued = FALSE;
1460  } else {
1461  tail = *tail_id_p;
1462  if (head == tail) { /* only one thread on the queue */
1463 #ifdef DEBUG_QUEUING_LOCKS
1464  if (head <= 0)
1465  __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
1466 #endif
1467  KMP_DEBUG_ASSERT(head > 0);
1468 
1469  /* try (h,h)->(-1,0) */
1470  dequeued = KMP_COMPARE_AND_STORE_REL64(
1471  RCAST(volatile kmp_int64 *, tail_id_p), KMP_PACK_64(head, head),
1472  KMP_PACK_64(-1, 0));
1473 #ifdef DEBUG_QUEUING_LOCKS
1474  TRACE_LOCK(gtid + 1, "rel deq: (h,h)->(-1,0)");
1475 #endif
1476 
1477  } else {
1478  volatile kmp_int32 *waiting_id_p;
1479  kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1);
1480  KMP_DEBUG_ASSERT(head_thr != NULL);
1481  waiting_id_p = &head_thr->th.th_next_waiting;
1482 
1483 /* Does this require synchronous reads? */
1484 #ifdef DEBUG_QUEUING_LOCKS
1485  if (head <= 0 || tail <= 0)
1486  __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
1487 #endif
1488  KMP_DEBUG_ASSERT(head > 0 && tail > 0);
1489 
1490  /* try (h,t)->(h',t) or (t,t) */
1491  KMP_MB();
1492  /* make sure enqueuing thread has time to update next waiting thread
1493  * field */
1494  *head_id_p = KMP_WAIT_YIELD((volatile kmp_uint32 *)waiting_id_p, 0,
1495  KMP_NEQ, NULL);
1496 #ifdef DEBUG_QUEUING_LOCKS
1497  TRACE_LOCK(gtid + 1, "rel deq: (h,t)->(h',t)");
1498 #endif
1499  dequeued = TRUE;
1500  }
1501  }
1502 
1503  if (dequeued) {
1504  kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1);
1505  KMP_DEBUG_ASSERT(head_thr != NULL);
1506 
1507 /* Does this require synchronous reads? */
1508 #ifdef DEBUG_QUEUING_LOCKS
1509  if (head <= 0 || tail <= 0)
1510  __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
1511 #endif
1512  KMP_DEBUG_ASSERT(head > 0 && tail > 0);
1513 
1514  /* This is for cleanliness only; the thread is not released until the next
1515  statement, which prevents a race with the acquire code. */
1516  head_thr->th.th_next_waiting = 0;
1517 #ifdef DEBUG_QUEUING_LOCKS
1518  TRACE_LOCK_T(gtid + 1, "rel nw=0 for t=", head);
1519 #endif
1520 
1521  KMP_MB();
1522  /* reset spin value */
1523  head_thr->th.th_spin_here = FALSE;
1524 
1525  KA_TRACE(1000, ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: after "
1526  "dequeuing\n",
1527  lck, gtid));
1528 #ifdef DEBUG_QUEUING_LOCKS
1529  TRACE_LOCK(gtid + 1, "rel exit 2");
1530 #endif
1531  return KMP_LOCK_RELEASED;
1532  }
1533 /* KMP_CPU_PAUSE(); don't want to make releasing thread hold up acquiring
1534  threads */
1535 
1536 #ifdef DEBUG_QUEUING_LOCKS
1537  TRACE_LOCK(gtid + 1, "rel retry");
1538 #endif
1539 
1540  } /* while */
1541  KMP_ASSERT2(0, "should not get here");
1542  return KMP_LOCK_RELEASED;
1543 }
1544 
1545 static int __kmp_release_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1546  kmp_int32 gtid) {
1547  char const *const func = "omp_unset_lock";
1548  KMP_MB(); /* in case another processor initialized lock */
1549  if (lck->lk.initialized != lck) {
1550  KMP_FATAL(LockIsUninitialized, func);
1551  }
1552  if (__kmp_is_queuing_lock_nestable(lck)) {
1553  KMP_FATAL(LockNestableUsedAsSimple, func);
1554  }
1555  if (__kmp_get_queuing_lock_owner(lck) == -1) {
1556  KMP_FATAL(LockUnsettingFree, func);
1557  }
1558  if (__kmp_get_queuing_lock_owner(lck) != gtid) {
1559  KMP_FATAL(LockUnsettingSetByAnother, func);
1560  }
1561  lck->lk.owner_id = 0;
1562  return __kmp_release_queuing_lock(lck, gtid);
1563 }
1564 
1565 void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck) {
1566  lck->lk.location = NULL;
1567  lck->lk.head_id = 0;
1568  lck->lk.tail_id = 0;
1569  lck->lk.next_ticket = 0;
1570  lck->lk.now_serving = 0;
1571  lck->lk.owner_id = 0; // no thread owns the lock.
1572  lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
1573  lck->lk.initialized = lck;
1574 
1575  KA_TRACE(1000, ("__kmp_init_queuing_lock: lock %p initialized\n", lck));
1576 }
1577 
1578 static void __kmp_init_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
1579  __kmp_init_queuing_lock(lck);
1580 }
1581 
1582 void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck) {
1583  lck->lk.initialized = NULL;
1584  lck->lk.location = NULL;
1585  lck->lk.head_id = 0;
1586  lck->lk.tail_id = 0;
1587  lck->lk.next_ticket = 0;
1588  lck->lk.now_serving = 0;
1589  lck->lk.owner_id = 0;
1590  lck->lk.depth_locked = -1;
1591 }
1592 
1593 static void __kmp_destroy_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
1594  char const *const func = "omp_destroy_lock";
1595  if (lck->lk.initialized != lck) {
1596  KMP_FATAL(LockIsUninitialized, func);
1597  }
1598  if (__kmp_is_queuing_lock_nestable(lck)) {
1599  KMP_FATAL(LockNestableUsedAsSimple, func);
1600  }
1601  if (__kmp_get_queuing_lock_owner(lck) != -1) {
1602  KMP_FATAL(LockStillOwned, func);
1603  }
1604  __kmp_destroy_queuing_lock(lck);
1605 }
1606 
1607 // nested queuing locks
1608 
1609 int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1610  KMP_DEBUG_ASSERT(gtid >= 0);
1611 
1612  if (__kmp_get_queuing_lock_owner(lck) == gtid) {
1613  lck->lk.depth_locked += 1;
1614  return KMP_LOCK_ACQUIRED_NEXT;
1615  } else {
1616  __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
1617  ANNOTATE_QUEUING_ACQUIRED(lck);
1618  KMP_MB();
1619  lck->lk.depth_locked = 1;
1620  KMP_MB();
1621  lck->lk.owner_id = gtid + 1;
1622  return KMP_LOCK_ACQUIRED_FIRST;
1623  }
1624 }
1625 
1626 static int
1627 __kmp_acquire_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1628  kmp_int32 gtid) {
1629  char const *const func = "omp_set_nest_lock";
1630  if (lck->lk.initialized != lck) {
1631  KMP_FATAL(LockIsUninitialized, func);
1632  }
1633  if (!__kmp_is_queuing_lock_nestable(lck)) {
1634  KMP_FATAL(LockSimpleUsedAsNestable, func);
1635  }
1636  return __kmp_acquire_nested_queuing_lock(lck, gtid);
1637 }
1638 
1639 int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1640  int retval;
1641 
1642  KMP_DEBUG_ASSERT(gtid >= 0);
1643 
1644  if (__kmp_get_queuing_lock_owner(lck) == gtid) {
1645  retval = ++lck->lk.depth_locked;
1646  } else if (!__kmp_test_queuing_lock(lck, gtid)) {
1647  retval = 0;
1648  } else {
1649  KMP_MB();
1650  retval = lck->lk.depth_locked = 1;
1651  KMP_MB();
1652  lck->lk.owner_id = gtid + 1;
1653  }
1654  return retval;
1655 }
1656 
1657 static int __kmp_test_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1658  kmp_int32 gtid) {
1659  char const *const func = "omp_test_nest_lock";
1660  if (lck->lk.initialized != lck) {
1661  KMP_FATAL(LockIsUninitialized, func);
1662  }
1663  if (!__kmp_is_queuing_lock_nestable(lck)) {
1664  KMP_FATAL(LockSimpleUsedAsNestable, func);
1665  }
1666  return __kmp_test_nested_queuing_lock(lck, gtid);
1667 }
1668 
1669 int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1670  KMP_DEBUG_ASSERT(gtid >= 0);
1671 
1672  KMP_MB();
1673  if (--(lck->lk.depth_locked) == 0) {
1674  KMP_MB();
1675  lck->lk.owner_id = 0;
1676  __kmp_release_queuing_lock(lck, gtid);
1677  return KMP_LOCK_RELEASED;
1678  }
1679  return KMP_LOCK_STILL_HELD;
1680 }
1681 
1682 static int
1683 __kmp_release_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1684  kmp_int32 gtid) {
1685  char const *const func = "omp_unset_nest_lock";
1686  KMP_MB(); /* in case another processor initialized lock */
1687  if (lck->lk.initialized != lck) {
1688  KMP_FATAL(LockIsUninitialized, func);
1689  }
1690  if (!__kmp_is_queuing_lock_nestable(lck)) {
1691  KMP_FATAL(LockSimpleUsedAsNestable, func);
1692  }
1693  if (__kmp_get_queuing_lock_owner(lck) == -1) {
1694  KMP_FATAL(LockUnsettingFree, func);
1695  }
1696  if (__kmp_get_queuing_lock_owner(lck) != gtid) {
1697  KMP_FATAL(LockUnsettingSetByAnother, func);
1698  }
1699  return __kmp_release_nested_queuing_lock(lck, gtid);
1700 }
1701 
1702 void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck) {
1703  __kmp_init_queuing_lock(lck);
1704  lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
1705 }
1706 
1707 static void
1708 __kmp_init_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
1709  __kmp_init_nested_queuing_lock(lck);
1710 }
1711 
1712 void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck) {
1713  __kmp_destroy_queuing_lock(lck);
1714  lck->lk.depth_locked = 0;
1715 }
1716 
1717 static void
1718 __kmp_destroy_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
1719  char const *const func = "omp_destroy_nest_lock";
1720  if (lck->lk.initialized != lck) {
1721  KMP_FATAL(LockIsUninitialized, func);
1722  }
1723  if (!__kmp_is_queuing_lock_nestable(lck)) {
1724  KMP_FATAL(LockSimpleUsedAsNestable, func);
1725  }
1726  if (__kmp_get_queuing_lock_owner(lck) != -1) {
1727  KMP_FATAL(LockStillOwned, func);
1728  }
1729  __kmp_destroy_nested_queuing_lock(lck);
1730 }
1731 
1732 // access functions to fields which don't exist for all lock kinds.
1733 
1734 static int __kmp_is_queuing_lock_initialized(kmp_queuing_lock_t *lck) {
1735  return lck == lck->lk.initialized;
1736 }
1737 
1738 static const ident_t *__kmp_get_queuing_lock_location(kmp_queuing_lock_t *lck) {
1739  return lck->lk.location;
1740 }
1741 
1742 static void __kmp_set_queuing_lock_location(kmp_queuing_lock_t *lck,
1743  const ident_t *loc) {
1744  lck->lk.location = loc;
1745 }
1746 
1747 static kmp_lock_flags_t __kmp_get_queuing_lock_flags(kmp_queuing_lock_t *lck) {
1748  return lck->lk.flags;
1749 }
1750 
1751 static void __kmp_set_queuing_lock_flags(kmp_queuing_lock_t *lck,
1752  kmp_lock_flags_t flags) {
1753  lck->lk.flags = flags;
1754 }
1755 
1756 #if KMP_USE_ADAPTIVE_LOCKS
1757 
1758 /* RTM Adaptive locks */
1759 
1760 #if KMP_COMPILER_ICC && __INTEL_COMPILER >= 1300
1761 
1762 #include <immintrin.h>
1763 #define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)
1764 
1765 #else
1766 
1767 // Values from the status register after failed speculation.
1768 #define _XBEGIN_STARTED (~0u)
1769 #define _XABORT_EXPLICIT (1 << 0)
1770 #define _XABORT_RETRY (1 << 1)
1771 #define _XABORT_CONFLICT (1 << 2)
1772 #define _XABORT_CAPACITY (1 << 3)
1773 #define _XABORT_DEBUG (1 << 4)
1774 #define _XABORT_NESTED (1 << 5)
1775 #define _XABORT_CODE(x) ((unsigned char)(((x) >> 24) & 0xFF))
1776 
1777 // Aborts for which it's worth trying again immediately
1778 #define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)
1779 
1780 #define STRINGIZE_INTERNAL(arg) #arg
1781 #define STRINGIZE(arg) STRINGIZE_INTERNAL(arg)
1782 
1783 // Access to RTM instructions
1784 /* A version of XBegin which returns -1 on speculation, and the value of EAX on
1785  an abort. This is the same definition as the compiler intrinsic that will be
1786  supported at some point. */
1787 static __inline int _xbegin() {
1788  int res = -1;
1789 
1790 #if KMP_OS_WINDOWS
1791 #if KMP_ARCH_X86_64
1792  _asm {
1793  _emit 0xC7
1794  _emit 0xF8
1795  _emit 2
1796  _emit 0
1797  _emit 0
1798  _emit 0
1799  jmp L2
1800  mov res, eax
1801  L2:
1802  }
1803 #else /* IA32 */
1804  _asm {
1805  _emit 0xC7
1806  _emit 0xF8
1807  _emit 2
1808  _emit 0
1809  _emit 0
1810  _emit 0
1811  jmp L2
1812  mov res, eax
1813  L2:
1814  }
1815 #endif // KMP_ARCH_X86_64
1816 #else
1817  /* Note that %eax must be noted as killed (clobbered), because the XSR is
1818  returned in %eax(%rax) on abort. Other register values are restored, so
1819  don't need to be killed.
1820 
1821  We must also mark 'res' as an input and an output, since otherwise
1822  'res=-1' may be dropped as being dead, whereas we do need the assignment on
1823  the successful (i.e., non-abort) path. */
1824  __asm__ volatile("1: .byte 0xC7; .byte 0xF8;\n"
1825  " .long 1f-1b-6\n"
1826  " jmp 2f\n"
1827  "1: movl %%eax,%0\n"
1828  "2:"
1829  : "+r"(res)::"memory", "%eax");
1830 #endif // KMP_OS_WINDOWS
1831  return res;
1832 }
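// Illustrative usage sketch (comments only, not compiled; it mirrors the
// pattern used by the adaptive and RTM locks later in this file):
//
//   kmp_uint32 status = _xbegin();
//   if (status == _XBEGIN_STARTED) {
//     // speculative region: commit with _xend(), or roll back with
//     // _xabort(<one-byte constant code>)
//   } else {
//     // status holds the abort bits from EAX; retry only if it matches
//     // SOFT_ABORT_MASK
//   }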
1833 
1834 /* Transaction end */
1835 static __inline void _xend() {
1836 #if KMP_OS_WINDOWS
1837  __asm {
1838  _emit 0x0f
1839  _emit 0x01
1840  _emit 0xd5
1841  }
1842 #else
1843  __asm__ volatile(".byte 0x0f; .byte 0x01; .byte 0xd5" ::: "memory");
1844 #endif
1845 }
1846 
1847 /* This is a macro; the argument must be a single-byte constant that can be
1848    evaluated by the inline assembler, since it is emitted as a byte into the
1849    assembly code. */
1850 // clang-format off
1851 #if KMP_OS_WINDOWS
1852 #define _xabort(ARG) _asm _emit 0xc6 _asm _emit 0xf8 _asm _emit ARG
1853 #else
1854 #define _xabort(ARG) \
1855  __asm__ volatile(".byte 0xC6; .byte 0xF8; .byte " STRINGIZE(ARG):::"memory");
1856 #endif
1857 // clang-format on
1858 #endif // KMP_COMPILER_ICC && __INTEL_COMPILER >= 1300
1859 
1860 // Statistics are collected for testing purposes.
1861 #if KMP_DEBUG_ADAPTIVE_LOCKS
1862 
1863 // We accumulate speculative lock statistics when the lock is destroyed. We
1864 // keep locks that haven't been destroyed in the liveLocks list so that we can
1865 // grab their statistics too.
1866 static kmp_adaptive_lock_statistics_t destroyedStats;
1867 
1868 // To hold the list of live locks.
1869 static kmp_adaptive_lock_info_t liveLocks;
1870 
1871 // A lock so we can safely update the list of locks.
1872 static kmp_bootstrap_lock_t chain_lock;
1873 
1874 // Initialize the list of stats.
1875 void __kmp_init_speculative_stats() {
1876  kmp_adaptive_lock_info_t *lck = &liveLocks;
1877 
1878  memset((void *)&(lck->stats), 0, sizeof(lck->stats));
1879  lck->stats.next = lck;
1880  lck->stats.prev = lck;
1881 
1882  KMP_ASSERT(lck->stats.next->stats.prev == lck);
1883  KMP_ASSERT(lck->stats.prev->stats.next == lck);
1884 
1885  __kmp_init_bootstrap_lock(&chain_lock);
1886 }
1887 
1888 // Insert the lock into the circular list
1889 static void __kmp_remember_lock(kmp_adaptive_lock_info_t *lck) {
1890  __kmp_acquire_bootstrap_lock(&chain_lock);
1891 
1892  lck->stats.next = liveLocks.stats.next;
1893  lck->stats.prev = &liveLocks;
1894 
1895  liveLocks.stats.next = lck;
1896  lck->stats.next->stats.prev = lck;
1897 
1898  KMP_ASSERT(lck->stats.next->stats.prev == lck);
1899  KMP_ASSERT(lck->stats.prev->stats.next == lck);
1900 
1901  __kmp_release_bootstrap_lock(&chain_lock);
1902 }
1903 
1904 static void __kmp_forget_lock(kmp_adaptive_lock_info_t *lck) {
1905  KMP_ASSERT(lck->stats.next->stats.prev == lck);
1906  KMP_ASSERT(lck->stats.prev->stats.next == lck);
1907 
1908  kmp_adaptive_lock_info_t *n = lck->stats.next;
1909  kmp_adaptive_lock_info_t *p = lck->stats.prev;
1910 
1911  n->stats.prev = p;
1912  p->stats.next = n;
1913 }
1914 
1915 static void __kmp_zero_speculative_stats(kmp_adaptive_lock_info_t *lck) {
1916  memset((void *)&lck->stats, 0, sizeof(lck->stats));
1917  __kmp_remember_lock(lck);
1918 }
1919 
1920 static void __kmp_add_stats(kmp_adaptive_lock_statistics_t *t,
1921  kmp_adaptive_lock_info_t *lck) {
1922  kmp_adaptive_lock_statistics_t volatile *s = &lck->stats;
1923 
1924  t->nonSpeculativeAcquireAttempts += lck->acquire_attempts;
1925  t->successfulSpeculations += s->successfulSpeculations;
1926  t->hardFailedSpeculations += s->hardFailedSpeculations;
1927  t->softFailedSpeculations += s->softFailedSpeculations;
1928  t->nonSpeculativeAcquires += s->nonSpeculativeAcquires;
1929  t->lemmingYields += s->lemmingYields;
1930 }
1931 
1932 static void __kmp_accumulate_speculative_stats(kmp_adaptive_lock_info_t *lck) {
1933  kmp_adaptive_lock_statistics_t *t = &destroyedStats;
1934 
1935  __kmp_acquire_bootstrap_lock(&chain_lock);
1936 
1937  __kmp_add_stats(&destroyedStats, lck);
1938  __kmp_forget_lock(lck);
1939 
1940  __kmp_release_bootstrap_lock(&chain_lock);
1941 }
1942 
1943 static float percent(kmp_uint32 count, kmp_uint32 total) {
1944  return (total == 0) ? 0.0 : (100.0 * count) / total;
1945 }
1946 
1947 static FILE *__kmp_open_stats_file() {
1948  if (strcmp(__kmp_speculative_statsfile, "-") == 0)
1949  return stdout;
1950 
1951  size_t buffLen = KMP_STRLEN(__kmp_speculative_statsfile) + 20;
1952  char buffer[buffLen];
1953  KMP_SNPRINTF(&buffer[0], buffLen, __kmp_speculative_statsfile,
1954  (kmp_int32)getpid());
1955  FILE *result = fopen(&buffer[0], "w");
1956 
1957  // Maybe we should issue a warning here...
1958  return result ? result : stdout;
1959 }
1960 
1961 void __kmp_print_speculative_stats() {
1962  if (__kmp_user_lock_kind != lk_adaptive)
1963  return;
1964 
1965  FILE *statsFile = __kmp_open_stats_file();
1966 
1967  kmp_adaptive_lock_statistics_t total = destroyedStats;
1968  kmp_adaptive_lock_info_t *lck;
1969 
1970  for (lck = liveLocks.stats.next; lck != &liveLocks; lck = lck->stats.next) {
1971  __kmp_add_stats(&total, lck);
1972  }
1973  kmp_adaptive_lock_statistics_t *t = &total;
1974  kmp_uint32 totalSections =
1975  t->nonSpeculativeAcquires + t->successfulSpeculations;
1976  kmp_uint32 totalSpeculations = t->successfulSpeculations +
1977  t->hardFailedSpeculations +
1978  t->softFailedSpeculations;
1979 
1980  fprintf(statsFile, "Speculative lock statistics (all approximate!)\n");
1981  fprintf(statsFile, " Lock parameters: \n"
1982  " max_soft_retries : %10d\n"
1983  " max_badness : %10d\n",
1984  __kmp_adaptive_backoff_params.max_soft_retries,
1985  __kmp_adaptive_backoff_params.max_badness);
1986  fprintf(statsFile, " Non-speculative acquire attempts : %10d\n",
1987  t->nonSpeculativeAcquireAttempts);
1988  fprintf(statsFile, " Total critical sections : %10d\n",
1989  totalSections);
1990  fprintf(statsFile, " Successful speculations : %10d (%5.1f%%)\n",
1991  t->successfulSpeculations,
1992  percent(t->successfulSpeculations, totalSections));
1993  fprintf(statsFile, " Non-speculative acquires : %10d (%5.1f%%)\n",
1994  t->nonSpeculativeAcquires,
1995  percent(t->nonSpeculativeAcquires, totalSections));
1996  fprintf(statsFile, " Lemming yields : %10d\n\n",
1997  t->lemmingYields);
1998 
1999  fprintf(statsFile, " Speculative acquire attempts : %10d\n",
2000  totalSpeculations);
2001  fprintf(statsFile, " Successes : %10d (%5.1f%%)\n",
2002  t->successfulSpeculations,
2003  percent(t->successfulSpeculations, totalSpeculations));
2004  fprintf(statsFile, " Soft failures : %10d (%5.1f%%)\n",
2005  t->softFailedSpeculations,
2006  percent(t->softFailedSpeculations, totalSpeculations));
2007  fprintf(statsFile, " Hard failures : %10d (%5.1f%%)\n",
2008  t->hardFailedSpeculations,
2009  percent(t->hardFailedSpeculations, totalSpeculations));
2010 
2011  if (statsFile != stdout)
2012  fclose(statsFile);
2013 }
2014 
2015 #define KMP_INC_STAT(lck, stat) (lck->lk.adaptive.stats.stat++)
2016 #else
2017 #define KMP_INC_STAT(lck, stat)
2018 
2019 #endif // KMP_DEBUG_ADAPTIVE_LOCKS
2020 
2021 static inline bool __kmp_is_unlocked_queuing_lock(kmp_queuing_lock_t *lck) {
2022  // It is enough to check that the head_id is zero.
2023  // We don't also need to check the tail.
2024  bool res = lck->lk.head_id == 0;
2025 
2026 // We need a fence here, since we must ensure that no memory operations
2027 // from later in this thread float above that read.
2028 #if KMP_COMPILER_ICC
2029  _mm_mfence();
2030 #else
2031  __sync_synchronize();
2032 #endif
2033 
2034  return res;
2035 }
2036 
2037 // Functions for manipulating the badness
2038 static __inline void
2039 __kmp_update_badness_after_success(kmp_adaptive_lock_t *lck) {
2040  // Reset the badness to zero so we eagerly try to speculate again
2041  lck->lk.adaptive.badness = 0;
2042  KMP_INC_STAT(lck, successfulSpeculations);
2043 }
2044 
2045 // Create a bit mask with one more set bit.
2046 static __inline void __kmp_step_badness(kmp_adaptive_lock_t *lck) {
2047  kmp_uint32 newBadness = (lck->lk.adaptive.badness << 1) | 1;
2048  if (newBadness > lck->lk.adaptive.max_badness) {
2049  return;
2050  } else {
2051  lck->lk.adaptive.badness = newBadness;
2052  }
2053 }
2054 
2055 // Check whether speculation should be attempted.
2056 static __inline int __kmp_should_speculate(kmp_adaptive_lock_t *lck,
2057  kmp_int32 gtid) {
2058  kmp_uint32 badness = lck->lk.adaptive.badness;
2059  kmp_uint32 attempts = lck->lk.adaptive.acquire_attempts;
2060  int res = (attempts & badness) == 0;
2061  return res;
2062 }
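// Worked example (values chosen only for illustration): after repeated failed
// speculations __kmp_step_badness walks badness through 1, 3, 7, 15, ...,
// capped by max_badness, while every successful speculation resets it to 0.
// Because speculation is allowed only when (acquire_attempts & badness) == 0,
// a badness of 7 means roughly one in every eight acquire attempts tries the
// transactional path, so a lock that keeps aborting is probed progressively
// less often.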
2063 
2064 // Attempt to acquire only the speculative lock.
2065 // Does not back off to the non-speculative lock.
2066 static int __kmp_test_adaptive_lock_only(kmp_adaptive_lock_t *lck,
2067  kmp_int32 gtid) {
2068  int retries = lck->lk.adaptive.max_soft_retries;
2069 
2070   // We don't explicitly count the start of speculation; rather, we record the
2071  // results (success, hard fail, soft fail). The sum of all of those is the
2072  // total number of times we started speculation since all speculations must
2073  // end one of those ways.
2074  do {
2075  kmp_uint32 status = _xbegin();
2076  // Switch this in to disable actual speculation but exercise at least some
2077  // of the rest of the code. Useful for debugging...
2078  // kmp_uint32 status = _XABORT_NESTED;
2079 
2080  if (status == _XBEGIN_STARTED) {
2081  /* We have successfully started speculation. Check that no-one acquired
2082  the lock for real between when we last looked and now. This also gets
2083  the lock cache line into our read-set, which we need so that we'll
2084  abort if anyone later claims it for real. */
2085  if (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
2086  // Lock is now visibly acquired, so someone beat us to it. Abort the
2087  // transaction so we'll restart from _xbegin with the failure status.
2088  _xabort(0x01);
2089  KMP_ASSERT2(0, "should not get here");
2090  }
2091  return 1; // Lock has been acquired (speculatively)
2092  } else {
2093  // We have aborted, update the statistics
2094  if (status & SOFT_ABORT_MASK) {
2095  KMP_INC_STAT(lck, softFailedSpeculations);
2096  // and loop round to retry.
2097  } else {
2098  KMP_INC_STAT(lck, hardFailedSpeculations);
2099  // Give up if we had a hard failure.
2100  break;
2101  }
2102  }
2103  } while (retries--); // Loop while we have retries, and didn't fail hard.
2104 
2105  // Either we had a hard failure or we didn't succeed softly after
2106  // the full set of attempts, so back off the badness.
2107  __kmp_step_badness(lck);
2108  return 0;
2109 }
2110 
2111 // Attempt to acquire the speculative lock, or back off to the non-speculative
2112 // one if the speculative lock cannot be acquired.
2113 // We can succeed speculatively, non-speculatively, or fail.
2114 static int __kmp_test_adaptive_lock(kmp_adaptive_lock_t *lck, kmp_int32 gtid) {
2115  // First try to acquire the lock speculatively
2116  if (__kmp_should_speculate(lck, gtid) &&
2117  __kmp_test_adaptive_lock_only(lck, gtid))
2118  return 1;
2119 
2120  // Speculative acquisition failed, so try to acquire it non-speculatively.
2121  // Count the non-speculative acquire attempt
2122  lck->lk.adaptive.acquire_attempts++;
2123 
2124  // Use base, non-speculative lock.
2125  if (__kmp_test_queuing_lock(GET_QLK_PTR(lck), gtid)) {
2126  KMP_INC_STAT(lck, nonSpeculativeAcquires);
2127  return 1; // Lock is acquired (non-speculatively)
2128  } else {
2129  return 0; // Failed to acquire the lock, it's already visibly locked.
2130  }
2131 }
2132 
2133 static int __kmp_test_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
2134  kmp_int32 gtid) {
2135  char const *const func = "omp_test_lock";
2136  if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2137  KMP_FATAL(LockIsUninitialized, func);
2138  }
2139 
2140  int retval = __kmp_test_adaptive_lock(lck, gtid);
2141 
2142  if (retval) {
2143  lck->lk.qlk.owner_id = gtid + 1;
2144  }
2145  return retval;
2146 }
2147 
2148 // Block until we can acquire a speculative, adaptive lock. We check whether we
2149 // should be trying to speculate. If we should be, we check the real lock to see
2150 // if it is free, and, if not, pause without attempting to acquire it until it
2151 // is. Then we try the speculative acquire. This means that although we suffer
2152 // from lemmings a little (because we can't acquire the lock speculatively
2153 // until the queue of threads waiting has cleared), we don't get into a state
2154 // where we can never acquire the lock speculatively (because we force the queue
2155 // to clear by preventing new arrivals from entering the queue). This does mean
2156 // that when we're trying to break lemmings, the lock is no longer fair. However
2157 // OpenMP makes no guarantee that its locks are fair, so this isn't a real
2158 // problem.
2159 static void __kmp_acquire_adaptive_lock(kmp_adaptive_lock_t *lck,
2160  kmp_int32 gtid) {
2161  if (__kmp_should_speculate(lck, gtid)) {
2162  if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
2163  if (__kmp_test_adaptive_lock_only(lck, gtid))
2164  return;
2165  // We tried speculation and failed, so give up.
2166  } else {
2167       // We can't try speculation until the lock is free, so we pause here
2168       // (without suspending on the queuing lock) to allow it to drain, then
2169       // try again. All other threads will also see the same result for
2170       // __kmp_should_speculate, so they will be doing the same if they try to
2171       // claim the lock from now on.
2172  while (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
2173  KMP_INC_STAT(lck, lemmingYields);
2174  __kmp_yield(TRUE);
2175  }
2176 
2177  if (__kmp_test_adaptive_lock_only(lck, gtid))
2178  return;
2179  }
2180  }
2181 
2182  // Speculative acquisition failed, so acquire it non-speculatively.
2183  // Count the non-speculative acquire attempt
2184  lck->lk.adaptive.acquire_attempts++;
2185 
2186  __kmp_acquire_queuing_lock_timed_template<FALSE>(GET_QLK_PTR(lck), gtid);
2187  // We have acquired the base lock, so count that.
2188  KMP_INC_STAT(lck, nonSpeculativeAcquires);
2189  ANNOTATE_QUEUING_ACQUIRED(lck);
2190 }
2191 
2192 static void __kmp_acquire_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
2193  kmp_int32 gtid) {
2194  char const *const func = "omp_set_lock";
2195  if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2196  KMP_FATAL(LockIsUninitialized, func);
2197  }
2198  if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == gtid) {
2199  KMP_FATAL(LockIsAlreadyOwned, func);
2200  }
2201 
2202  __kmp_acquire_adaptive_lock(lck, gtid);
2203 
2204  lck->lk.qlk.owner_id = gtid + 1;
2205 }
2206 
2207 static int __kmp_release_adaptive_lock(kmp_adaptive_lock_t *lck,
2208  kmp_int32 gtid) {
2209  if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(
2210  lck))) { // If the lock doesn't look claimed we must be speculating.
2211  // (Or the user's code is buggy and they're releasing without locking;
2212  // if we had XTEST we'd be able to check that case...)
2213  _xend(); // Exit speculation
2214  __kmp_update_badness_after_success(lck);
2215  } else { // Since the lock *is* visibly locked we're not speculating,
2216  // so should use the underlying lock's release scheme.
2217  __kmp_release_queuing_lock(GET_QLK_PTR(lck), gtid);
2218  }
2219  return KMP_LOCK_RELEASED;
2220 }
2221 
2222 static int __kmp_release_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
2223  kmp_int32 gtid) {
2224  char const *const func = "omp_unset_lock";
2225  KMP_MB(); /* in case another processor initialized lock */
2226  if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2227  KMP_FATAL(LockIsUninitialized, func);
2228  }
2229  if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == -1) {
2230  KMP_FATAL(LockUnsettingFree, func);
2231  }
2232  if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != gtid) {
2233  KMP_FATAL(LockUnsettingSetByAnother, func);
2234  }
2235  lck->lk.qlk.owner_id = 0;
2236  __kmp_release_adaptive_lock(lck, gtid);
2237  return KMP_LOCK_RELEASED;
2238 }
2239 
2240 static void __kmp_init_adaptive_lock(kmp_adaptive_lock_t *lck) {
2241  __kmp_init_queuing_lock(GET_QLK_PTR(lck));
2242  lck->lk.adaptive.badness = 0;
2243  lck->lk.adaptive.acquire_attempts = 0; // nonSpeculativeAcquireAttempts = 0;
2244  lck->lk.adaptive.max_soft_retries =
2245  __kmp_adaptive_backoff_params.max_soft_retries;
2246  lck->lk.adaptive.max_badness = __kmp_adaptive_backoff_params.max_badness;
2247 #if KMP_DEBUG_ADAPTIVE_LOCKS
2248  __kmp_zero_speculative_stats(&lck->lk.adaptive);
2249 #endif
2250  KA_TRACE(1000, ("__kmp_init_adaptive_lock: lock %p initialized\n", lck));
2251 }
2252 
2253 static void __kmp_init_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
2254  __kmp_init_adaptive_lock(lck);
2255 }
2256 
2257 static void __kmp_destroy_adaptive_lock(kmp_adaptive_lock_t *lck) {
2258 #if KMP_DEBUG_ADAPTIVE_LOCKS
2259  __kmp_accumulate_speculative_stats(&lck->lk.adaptive);
2260 #endif
2261  __kmp_destroy_queuing_lock(GET_QLK_PTR(lck));
2262  // Nothing needed for the speculative part.
2263 }
2264 
2265 static void __kmp_destroy_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
2266  char const *const func = "omp_destroy_lock";
2267  if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2268  KMP_FATAL(LockIsUninitialized, func);
2269  }
2270  if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != -1) {
2271  KMP_FATAL(LockStillOwned, func);
2272  }
2273  __kmp_destroy_adaptive_lock(lck);
2274 }
2275 
2276 #endif // KMP_USE_ADAPTIVE_LOCKS
2277 
2278 /* ------------------------------------------------------------------------ */
2279 /* DRDPA ticket locks */
2280 /* "DRDPA" means Dynamically Reconfigurable Distributed Polling Area */
2281 
2282 static kmp_int32 __kmp_get_drdpa_lock_owner(kmp_drdpa_lock_t *lck) {
2283  return TCR_4(lck->lk.owner_id) - 1;
2284 }
2285 
2286 static inline bool __kmp_is_drdpa_lock_nestable(kmp_drdpa_lock_t *lck) {
2287  return lck->lk.depth_locked != -1;
2288 }
2289 
2290 __forceinline static int
2291 __kmp_acquire_drdpa_lock_timed_template(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2292  kmp_uint64 ticket =
2293  KMP_TEST_THEN_INC64(RCAST(volatile kmp_int64 *, &lck->lk.next_ticket));
2294  kmp_uint64 mask = TCR_8(lck->lk.mask); // volatile load
2295  volatile struct kmp_base_drdpa_lock::kmp_lock_poll *polls = lck->lk.polls;
2296 
2297 #ifdef USE_LOCK_PROFILE
2298  if (TCR_8(polls[ticket & mask].poll) != ticket)
2299  __kmp_printf("LOCK CONTENTION: %p\n", lck);
2300 /* else __kmp_printf( "." );*/
2301 #endif /* USE_LOCK_PROFILE */
2302 
2303  // Now spin-wait, but reload the polls pointer and mask, in case the
2304  // polling area has been reconfigured. Unless it is reconfigured, the
2305  // reloads stay in L1 cache and are cheap.
2306  //
2307  // Keep this code in sync with KMP_WAIT_YIELD, in kmp_dispatch.cpp !!!
2308  //
2309  // The current implementation of KMP_WAIT_YIELD doesn't allow for mask
2310  // and poll to be re-read every spin iteration.
2311  kmp_uint32 spins;
2312 
2313  KMP_FSYNC_PREPARE(lck);
2314  KMP_INIT_YIELD(spins);
2315  while (TCR_8(polls[ticket & mask].poll) < ticket) { // volatile load
2316  // If we are oversubscribed,
2317  // or have waited a bit (and KMP_LIBRARY=turnaround), then yield.
2318  // CPU Pause is in the macros for yield.
2319  //
2320  KMP_YIELD(TCR_4(__kmp_nth) >
2321  (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
2322  KMP_YIELD_SPIN(spins);
2323 
2324  // Re-read the mask and the poll pointer from the lock structure.
2325  //
2326  // Make certain that "mask" is read before "polls" !!!
2327  //
2328     // If another thread reconfigures the polling area and updates these
2329     // values, and we get the new value of mask but the old polls pointer, we
2330  // could access memory beyond the end of the old polling area.
2331  mask = TCR_8(lck->lk.mask); // volatile load
2332  polls = lck->lk.polls; // volatile load
2333  }
2334 
2335  // Critical section starts here
2336  KMP_FSYNC_ACQUIRED(lck);
2337  KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld acquired lock %p\n",
2338  ticket, lck));
2339  lck->lk.now_serving = ticket; // non-volatile store
2340 
2341  // Deallocate a garbage polling area if we know that we are the last
2342  // thread that could possibly access it.
2343  //
2344  // The >= check is in case __kmp_test_drdpa_lock() allocated the cleanup
2345  // ticket.
2346  if ((lck->lk.old_polls != NULL) && (ticket >= lck->lk.cleanup_ticket)) {
2347  __kmp_free(CCAST(kmp_base_drdpa_lock::kmp_lock_poll *, lck->lk.old_polls));
2348  lck->lk.old_polls = NULL;
2349  lck->lk.cleanup_ticket = 0;
2350  }
2351 
2352  // Check to see if we should reconfigure the polling area.
2353  // If there is still a garbage polling area to be deallocated from a
2354  // previous reconfiguration, let a later thread reconfigure it.
2355  if (lck->lk.old_polls == NULL) {
2356  bool reconfigure = false;
2357  volatile struct kmp_base_drdpa_lock::kmp_lock_poll *old_polls = polls;
2358  kmp_uint32 num_polls = TCR_4(lck->lk.num_polls);
2359 
2360  if (TCR_4(__kmp_nth) >
2361  (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
2362  // We are in oversubscription mode. Contract the polling area
2363  // down to a single location, if that hasn't been done already.
2364  if (num_polls > 1) {
2365  reconfigure = true;
2366  num_polls = TCR_4(lck->lk.num_polls);
2367  mask = 0;
2368  num_polls = 1;
2369  polls = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)
2370  __kmp_allocate(num_polls * sizeof(*polls));
2371  polls[0].poll = ticket;
2372  }
2373  } else {
2374  // We are in under/fully subscribed mode. Check the number of
2375  // threads waiting on the lock. The size of the polling area
2376  // should be at least the number of threads waiting.
2377  kmp_uint64 num_waiting = TCR_8(lck->lk.next_ticket) - ticket - 1;
2378  if (num_waiting > num_polls) {
2379  kmp_uint32 old_num_polls = num_polls;
2380  reconfigure = true;
2381  do {
2382  mask = (mask << 1) | 1;
2383  num_polls *= 2;
2384  } while (num_polls <= num_waiting);
2385 
2386  // Allocate the new polling area, and copy the relevant portion
2387  // of the old polling area to the new area. __kmp_allocate()
2388  // zeroes the memory it allocates, and most of the old area is
2389  // just zero padding, so we only copy the release counters.
2390  polls = (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)
2391  __kmp_allocate(num_polls * sizeof(*polls));
2392  kmp_uint32 i;
2393  for (i = 0; i < old_num_polls; i++) {
2394  polls[i].poll = old_polls[i].poll;
2395  }
2396  }
2397  }
2398 
2399  if (reconfigure) {
2400  // Now write the updated fields back to the lock structure.
2401  //
2402  // Make certain that "polls" is written before "mask" !!!
2403  //
2404  // If another thread picks up the new value of mask and the old polls
2405       // pointer, it could access memory beyond the end of the old polling
2406  // area.
2407  //
2408  // On x86, we need memory fences.
2409  KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld reconfiguring "
2410  "lock %p to %d polls\n",
2411  ticket, lck, num_polls));
2412 
2413  lck->lk.old_polls = old_polls; // non-volatile store
2414  lck->lk.polls = polls; // volatile store
2415 
2416  KMP_MB();
2417 
2418  lck->lk.num_polls = num_polls; // non-volatile store
2419  lck->lk.mask = mask; // volatile store
2420 
2421  KMP_MB();
2422 
2423  // Only after the new polling area and mask have been flushed
2424  // to main memory can we update the cleanup ticket field.
2425  //
2426  // volatile load / non-volatile store
2427  lck->lk.cleanup_ticket = TCR_8(lck->lk.next_ticket);
2428  }
2429  }
2430  return KMP_LOCK_ACQUIRED_FIRST;
2431 }
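// Illustrative walk-through (sizes assumed only for the example): with
// num_polls = 4 the mask is 3, so the holder of ticket t spins on
// polls[t & 3] until that slot's value reaches t. The release path
// (__kmp_release_drdpa_lock below) stores now_serving + 1 into
// polls[(now_serving + 1) & mask], i.e. exactly the slot the next ticket
// holder is polling, so once the polling area is at least as large as the
// number of waiters, each waiter spins on its own slot instead of a single
// shared location.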
2432 
2433 int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2434  int retval = __kmp_acquire_drdpa_lock_timed_template(lck, gtid);
2435  ANNOTATE_DRDPA_ACQUIRED(lck);
2436  return retval;
2437 }
2438 
2439 static int __kmp_acquire_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2440  kmp_int32 gtid) {
2441  char const *const func = "omp_set_lock";
2442  if (lck->lk.initialized != lck) {
2443  KMP_FATAL(LockIsUninitialized, func);
2444  }
2445  if (__kmp_is_drdpa_lock_nestable(lck)) {
2446  KMP_FATAL(LockNestableUsedAsSimple, func);
2447  }
2448  if ((gtid >= 0) && (__kmp_get_drdpa_lock_owner(lck) == gtid)) {
2449  KMP_FATAL(LockIsAlreadyOwned, func);
2450  }
2451 
2452  __kmp_acquire_drdpa_lock(lck, gtid);
2453 
2454  lck->lk.owner_id = gtid + 1;
2455  return KMP_LOCK_ACQUIRED_FIRST;
2456 }
2457 
2458 int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2459  // First get a ticket, then read the polls pointer and the mask.
2460  // The polls pointer must be read before the mask!!! (See above)
2461  kmp_uint64 ticket = TCR_8(lck->lk.next_ticket); // volatile load
2462  volatile struct kmp_base_drdpa_lock::kmp_lock_poll *polls = lck->lk.polls;
2463  kmp_uint64 mask = TCR_8(lck->lk.mask); // volatile load
2464  if (TCR_8(polls[ticket & mask].poll) == ticket) {
2465  kmp_uint64 next_ticket = ticket + 1;
2466  if (KMP_COMPARE_AND_STORE_ACQ64(&lck->lk.next_ticket, ticket,
2467  next_ticket)) {
2468  KMP_FSYNC_ACQUIRED(lck);
2469  KA_TRACE(1000, ("__kmp_test_drdpa_lock: ticket #%lld acquired lock %p\n",
2470  ticket, lck));
2471  lck->lk.now_serving = ticket; // non-volatile store
2472 
2473  // Since no threads are waiting, there is no possibility that we would
2474  // want to reconfigure the polling area. We might have the cleanup ticket
2475  // value (which says that it is now safe to deallocate old_polls), but
2476  // we'll let a later thread which calls __kmp_acquire_lock do that - this
2477  // routine isn't supposed to block, and we would risk blocks if we called
2478  // __kmp_free() to do the deallocation.
2479  return TRUE;
2480  }
2481  }
2482  return FALSE;
2483 }
2484 
2485 static int __kmp_test_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2486  kmp_int32 gtid) {
2487  char const *const func = "omp_test_lock";
2488  if (lck->lk.initialized != lck) {
2489  KMP_FATAL(LockIsUninitialized, func);
2490  }
2491  if (__kmp_is_drdpa_lock_nestable(lck)) {
2492  KMP_FATAL(LockNestableUsedAsSimple, func);
2493  }
2494 
2495  int retval = __kmp_test_drdpa_lock(lck, gtid);
2496 
2497  if (retval) {
2498  lck->lk.owner_id = gtid + 1;
2499  }
2500  return retval;
2501 }
2502 
2503 int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2504  // Read the ticket value from the lock data struct, then the polls pointer and
2505  // the mask. The polls pointer must be read before the mask!!! (See above)
2506  kmp_uint64 ticket = lck->lk.now_serving + 1; // non-volatile load
2507  volatile struct kmp_base_drdpa_lock::kmp_lock_poll *polls = lck->lk.polls;
2508  kmp_uint64 mask = TCR_8(lck->lk.mask); // volatile load
2509  KA_TRACE(1000, ("__kmp_release_drdpa_lock: ticket #%lld released lock %p\n",
2510  ticket - 1, lck));
2511  KMP_FSYNC_RELEASING(lck);
2512  ANNOTATE_DRDPA_RELEASED(lck);
2513  KMP_ST_REL64(&(polls[ticket & mask].poll), ticket); // volatile store
2514  return KMP_LOCK_RELEASED;
2515 }
2516 
2517 static int __kmp_release_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2518  kmp_int32 gtid) {
2519  char const *const func = "omp_unset_lock";
2520  KMP_MB(); /* in case another processor initialized lock */
2521  if (lck->lk.initialized != lck) {
2522  KMP_FATAL(LockIsUninitialized, func);
2523  }
2524  if (__kmp_is_drdpa_lock_nestable(lck)) {
2525  KMP_FATAL(LockNestableUsedAsSimple, func);
2526  }
2527  if (__kmp_get_drdpa_lock_owner(lck) == -1) {
2528  KMP_FATAL(LockUnsettingFree, func);
2529  }
2530  if ((gtid >= 0) && (__kmp_get_drdpa_lock_owner(lck) >= 0) &&
2531  (__kmp_get_drdpa_lock_owner(lck) != gtid)) {
2532  KMP_FATAL(LockUnsettingSetByAnother, func);
2533  }
2534  lck->lk.owner_id = 0;
2535  return __kmp_release_drdpa_lock(lck, gtid);
2536 }
2537 
2538 void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck) {
2539  lck->lk.location = NULL;
2540  lck->lk.mask = 0;
2541  lck->lk.num_polls = 1;
2542  lck->lk.polls =
2543  (volatile struct kmp_base_drdpa_lock::kmp_lock_poll *)__kmp_allocate(
2544  lck->lk.num_polls * sizeof(*(lck->lk.polls)));
2545  lck->lk.cleanup_ticket = 0;
2546  lck->lk.old_polls = NULL;
2547  lck->lk.next_ticket = 0;
2548  lck->lk.now_serving = 0;
2549  lck->lk.owner_id = 0; // no thread owns the lock.
2550  lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
2551  lck->lk.initialized = lck;
2552 
2553  KA_TRACE(1000, ("__kmp_init_drdpa_lock: lock %p initialized\n", lck));
2554 }
2555 
2556 static void __kmp_init_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
2557  __kmp_init_drdpa_lock(lck);
2558 }
2559 
2560 void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck) {
2561  lck->lk.initialized = NULL;
2562  lck->lk.location = NULL;
2563  if (lck->lk.polls != NULL) {
2564  __kmp_free(CCAST(kmp_base_drdpa_lock::kmp_lock_poll *, lck->lk.polls));
2565  lck->lk.polls = NULL;
2566  }
2567  if (lck->lk.old_polls != NULL) {
2568  __kmp_free(CCAST(kmp_base_drdpa_lock::kmp_lock_poll *, lck->lk.old_polls));
2569  lck->lk.old_polls = NULL;
2570  }
2571  lck->lk.mask = 0;
2572  lck->lk.num_polls = 0;
2573  lck->lk.cleanup_ticket = 0;
2574  lck->lk.next_ticket = 0;
2575  lck->lk.now_serving = 0;
2576  lck->lk.owner_id = 0;
2577  lck->lk.depth_locked = -1;
2578 }
2579 
2580 static void __kmp_destroy_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
2581  char const *const func = "omp_destroy_lock";
2582  if (lck->lk.initialized != lck) {
2583  KMP_FATAL(LockIsUninitialized, func);
2584  }
2585  if (__kmp_is_drdpa_lock_nestable(lck)) {
2586  KMP_FATAL(LockNestableUsedAsSimple, func);
2587  }
2588  if (__kmp_get_drdpa_lock_owner(lck) != -1) {
2589  KMP_FATAL(LockStillOwned, func);
2590  }
2591  __kmp_destroy_drdpa_lock(lck);
2592 }
2593 
2594 // nested drdpa ticket locks
2595 
2596 int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2597  KMP_DEBUG_ASSERT(gtid >= 0);
2598 
2599  if (__kmp_get_drdpa_lock_owner(lck) == gtid) {
2600  lck->lk.depth_locked += 1;
2601  return KMP_LOCK_ACQUIRED_NEXT;
2602  } else {
2603  __kmp_acquire_drdpa_lock_timed_template(lck, gtid);
2604  ANNOTATE_DRDPA_ACQUIRED(lck);
2605  KMP_MB();
2606  lck->lk.depth_locked = 1;
2607  KMP_MB();
2608  lck->lk.owner_id = gtid + 1;
2609  return KMP_LOCK_ACQUIRED_FIRST;
2610  }
2611 }
2612 
2613 static void __kmp_acquire_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2614  kmp_int32 gtid) {
2615  char const *const func = "omp_set_nest_lock";
2616  if (lck->lk.initialized != lck) {
2617  KMP_FATAL(LockIsUninitialized, func);
2618  }
2619  if (!__kmp_is_drdpa_lock_nestable(lck)) {
2620  KMP_FATAL(LockSimpleUsedAsNestable, func);
2621  }
2622  __kmp_acquire_nested_drdpa_lock(lck, gtid);
2623 }
2624 
2625 int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2626  int retval;
2627 
2628  KMP_DEBUG_ASSERT(gtid >= 0);
2629 
2630  if (__kmp_get_drdpa_lock_owner(lck) == gtid) {
2631  retval = ++lck->lk.depth_locked;
2632  } else if (!__kmp_test_drdpa_lock(lck, gtid)) {
2633  retval = 0;
2634  } else {
2635  KMP_MB();
2636  retval = lck->lk.depth_locked = 1;
2637  KMP_MB();
2638  lck->lk.owner_id = gtid + 1;
2639  }
2640  return retval;
2641 }
2642 
2643 static int __kmp_test_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2644  kmp_int32 gtid) {
2645  char const *const func = "omp_test_nest_lock";
2646  if (lck->lk.initialized != lck) {
2647  KMP_FATAL(LockIsUninitialized, func);
2648  }
2649  if (!__kmp_is_drdpa_lock_nestable(lck)) {
2650  KMP_FATAL(LockSimpleUsedAsNestable, func);
2651  }
2652  return __kmp_test_nested_drdpa_lock(lck, gtid);
2653 }
2654 
2655 int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2656  KMP_DEBUG_ASSERT(gtid >= 0);
2657 
2658  KMP_MB();
2659  if (--(lck->lk.depth_locked) == 0) {
2660  KMP_MB();
2661  lck->lk.owner_id = 0;
2662  __kmp_release_drdpa_lock(lck, gtid);
2663  return KMP_LOCK_RELEASED;
2664  }
2665  return KMP_LOCK_STILL_HELD;
2666 }
2667 
2668 static int __kmp_release_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2669  kmp_int32 gtid) {
2670  char const *const func = "omp_unset_nest_lock";
2671  KMP_MB(); /* in case another processor initialized lock */
2672  if (lck->lk.initialized != lck) {
2673  KMP_FATAL(LockIsUninitialized, func);
2674  }
2675  if (!__kmp_is_drdpa_lock_nestable(lck)) {
2676  KMP_FATAL(LockSimpleUsedAsNestable, func);
2677  }
2678  if (__kmp_get_drdpa_lock_owner(lck) == -1) {
2679  KMP_FATAL(LockUnsettingFree, func);
2680  }
2681  if (__kmp_get_drdpa_lock_owner(lck) != gtid) {
2682  KMP_FATAL(LockUnsettingSetByAnother, func);
2683  }
2684  return __kmp_release_nested_drdpa_lock(lck, gtid);
2685 }
2686 
2687 void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck) {
2688  __kmp_init_drdpa_lock(lck);
2689  lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
2690 }
2691 
2692 static void __kmp_init_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
2693  __kmp_init_nested_drdpa_lock(lck);
2694 }
2695 
2696 void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck) {
2697  __kmp_destroy_drdpa_lock(lck);
2698  lck->lk.depth_locked = 0;
2699 }
2700 
2701 static void __kmp_destroy_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
2702  char const *const func = "omp_destroy_nest_lock";
2703  if (lck->lk.initialized != lck) {
2704  KMP_FATAL(LockIsUninitialized, func);
2705  }
2706  if (!__kmp_is_drdpa_lock_nestable(lck)) {
2707  KMP_FATAL(LockSimpleUsedAsNestable, func);
2708  }
2709  if (__kmp_get_drdpa_lock_owner(lck) != -1) {
2710  KMP_FATAL(LockStillOwned, func);
2711  }
2712  __kmp_destroy_nested_drdpa_lock(lck);
2713 }
2714 
2715 // access functions to fields which don't exist for all lock kinds.
2716 
2717 static int __kmp_is_drdpa_lock_initialized(kmp_drdpa_lock_t *lck) {
2718  return lck == lck->lk.initialized;
2719 }
2720 
2721 static const ident_t *__kmp_get_drdpa_lock_location(kmp_drdpa_lock_t *lck) {
2722  return lck->lk.location;
2723 }
2724 
2725 static void __kmp_set_drdpa_lock_location(kmp_drdpa_lock_t *lck,
2726  const ident_t *loc) {
2727  lck->lk.location = loc;
2728 }
2729 
2730 static kmp_lock_flags_t __kmp_get_drdpa_lock_flags(kmp_drdpa_lock_t *lck) {
2731  return lck->lk.flags;
2732 }
2733 
2734 static void __kmp_set_drdpa_lock_flags(kmp_drdpa_lock_t *lck,
2735  kmp_lock_flags_t flags) {
2736  lck->lk.flags = flags;
2737 }
2738 
2739 // Time stamp counter
2740 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2741 #define __kmp_tsc() __kmp_hardware_timestamp()
2742 // Runtime's default backoff parameters
2743 kmp_backoff_t __kmp_spin_backoff_params = {1, 4096, 100};
2744 #else
2745 // Use nanoseconds for other platforms
2746 extern kmp_uint64 __kmp_now_nsec();
2747 kmp_backoff_t __kmp_spin_backoff_params = {1, 256, 100};
2748 #define __kmp_tsc() __kmp_now_nsec()
2749 #endif
2750 
2751 // A useful predicate for dealing with timestamps that may wrap.
2752 // Is a before b? Since the timestamps may wrap, this is asking whether it's
2753 // shorter to go clockwise from a to b around the clock-face, or anti-clockwise.
2754 // Times where going clockwise is less distance than going anti-clockwise
2755 // are in the future, others are in the past. E.g. with a = MAX-1 and b = MAX+1
2756 // (= 0), a > b is true but does not mean a has reached b; the signed difference,
2757 // signed(a) = -2 vs. signed(b) = 0, captures the actual ordering.
2758 static inline bool before(kmp_uint64 a, kmp_uint64 b) {
2759  return ((kmp_int64)b - (kmp_int64)a) > 0;
2760 }
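// Worked example, reusing the values from the comment above: a = MAX-1 reads
// as (kmp_int64)-2 and b = MAX+1 wraps to 0, so (kmp_int64)b - (kmp_int64)a
// is 2 > 0 and before(a, b) is true even though the unsigned comparison
// a < b is false. The predicate is only meaningful while the two timestamps
// are less than half the counter range apart.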
2761 
2762 // Truncated binary exponential backoff function
2763 void __kmp_spin_backoff(kmp_backoff_t *boff) {
2764   // We could flatten this loop, but making it a nested loop gives better results
2765  kmp_uint32 i;
2766  for (i = boff->step; i > 0; i--) {
2767  kmp_uint64 goal = __kmp_tsc() + boff->min_tick;
2768  do {
2769  KMP_CPU_PAUSE();
2770  } while (before(__kmp_tsc(), goal));
2771  }
2772  boff->step = (boff->step << 1 | 1) & (boff->max_backoff - 1);
2773 }
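// Sketch of the progression: a call busy-waits for the current "step" number
// of intervals of roughly min_tick timestamp units each, then advances step
// via (step << 1 | 1) & (max_backoff - 1). With the power-of-two max_backoff
// defaults above this walks 1, 3, 7, 15, ... and saturates at max_backoff - 1,
// so the wait grows geometrically but stays bounded.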
2774 
2775 #if KMP_USE_DYNAMIC_LOCK
2776 
2777 // Direct lock initializers. It simply writes a tag to the low 8 bits of the
2778 // lock word.
2779 static void __kmp_init_direct_lock(kmp_dyna_lock_t *lck,
2780  kmp_dyna_lockseq_t seq) {
2781  TCW_4(*lck, KMP_GET_D_TAG(seq));
2782  KA_TRACE(
2783  20,
2784  ("__kmp_init_direct_lock: initialized direct lock with type#%d\n", seq));
2785 }
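// Note on the encoding (inferred from the comments in this file): a direct
// lock stores its tag in the low bits of the lock word with the lowest bit
// set, whereas an indirect lock stores an even value (the table index shifted
// left by one, see __kmp_allocate_indirect_lock). The low bit therefore tells
// the two kinds apart, and the jump tables below only need their odd slots
// filled for the direct lock kinds.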
2786 
2787 #if KMP_USE_TSX
2788 
2789 // HLE lock functions - imported from the testbed runtime.
2790 #define HLE_ACQUIRE ".byte 0xf2;"
2791 #define HLE_RELEASE ".byte 0xf3;"
2792 
2793 static inline kmp_uint32 swap4(kmp_uint32 volatile *p, kmp_uint32 v) {
2794  __asm__ volatile(HLE_ACQUIRE "xchg %1,%0" : "+r"(v), "+m"(*p) : : "memory");
2795  return v;
2796 }
2797 
2798 static void __kmp_destroy_hle_lock(kmp_dyna_lock_t *lck) { TCW_4(*lck, 0); }
2799 
2800 static void __kmp_acquire_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
2801  // Use gtid for KMP_LOCK_BUSY if necessary
2802  if (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle)) {
2803  int delay = 1;
2804  do {
2805  while (*(kmp_uint32 volatile *)lck != KMP_LOCK_FREE(hle)) {
2806  for (int i = delay; i != 0; --i)
2807  KMP_CPU_PAUSE();
2808  delay = ((delay << 1) | 1) & 7;
2809  }
2810  } while (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle));
2811  }
2812 }
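// The pause loop above is a bounded exponential backoff: "delay" follows
// 1, 3, 7, 7, ... because of the "& 7" mask, so a waiter never pauses more
// than 7 KMP_CPU_PAUSE iterations between re-reads of the lock word. The
// HLE_ACQUIRE/HLE_RELEASE byte prefixes (XACQUIRE/XRELEASE) let the xchg and
// the releasing store elide the lock on hardware that supports HLE; on other
// processors they are ignored and the code degrades to a plain test-and-set.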
2813 
2814 static void __kmp_acquire_hle_lock_with_checks(kmp_dyna_lock_t *lck,
2815  kmp_int32 gtid) {
2816  __kmp_acquire_hle_lock(lck, gtid); // TODO: add checks
2817 }
2818 
2819 static int __kmp_release_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
2820  __asm__ volatile(HLE_RELEASE "movl %1,%0"
2821  : "=m"(*lck)
2822  : "r"(KMP_LOCK_FREE(hle))
2823  : "memory");
2824  return KMP_LOCK_RELEASED;
2825 }
2826 
2827 static int __kmp_release_hle_lock_with_checks(kmp_dyna_lock_t *lck,
2828  kmp_int32 gtid) {
2829  return __kmp_release_hle_lock(lck, gtid); // TODO: add checks
2830 }
2831 
2832 static int __kmp_test_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
2833  return swap4(lck, KMP_LOCK_BUSY(1, hle)) == KMP_LOCK_FREE(hle);
2834 }
2835 
2836 static int __kmp_test_hle_lock_with_checks(kmp_dyna_lock_t *lck,
2837  kmp_int32 gtid) {
2838  return __kmp_test_hle_lock(lck, gtid); // TODO: add checks
2839 }
2840 
2841 static void __kmp_init_rtm_lock(kmp_queuing_lock_t *lck) {
2842  __kmp_init_queuing_lock(lck);
2843 }
2844 
2845 static void __kmp_destroy_rtm_lock(kmp_queuing_lock_t *lck) {
2846  __kmp_destroy_queuing_lock(lck);
2847 }
2848 
2849 static void __kmp_acquire_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
2850  unsigned retries = 3, status;
2851  do {
2852  status = _xbegin();
2853  if (status == _XBEGIN_STARTED) {
2854  if (__kmp_is_unlocked_queuing_lock(lck))
2855  return;
2856  _xabort(0xff);
2857  }
2858  if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 0xff) {
2859  // Wait until lock becomes free
2860  while (!__kmp_is_unlocked_queuing_lock(lck))
2861  __kmp_yield(TRUE);
2862  } else if (!(status & _XABORT_RETRY))
2863  break;
2864  } while (retries--);
2865 
2866  // Fall-back non-speculative lock (xchg)
2867  __kmp_acquire_queuing_lock(lck, gtid);
2868 }
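// Descriptive note: the explicit _xabort(0xff) inside the transaction encodes
// "the queuing lock was already held", so an explicit abort carrying that code
// makes the thread wait for the lock to drain before re-attempting
// speculation, while any other abort without _XABORT_RETRY falls straight
// through to the non-speculative queuing-lock acquire after a few retries.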
2869 
2870 static void __kmp_acquire_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
2871  kmp_int32 gtid) {
2872  __kmp_acquire_rtm_lock(lck, gtid);
2873 }
2874 
2875 static int __kmp_release_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
2876  if (__kmp_is_unlocked_queuing_lock(lck)) {
2877  // Releasing from speculation
2878  _xend();
2879  } else {
2880  // Releasing from a real lock
2881  __kmp_release_queuing_lock(lck, gtid);
2882  }
2883  return KMP_LOCK_RELEASED;
2884 }
2885 
2886 static int __kmp_release_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
2887  kmp_int32 gtid) {
2888  return __kmp_release_rtm_lock(lck, gtid);
2889 }
2890 
2891 static int __kmp_test_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
2892  unsigned retries = 3, status;
2893  do {
2894  status = _xbegin();
2895  if (status == _XBEGIN_STARTED && __kmp_is_unlocked_queuing_lock(lck)) {
2896  return 1;
2897  }
2898  if (!(status & _XABORT_RETRY))
2899  break;
2900  } while (retries--);
2901 
2902  return (__kmp_is_unlocked_queuing_lock(lck)) ? 1 : 0;
2903 }
2904 
2905 static int __kmp_test_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
2906  kmp_int32 gtid) {
2907  return __kmp_test_rtm_lock(lck, gtid);
2908 }
2909 
2910 #endif // KMP_USE_TSX
2911 
2912 // Entry functions for indirect locks (first element of direct lock jump tables)
2913 static void __kmp_init_indirect_lock(kmp_dyna_lock_t *l,
2914  kmp_dyna_lockseq_t tag);
2915 static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock);
2916 static void __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
2917 static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
2918 static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
2919 static void __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
2920  kmp_int32);
2921 static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
2922  kmp_int32);
2923 static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
2924  kmp_int32);
2925 
2926 // Jump tables for the direct lock functions
2927 // Only fill in the odd entries; that avoids the need to shift out the low bit
2928 
2929 // init functions
2930 #define expand(l, op) 0, __kmp_init_direct_lock,
2931 void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t) = {
2932  __kmp_init_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, init)};
2933 #undef expand
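// Layout note for the table above: slot 0 holds the indirect-lock entry
// function and slot 1 is zero; each expansion of `expand` then contributes a
// zero followed by the direct-lock handler, so the handlers occupy the odd
// slots that the (odd) direct-lock tags index without any shifting. The
// destroy, set/unset and test tables below follow the same layout.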
2934 
2935 // destroy functions
2936 #define expand(l, op) 0, (void (*)(kmp_dyna_lock_t *))__kmp_##op##_##l##_lock,
2937 void (*__kmp_direct_destroy[])(kmp_dyna_lock_t *) = {
2938  __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)};
2939 #undef expand
2940 
2941 // set/acquire functions
2942 #define expand(l, op) \
2943  0, (void (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
2944 static void (*direct_set[])(kmp_dyna_lock_t *, kmp_int32) = {
2945  __kmp_set_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, acquire)};
2946 #undef expand
2947 #define expand(l, op) \
2948  0, (void (*)(kmp_dyna_lock_t *, \
2949  kmp_int32))__kmp_##op##_##l##_lock_with_checks,
2950 static void (*direct_set_check[])(kmp_dyna_lock_t *, kmp_int32) = {
2951  __kmp_set_indirect_lock_with_checks, 0,
2952  KMP_FOREACH_D_LOCK(expand, acquire)};
2953 #undef expand
2954 
2955 // unset/release and test functions
2956 #define expand(l, op) \
2957  0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
2958 static int (*direct_unset[])(kmp_dyna_lock_t *, kmp_int32) = {
2959  __kmp_unset_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, release)};
2960 static int (*direct_test[])(kmp_dyna_lock_t *, kmp_int32) = {
2961  __kmp_test_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, test)};
2962 #undef expand
2963 #define expand(l, op) \
2964  0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
2965 static int (*direct_unset_check[])(kmp_dyna_lock_t *, kmp_int32) = {
2966  __kmp_unset_indirect_lock_with_checks, 0,
2967  KMP_FOREACH_D_LOCK(expand, release)};
2968 static int (*direct_test_check[])(kmp_dyna_lock_t *, kmp_int32) = {
2969  __kmp_test_indirect_lock_with_checks, 0, KMP_FOREACH_D_LOCK(expand, test)};
2970 #undef expand
2971 
2972 // Exposes only one set of jump tables (*lock or *lock_with_checks).
2973 void (*(*__kmp_direct_set))(kmp_dyna_lock_t *, kmp_int32) = 0;
2974 int (*(*__kmp_direct_unset))(kmp_dyna_lock_t *, kmp_int32) = 0;
2975 int (*(*__kmp_direct_test))(kmp_dyna_lock_t *, kmp_int32) = 0;
2976 
2977 // Jump tables for the indirect lock functions
2978 #define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock,
2979 void (*__kmp_indirect_init[])(kmp_user_lock_p) = {
2980  KMP_FOREACH_I_LOCK(expand, init)};
2981 void (*__kmp_indirect_destroy[])(kmp_user_lock_p) = {
2982  KMP_FOREACH_I_LOCK(expand, destroy)};
2983 #undef expand
2984 
2985 // set/acquire functions
2986 #define expand(l, op) \
2987  (void (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
2988 static void (*indirect_set[])(kmp_user_lock_p, kmp_int32) = {
2989  KMP_FOREACH_I_LOCK(expand, acquire)};
2990 #undef expand
2991 #define expand(l, op) \
2992  (void (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
2993 static void (*indirect_set_check[])(kmp_user_lock_p, kmp_int32) = {
2994  KMP_FOREACH_I_LOCK(expand, acquire)};
2995 #undef expand
2996 
2997 // unset/release and test functions
2998 #define expand(l, op) \
2999  (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
3000 static int (*indirect_unset[])(kmp_user_lock_p, kmp_int32) = {
3001  KMP_FOREACH_I_LOCK(expand, release)};
3002 static int (*indirect_test[])(kmp_user_lock_p,
3003  kmp_int32) = {KMP_FOREACH_I_LOCK(expand, test)};
3004 #undef expand
3005 #define expand(l, op) \
3006  (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
3007 static int (*indirect_unset_check[])(kmp_user_lock_p, kmp_int32) = {
3008  KMP_FOREACH_I_LOCK(expand, release)};
3009 static int (*indirect_test_check[])(kmp_user_lock_p, kmp_int32) = {
3010  KMP_FOREACH_I_LOCK(expand, test)};
3011 #undef expand
3012 
3013 // Exposes only one set of jump tables (*lock or *lock_with_checks).
3014 void (*(*__kmp_indirect_set))(kmp_user_lock_p, kmp_int32) = 0;
3015 int (*(*__kmp_indirect_unset))(kmp_user_lock_p, kmp_int32) = 0;
3016 int (*(*__kmp_indirect_test))(kmp_user_lock_p, kmp_int32) = 0;
3017 
3018 // Lock index table.
3019 kmp_indirect_lock_table_t __kmp_i_lock_table;
3020 
3021 // Size of indirect locks.
3022 static kmp_uint32 __kmp_indirect_lock_size[KMP_NUM_I_LOCKS] = {0};
3023 
3024 // Jump tables for lock accessor/modifier.
3025 void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
3026  const ident_t *) = {0};
3027 void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
3028  kmp_lock_flags_t) = {0};
3029 const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
3030  kmp_user_lock_p) = {0};
3031 kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
3032  kmp_user_lock_p) = {0};
3033 
3034 // Use different lock pools for different lock types.
3035 static kmp_indirect_lock_t *__kmp_indirect_lock_pool[KMP_NUM_I_LOCKS] = {0};
3036 
3037 // User lock allocator for dynamically dispatched indirect locks. Every entry of
3038 // the indirect lock table holds the address and type of the allocated indirect
3039 // lock (kmp_indirect_lock_t), and the size of the table doubles when it is
3040 // full. A destroyed indirect lock object is returned to the reusable pool of
3041 // locks, unique to each lock type.
3042 kmp_indirect_lock_t *__kmp_allocate_indirect_lock(void **user_lock,
3043  kmp_int32 gtid,
3044  kmp_indirect_locktag_t tag) {
3045  kmp_indirect_lock_t *lck;
3046  kmp_lock_index_t idx;
3047 
3048  __kmp_acquire_lock(&__kmp_global_lock, gtid);
3049 
3050  if (__kmp_indirect_lock_pool[tag] != NULL) {
3051  // Reuse the allocated and destroyed lock object
3052  lck = __kmp_indirect_lock_pool[tag];
3053  if (OMP_LOCK_T_SIZE < sizeof(void *))
3054  idx = lck->lock->pool.index;
3055  __kmp_indirect_lock_pool[tag] = (kmp_indirect_lock_t *)lck->lock->pool.next;
3056  KA_TRACE(20, ("__kmp_allocate_indirect_lock: reusing an existing lock %p\n",
3057  lck));
3058  } else {
3059  idx = __kmp_i_lock_table.next;
3060  // Check capacity and double the size if it is full
3061  if (idx == __kmp_i_lock_table.size) {
3062  // Double up the space for block pointers
3063  int row = __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK;
3064  kmp_indirect_lock_t **new_table = (kmp_indirect_lock_t **)__kmp_allocate(
3065  2 * row * sizeof(kmp_indirect_lock_t *));
3066  KMP_MEMCPY(new_table, __kmp_i_lock_table.table,
3067  row * sizeof(kmp_indirect_lock_t *));
3068  kmp_indirect_lock_t **old_table = __kmp_i_lock_table.table;
3069  __kmp_i_lock_table.table = new_table;
3070  __kmp_free(old_table);
3071  // Allocate new objects in the new blocks
3072  for (int i = row; i < 2 * row; ++i)
3073  *(__kmp_i_lock_table.table + i) = (kmp_indirect_lock_t *)__kmp_allocate(
3074  KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t));
3075  __kmp_i_lock_table.size = 2 * idx;
3076  }
3077  __kmp_i_lock_table.next++;
3078  lck = KMP_GET_I_LOCK(idx);
3079  // Allocate a new base lock object
3080  lck->lock = (kmp_user_lock_p)__kmp_allocate(__kmp_indirect_lock_size[tag]);
3081  KA_TRACE(20,
3082  ("__kmp_allocate_indirect_lock: allocated a new lock %p\n", lck));
3083  }
3084 
3085  __kmp_release_lock(&__kmp_global_lock, gtid);
3086 
3087  lck->type = tag;
3088 
3089  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3090  *((kmp_lock_index_t *)user_lock) = idx
3091  << 1; // indirect lock word must be even
3092  } else {
3093  *((kmp_indirect_lock_t **)user_lock) = lck;
3094  }
3095 
3096  return lck;
3097 }
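// Illustrative walk-through: the lock table is an array of row pointers, each
// row holding KMP_I_LOCK_CHUNK kmp_indirect_lock_t objects. When next reaches
// size, only the row-pointer array is reallocated (doubled) and fresh rows are
// allocated up front, so addresses of locks already handed out never move.
// When omp_lock_t is too small to hold a pointer, the caller's lock variable
// instead receives idx << 1 -- an even value -- which keeps dynamically
// dispatched indirect lock words distinguishable from direct lock words.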
3098 
3099 // User lock lookup for dynamically dispatched locks.
3100 static __forceinline kmp_indirect_lock_t *
3101 __kmp_lookup_indirect_lock(void **user_lock, const char *func) {
3102  if (__kmp_env_consistency_check) {
3103  kmp_indirect_lock_t *lck = NULL;
3104  if (user_lock == NULL) {
3105  KMP_FATAL(LockIsUninitialized, func);
3106  }
3107  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3108  kmp_lock_index_t idx = KMP_EXTRACT_I_INDEX(user_lock);
3109  if (idx >= __kmp_i_lock_table.size) {
3110  KMP_FATAL(LockIsUninitialized, func);
3111  }
3112  lck = KMP_GET_I_LOCK(idx);
3113  } else {
3114  lck = *((kmp_indirect_lock_t **)user_lock);
3115  }
3116  if (lck == NULL) {
3117  KMP_FATAL(LockIsUninitialized, func);
3118  }
3119  return lck;
3120  } else {
3121  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3122  return KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(user_lock));
3123  } else {
3124  return *((kmp_indirect_lock_t **)user_lock);
3125  }
3126  }
3127 }
3128 
3129 static void __kmp_init_indirect_lock(kmp_dyna_lock_t *lock,
3130  kmp_dyna_lockseq_t seq) {
3131 #if KMP_USE_ADAPTIVE_LOCKS
3132  if (seq == lockseq_adaptive && !__kmp_cpuinfo.rtm) {
3133  KMP_WARNING(AdaptiveNotSupported, "kmp_lockseq_t", "adaptive");
3134  seq = lockseq_queuing;
3135  }
3136 #endif
3137 #if KMP_USE_TSX
3138  if (seq == lockseq_rtm && !__kmp_cpuinfo.rtm) {
3139  seq = lockseq_queuing;
3140  }
3141 #endif
3142  kmp_indirect_locktag_t tag = KMP_GET_I_TAG(seq);
3143  kmp_indirect_lock_t *l =
3144  __kmp_allocate_indirect_lock((void **)lock, __kmp_entry_gtid(), tag);
3145  KMP_I_LOCK_FUNC(l, init)(l->lock);
3146  KA_TRACE(
3147  20, ("__kmp_init_indirect_lock: initialized indirect lock with type#%d\n",
3148  seq));
3149 }
3150 
3151 static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock) {
3152  kmp_uint32 gtid = __kmp_entry_gtid();
3153  kmp_indirect_lock_t *l =
3154  __kmp_lookup_indirect_lock((void **)lock, "omp_destroy_lock");
3155  KMP_I_LOCK_FUNC(l, destroy)(l->lock);
3156  kmp_indirect_locktag_t tag = l->type;
3157 
3158  __kmp_acquire_lock(&__kmp_global_lock, gtid);
3159 
3160  // Use the base lock's space to keep the pool chain.
3161  l->lock->pool.next = (kmp_user_lock_p)__kmp_indirect_lock_pool[tag];
3162  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3163  l->lock->pool.index = KMP_EXTRACT_I_INDEX(lock);
3164  }
3165  __kmp_indirect_lock_pool[tag] = l;
3166 
3167  __kmp_release_lock(&__kmp_global_lock, gtid);
3168 }
3169 
3170 static void __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
3171  kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
3172  KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
3173 }
3174 
3175 static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
3176  kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
3177  return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
3178 }
3179 
3180 static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
3181  kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
3182  return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
3183 }
3184 
3185 static void __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
3186  kmp_int32 gtid) {
3187  kmp_indirect_lock_t *l =
3188  __kmp_lookup_indirect_lock((void **)lock, "omp_set_lock");
3189  KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
3190 }
3191 
3192 static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
3193  kmp_int32 gtid) {
3194  kmp_indirect_lock_t *l =
3195  __kmp_lookup_indirect_lock((void **)lock, "omp_unset_lock");
3196  return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
3197 }
3198 
3199 static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
3200  kmp_int32 gtid) {
3201  kmp_indirect_lock_t *l =
3202  __kmp_lookup_indirect_lock((void **)lock, "omp_test_lock");
3203  return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
3204 }
3205 
3206 kmp_dyna_lockseq_t __kmp_user_lock_seq = lockseq_queuing;
3207 
3208 // This is used only in kmp_error.cpp when consistency checking is on.
3209 kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck, kmp_uint32 seq) {
3210  switch (seq) {
3211  case lockseq_tas:
3212  case lockseq_nested_tas:
3213  return __kmp_get_tas_lock_owner((kmp_tas_lock_t *)lck);
3214 #if KMP_USE_FUTEX
3215  case lockseq_futex:
3216  case lockseq_nested_futex:
3217  return __kmp_get_futex_lock_owner((kmp_futex_lock_t *)lck);
3218 #endif
3219  case lockseq_ticket:
3220  case lockseq_nested_ticket:
3221  return __kmp_get_ticket_lock_owner((kmp_ticket_lock_t *)lck);
3222  case lockseq_queuing:
3223  case lockseq_nested_queuing:
3224 #if KMP_USE_ADAPTIVE_LOCKS
3225  case lockseq_adaptive:
3226 #endif
3227  return __kmp_get_queuing_lock_owner((kmp_queuing_lock_t *)lck);
3228  case lockseq_drdpa:
3229  case lockseq_nested_drdpa:
3230  return __kmp_get_drdpa_lock_owner((kmp_drdpa_lock_t *)lck);
3231  default:
3232  return 0;
3233  }
3234 }
3235 
3236 // Initializes data for dynamic user locks.
3237 void __kmp_init_dynamic_user_locks() {
3238  // Initialize jump table for the lock functions
3239  if (__kmp_env_consistency_check) {
3240  __kmp_direct_set = direct_set_check;
3241  __kmp_direct_unset = direct_unset_check;
3242  __kmp_direct_test = direct_test_check;
3243  __kmp_indirect_set = indirect_set_check;
3244  __kmp_indirect_unset = indirect_unset_check;
3245  __kmp_indirect_test = indirect_test_check;
3246  } else {
3247  __kmp_direct_set = direct_set;
3248  __kmp_direct_unset = direct_unset;
3249  __kmp_direct_test = direct_test;
3250  __kmp_indirect_set = indirect_set;
3251  __kmp_indirect_unset = indirect_unset;
3252  __kmp_indirect_test = indirect_test;
3253  }
3254  // If the user locks have already been initialized, then return. Allow the
3255  // switch between different KMP_CONSISTENCY_CHECK values, but do not allocate
3256  // new lock tables if they have already been allocated.
3257  if (__kmp_init_user_locks)
3258  return;
3259 
3260  // Initialize lock index table
3261  __kmp_i_lock_table.size = KMP_I_LOCK_CHUNK;
3262  __kmp_i_lock_table.table =
3263  (kmp_indirect_lock_t **)__kmp_allocate(sizeof(kmp_indirect_lock_t *));
3264  *(__kmp_i_lock_table.table) = (kmp_indirect_lock_t *)__kmp_allocate(
3265  KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t));
3266  __kmp_i_lock_table.next = 0;
3267 
3268  // Indirect lock size
3269  __kmp_indirect_lock_size[locktag_ticket] = sizeof(kmp_ticket_lock_t);
3270  __kmp_indirect_lock_size[locktag_queuing] = sizeof(kmp_queuing_lock_t);
3271 #if KMP_USE_ADAPTIVE_LOCKS
3272  __kmp_indirect_lock_size[locktag_adaptive] = sizeof(kmp_adaptive_lock_t);
3273 #endif
3274  __kmp_indirect_lock_size[locktag_drdpa] = sizeof(kmp_drdpa_lock_t);
3275 #if KMP_USE_TSX
3276  __kmp_indirect_lock_size[locktag_rtm] = sizeof(kmp_queuing_lock_t);
3277 #endif
3278  __kmp_indirect_lock_size[locktag_nested_tas] = sizeof(kmp_tas_lock_t);
3279 #if KMP_USE_FUTEX
3280  __kmp_indirect_lock_size[locktag_nested_futex] = sizeof(kmp_futex_lock_t);
3281 #endif
3282  __kmp_indirect_lock_size[locktag_nested_ticket] = sizeof(kmp_ticket_lock_t);
3283  __kmp_indirect_lock_size[locktag_nested_queuing] = sizeof(kmp_queuing_lock_t);
3284  __kmp_indirect_lock_size[locktag_nested_drdpa] = sizeof(kmp_drdpa_lock_t);
3285 
3286 // Initialize lock accessor/modifier
3287 #define fill_jumps(table, expand, sep) \
3288  { \
3289  table[locktag##sep##ticket] = expand(ticket); \
3290  table[locktag##sep##queuing] = expand(queuing); \
3291  table[locktag##sep##drdpa] = expand(drdpa); \
3292  }
3293 
3294 #if KMP_USE_ADAPTIVE_LOCKS
3295 #define fill_table(table, expand) \
3296  { \
3297  fill_jumps(table, expand, _); \
3298  table[locktag_adaptive] = expand(queuing); \
3299  fill_jumps(table, expand, _nested_); \
3300  }
3301 #else
3302 #define fill_table(table, expand) \
3303  { \
3304  fill_jumps(table, expand, _); \
3305  fill_jumps(table, expand, _nested_); \
3306  }
3307 #endif // KMP_USE_ADAPTIVE_LOCKS
3308 
3309 #define expand(l) \
3310  (void (*)(kmp_user_lock_p, const ident_t *)) __kmp_set_##l##_lock_location
3311  fill_table(__kmp_indirect_set_location, expand);
3312 #undef expand
3313 #define expand(l) \
3314  (void (*)(kmp_user_lock_p, kmp_lock_flags_t)) __kmp_set_##l##_lock_flags
3315  fill_table(__kmp_indirect_set_flags, expand);
3316 #undef expand
3317 #define expand(l) \
3318  (const ident_t *(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_location
3319  fill_table(__kmp_indirect_get_location, expand);
3320 #undef expand
3321 #define expand(l) \
3322  (kmp_lock_flags_t(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_flags
3323  fill_table(__kmp_indirect_get_flags, expand);
3324 #undef expand
3325 
3326  __kmp_init_user_locks = TRUE;
3327 }
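
// For reference, a hand-expanded sketch of the first fill_table() use above:
// with the first expand(l) definition, fill_table(__kmp_indirect_set_location,
// expand) produces assignments of the following form (plus, under
// KMP_USE_ADAPTIVE_LOCKS, an extra entry mapping locktag_adaptive to the
// queuing accessor, and the same right-hand sides again for the
// locktag_nested_* tags).
#if 0 // illustrative expansion only; not compiled
  __kmp_indirect_set_location[locktag_ticket] =
      (void (*)(kmp_user_lock_p, const ident_t *))__kmp_set_ticket_lock_location;
  __kmp_indirect_set_location[locktag_queuing] =
      (void (*)(kmp_user_lock_p, const ident_t *))__kmp_set_queuing_lock_location;
  __kmp_indirect_set_location[locktag_drdpa] =
      (void (*)(kmp_user_lock_p, const ident_t *))__kmp_set_drdpa_lock_location;
#endif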
3328 
3329 // Clean up the lock table.
3330 void __kmp_cleanup_indirect_user_locks() {
3331  kmp_lock_index_t i;
3332  int k;
3333 
3334  // Clean up locks in the pools first (they were already destroyed before going
3335  // into the pools).
3336  for (k = 0; k < KMP_NUM_I_LOCKS; ++k) {
3337  kmp_indirect_lock_t *l = __kmp_indirect_lock_pool[k];
3338  while (l != NULL) {
3339  kmp_indirect_lock_t *ll = l;
3340  l = (kmp_indirect_lock_t *)l->lock->pool.next;
3341  KA_TRACE(20, ("__kmp_cleanup_indirect_user_locks: freeing %p from pool\n",
3342  ll));
3343  __kmp_free(ll->lock);
3344  ll->lock = NULL;
3345  }
3346  __kmp_indirect_lock_pool[k] = NULL;
3347  }
3348  // Clean up the remaining undestroyed locks.
3349  for (i = 0; i < __kmp_i_lock_table.next; i++) {
3350  kmp_indirect_lock_t *l = KMP_GET_I_LOCK(i);
3351  if (l->lock != NULL) {
3352  // Locks not destroyed explicitly need to be destroyed here.
3353  KMP_I_LOCK_FUNC(l, destroy)(l->lock);
3354  KA_TRACE(
3355  20,
3356  ("__kmp_cleanup_indirect_user_locks: destroy/freeing %p from table\n",
3357  l));
3358  __kmp_free(l->lock);
3359  }
3360  }
3361  // Free the table
3362  for (i = 0; i < __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK; i++)
3363  __kmp_free(__kmp_i_lock_table.table[i]);
3364  __kmp_free(__kmp_i_lock_table.table);
3365 
3366  __kmp_init_user_locks = FALSE;
3367 }
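
// The final loop above frees one row buffer per KMP_I_LOCK_CHUNK entries:
// __kmp_i_lock_table.table is an array of row pointers, and each row holds
// KMP_I_LOCK_CHUNK kmp_indirect_lock_t objects. A sketch of the row/column
// arithmetic such a layout implies (the real lookup is the KMP_GET_I_LOCK /
// KMP_LOOKUP_I_LOCK machinery in kmp_lock.h; __sketch_index_to_lock is only
// an illustration):
#if 0 // illustrative sketch only; not compiled
static kmp_indirect_lock_t *__sketch_index_to_lock(kmp_lock_index_t idx) {
  kmp_indirect_lock_t *row = __kmp_i_lock_table.table[idx / KMP_I_LOCK_CHUNK];
  return row + (idx % KMP_I_LOCK_CHUNK);
}
#endif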
3368 
3369 enum kmp_lock_kind __kmp_user_lock_kind = lk_default;
3370 int __kmp_num_locks_in_block = 1; // FIXME - tune this value
3371 
3372 #else // KMP_USE_DYNAMIC_LOCK
3373 
3374 /* user locks
3375  * They are implemented as a table of function pointers which are set to the
3376  * lock functions of the appropriate kind, once that has been determined. */
3377 
3378 enum kmp_lock_kind __kmp_user_lock_kind = lk_default;
3379 
3380 size_t __kmp_base_user_lock_size = 0;
3381 size_t __kmp_user_lock_size = 0;
3382 
3383 kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck) = NULL;
3384 int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
3385  kmp_int32 gtid) = NULL;
3386 
3387 int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck,
3388  kmp_int32 gtid) = NULL;
3389 int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck,
3390  kmp_int32 gtid) = NULL;
3391 void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
3392 void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck) = NULL;
3393 void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
3394 int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
3395  kmp_int32 gtid) = NULL;
3396 
3397 int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
3398  kmp_int32 gtid) = NULL;
3399 int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
3400  kmp_int32 gtid) = NULL;
3401 void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
3402 void (*__kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
3403 
3404 int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck) = NULL;
3405 const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck) = NULL;
3406 void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck,
3407  const ident_t *loc) = NULL;
3408 kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck) = NULL;
3409 void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck,
3410  kmp_lock_flags_t flags) = NULL;
3411 
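// A minimal sketch of how these pointers are meant to be consumed once
// __kmp_set_user_lock_vptrs() below has bound them for the chosen lock kind.
// The real wrappers live in kmp_lock.h; __sketch_acquire_user_lock is a
// hypothetical stand-in, not a library entry point.
#if 0 // illustrative sketch only; not compiled
static int __sketch_acquire_user_lock(kmp_user_lock_p lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(__kmp_acquire_user_lock_with_checks_ != NULL);
  // Every user-lock operation is a single indirect call; the pointed-to
  // function is the tas/futex/ticket/queuing/adaptive/drdpa implementation
  // selected at initialization time.
  return (*__kmp_acquire_user_lock_with_checks_)(lck, gtid);
}
#endif
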
3412 void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind) {
3413  switch (user_lock_kind) {
3414  case lk_default:
3415  default:
3416  KMP_ASSERT(0);
3417 
3418  case lk_tas: {
3419  __kmp_base_user_lock_size = sizeof(kmp_base_tas_lock_t);
3420  __kmp_user_lock_size = sizeof(kmp_tas_lock_t);
3421 
3422  __kmp_get_user_lock_owner_ =
3423  (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_tas_lock_owner);
3424 
3425  if (__kmp_env_consistency_check) {
3426  KMP_BIND_USER_LOCK_WITH_CHECKS(tas);
3427  KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(tas);
3428  } else {
3429  KMP_BIND_USER_LOCK(tas);
3430  KMP_BIND_NESTED_USER_LOCK(tas);
3431  }
3432 
3433  __kmp_destroy_user_lock_ =
3434  (void (*)(kmp_user_lock_p))(&__kmp_destroy_tas_lock);
3435 
3436  __kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL;
3437 
3438  __kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL;
3439 
3440  __kmp_set_user_lock_location_ =
3441  (void (*)(kmp_user_lock_p, const ident_t *))NULL;
3442 
3443  __kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL;
3444 
3445  __kmp_set_user_lock_flags_ =
3446  (void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL;
3447  } break;
3448 
3449 #if KMP_USE_FUTEX
3450 
3451  case lk_futex: {
3452  __kmp_base_user_lock_size = sizeof(kmp_base_futex_lock_t);
3453  __kmp_user_lock_size = sizeof(kmp_futex_lock_t);
3454 
3455  __kmp_get_user_lock_owner_ =
3456  (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_futex_lock_owner);
3457 
3458  if (__kmp_env_consistency_check) {
3459  KMP_BIND_USER_LOCK_WITH_CHECKS(futex);
3460  KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(futex);
3461  } else {
3462  KMP_BIND_USER_LOCK(futex);
3463  KMP_BIND_NESTED_USER_LOCK(futex);
3464  }
3465 
3466  __kmp_destroy_user_lock_ =
3467  (void (*)(kmp_user_lock_p))(&__kmp_destroy_futex_lock);
3468 
3469  __kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL;
3470 
3471  __kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL;
3472 
3473  __kmp_set_user_lock_location_ =
3474  (void (*)(kmp_user_lock_p, const ident_t *))NULL;
3475 
3476  __kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL;
3477 
3478  __kmp_set_user_lock_flags_ =
3479  (void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL;
3480  } break;
3481 
3482 #endif // KMP_USE_FUTEX
3483 
3484  case lk_ticket: {
3485  __kmp_base_user_lock_size = sizeof(kmp_base_ticket_lock_t);
3486  __kmp_user_lock_size = sizeof(kmp_ticket_lock_t);
3487 
3488  __kmp_get_user_lock_owner_ =
3489  (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_owner);
3490 
3491  if (__kmp_env_consistency_check) {
3492  KMP_BIND_USER_LOCK_WITH_CHECKS(ticket);
3493  KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(ticket);
3494  } else {
3495  KMP_BIND_USER_LOCK(ticket);
3496  KMP_BIND_NESTED_USER_LOCK(ticket);
3497  }
3498 
3499  __kmp_destroy_user_lock_ =
3500  (void (*)(kmp_user_lock_p))(&__kmp_destroy_ticket_lock);
3501 
3502  __kmp_is_user_lock_initialized_ =
3503  (int (*)(kmp_user_lock_p))(&__kmp_is_ticket_lock_initialized);
3504 
3505  __kmp_get_user_lock_location_ =
3506  (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_location);
3507 
3508  __kmp_set_user_lock_location_ = (void (*)(
3509  kmp_user_lock_p, const ident_t *))(&__kmp_set_ticket_lock_location);
3510 
3511  __kmp_get_user_lock_flags_ =
3512  (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_flags);
3513 
3514  __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
3515  &__kmp_set_ticket_lock_flags);
3516  } break;
3517 
3518  case lk_queuing: {
3519  __kmp_base_user_lock_size = sizeof(kmp_base_queuing_lock_t);
3520  __kmp_user_lock_size = sizeof(kmp_queuing_lock_t);
3521 
3522  __kmp_get_user_lock_owner_ =
3523  (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner);
3524 
3525  if (__kmp_env_consistency_check) {
3526  KMP_BIND_USER_LOCK_WITH_CHECKS(queuing);
3527  KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(queuing);
3528  } else {
3529  KMP_BIND_USER_LOCK(queuing);
3530  KMP_BIND_NESTED_USER_LOCK(queuing);
3531  }
3532 
3533  __kmp_destroy_user_lock_ =
3534  (void (*)(kmp_user_lock_p))(&__kmp_destroy_queuing_lock);
3535 
3536  __kmp_is_user_lock_initialized_ =
3537  (int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized);
3538 
3539  __kmp_get_user_lock_location_ =
3540  (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location);
3541 
3542  __kmp_set_user_lock_location_ = (void (*)(
3543  kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location);
3544 
3545  __kmp_get_user_lock_flags_ =
3546  (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags);
3547 
3548  __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
3549  &__kmp_set_queuing_lock_flags);
3550  } break;
3551 
3552 #if KMP_USE_ADAPTIVE_LOCKS
3553  case lk_adaptive: {
3554  __kmp_base_user_lock_size = sizeof(kmp_base_adaptive_lock_t);
3555  __kmp_user_lock_size = sizeof(kmp_adaptive_lock_t);
3556 
3557  __kmp_get_user_lock_owner_ =
3558  (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner);
3559 
3560  if (__kmp_env_consistency_check) {
3561  KMP_BIND_USER_LOCK_WITH_CHECKS(adaptive);
3562  } else {
3563  KMP_BIND_USER_LOCK(adaptive);
3564  }
3565 
3566  __kmp_destroy_user_lock_ =
3567  (void (*)(kmp_user_lock_p))(&__kmp_destroy_adaptive_lock);
3568 
3569  __kmp_is_user_lock_initialized_ =
3570  (int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized);
3571 
3572  __kmp_get_user_lock_location_ =
3573  (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location);
3574 
3575  __kmp_set_user_lock_location_ = (void (*)(
3576  kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location);
3577 
3578  __kmp_get_user_lock_flags_ =
3579  (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags);
3580 
3581  __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
3582  &__kmp_set_queuing_lock_flags);
3583 
3584  } break;
3585 #endif // KMP_USE_ADAPTIVE_LOCKS
3586 
3587  case lk_drdpa: {
3588  __kmp_base_user_lock_size = sizeof(kmp_base_drdpa_lock_t);
3589  __kmp_user_lock_size = sizeof(kmp_drdpa_lock_t);
3590 
3591  __kmp_get_user_lock_owner_ =
3592  (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_owner);
3593 
3594  if (__kmp_env_consistency_check) {
3595  KMP_BIND_USER_LOCK_WITH_CHECKS(drdpa);
3596  KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(drdpa);
3597  } else {
3598  KMP_BIND_USER_LOCK(drdpa);
3599  KMP_BIND_NESTED_USER_LOCK(drdpa);
3600  }
3601 
3602  __kmp_destroy_user_lock_ =
3603  (void (*)(kmp_user_lock_p))(&__kmp_destroy_drdpa_lock);
3604 
3605  __kmp_is_user_lock_initialized_ =
3606  (int (*)(kmp_user_lock_p))(&__kmp_is_drdpa_lock_initialized);
3607 
3608  __kmp_get_user_lock_location_ =
3609  (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_location);
3610 
3611  __kmp_set_user_lock_location_ = (void (*)(
3612  kmp_user_lock_p, const ident_t *))(&__kmp_set_drdpa_lock_location);
3613 
3614  __kmp_get_user_lock_flags_ =
3615  (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_flags);
3616 
3617  __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
3618  &__kmp_set_drdpa_lock_flags);
3619  } break;
3620  }
3621 }
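
// A hedged usage sketch: a caller that has settled on a lock kind (e.g. from a
// KMP_LOCK_KIND-style setting) records the kind and then binds the vptrs.
// __sketch_select_ticket_locks is hypothetical and only illustrates the
// expected call sequence.
#if 0 // illustrative sketch only; not compiled
static void __sketch_select_ticket_locks(void) {
  __kmp_user_lock_kind = lk_ticket;
  __kmp_set_user_lock_vptrs(lk_ticket);
  // From here on, __kmp_base_user_lock_size, __kmp_user_lock_size and all of
  // the __kmp_*_user_lock_* pointers above describe ticket locks.
}
#endif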
3622 
3623 // ----------------------------------------------------------------------------
3624 // User lock table & lock allocation
3625 
3626 kmp_lock_table_t __kmp_user_lock_table = {1, 0, NULL};
3627 kmp_user_lock_p __kmp_lock_pool = NULL;
3628 
3629 // Lock block-allocation support.
3630 kmp_block_of_locks *__kmp_lock_blocks = NULL;
3631 int __kmp_num_locks_in_block = 1; // FIXME - tune this value
3632 
3633 static kmp_lock_index_t __kmp_lock_table_insert(kmp_user_lock_p lck) {
3634  // Assume that kmp_global_lock is held upon entry/exit.
3635  kmp_lock_index_t index;
3636  if (__kmp_user_lock_table.used >= __kmp_user_lock_table.allocated) {
3637  kmp_lock_index_t size;
3638  kmp_user_lock_p *table;
3639  // Reallocate lock table.
3640  if (__kmp_user_lock_table.allocated == 0) {
3641  size = 1024;
3642  } else {
3643  size = __kmp_user_lock_table.allocated * 2;
3644  }
3645  table = (kmp_user_lock_p *)__kmp_allocate(sizeof(kmp_user_lock_p) * size);
3646  KMP_MEMCPY(table + 1, __kmp_user_lock_table.table + 1,
3647  sizeof(kmp_user_lock_p) * (__kmp_user_lock_table.used - 1));
3648  table[0] = (kmp_user_lock_p)__kmp_user_lock_table.table;
3649  // We cannot free the previous table now, since it may still be in use by
3650  // other threads, so save the pointer to the previous table in the first
3651  // element of the new table. All the tables are thus chained into a list
3652  // that can be freed when the library shuts down.
3653  __kmp_user_lock_table.table = table;
3654  __kmp_user_lock_table.allocated = size;
3655  }
3656  KMP_DEBUG_ASSERT(__kmp_user_lock_table.used <
3657  __kmp_user_lock_table.allocated);
3658  index = __kmp_user_lock_table.used;
3659  __kmp_user_lock_table.table[index] = lck;
3660  ++__kmp_user_lock_table.used;
3661  return index;
3662 }
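
// Growth sketch for the table above: capacity doubles on each reallocation
// (1024 -> 2048 -> 4096 -> ...), slot [0] of every new table stores the
// previous (smaller) table, and used entries [1 .. used-1] are copied forward:
//
//   table A (1024):  [NULL][lck1][lck2] ... [lck1023]   (first allocation)
//   table B (2048):  [ A  ][lck1][lck2] ... [lck2047]   (after first doubling)
//   table C (4096):  [ B  ][lck1][lck2] ... [lck4095]   (after second doubling)
//
// so __kmp_cleanup_user_locks() can later walk C -> B -> A through slot [0]
// and free every generation (see the table_ptr loop near the end of this file).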
3663 
3664 static kmp_user_lock_p __kmp_lock_block_allocate() {
3665  // Assume that kmp_global_lock is held upon entry/exit.
3666  static int last_index = 0;
3667  if ((last_index >= __kmp_num_locks_in_block) || (__kmp_lock_blocks == NULL)) {
3668  // Restart the index.
3669  last_index = 0;
3670  // Need to allocate a new block.
3671  KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0);
3672  size_t space_for_locks = __kmp_user_lock_size * __kmp_num_locks_in_block;
3673  char *buffer =
3674  (char *)__kmp_allocate(space_for_locks + sizeof(kmp_block_of_locks));
3675  // Set up the new block.
3676  kmp_block_of_locks *new_block =
3677  (kmp_block_of_locks *)(&buffer[space_for_locks]);
3678  new_block->next_block = __kmp_lock_blocks;
3679  new_block->locks = (void *)buffer;
3680  // Publish the new block.
3681  KMP_MB();
3682  __kmp_lock_blocks = new_block;
3683  }
3684  kmp_user_lock_p ret = (kmp_user_lock_p)(&(
3685  ((char *)(__kmp_lock_blocks->locks))[last_index * __kmp_user_lock_size]));
3686  last_index++;
3687  return ret;
3688 }
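
// Layout sketch for one block allocated above, assuming
// __kmp_num_locks_in_block == N and __kmp_user_lock_size == S:
//
//   buffer:  [ lock 0 ][ lock 1 ] ... [ lock N-1 ][ kmp_block_of_locks ]
//   offset:   0         S              (N-1)*S     N*S
//
// new_block->locks points at the start of the buffer, the block header lives
// in the trailing sizeof(kmp_block_of_locks) bytes, and successive calls hand
// out &buffer[last_index * S] until last_index reaches N, which forces a new
// block to be allocated.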
3689 
3690 // Get memory for a lock. It may be freshly allocated memory or memory
3691 // reused from the lock pool.
3692 kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock, kmp_int32 gtid,
3693  kmp_lock_flags_t flags) {
3694  kmp_user_lock_p lck;
3695  kmp_lock_index_t index;
3696  KMP_DEBUG_ASSERT(user_lock);
3697 
3698  __kmp_acquire_lock(&__kmp_global_lock, gtid);
3699 
3700  if (__kmp_lock_pool == NULL) {
3701  // Lock pool is empty. Allocate new memory.
3702 
3703  // ANNOTATION: Found no good way to express the synchronization
3704  // between allocation and usage, so ignore the allocation.
3705  ANNOTATE_IGNORE_WRITES_BEGIN();
3706  if (__kmp_num_locks_in_block <= 1) { // Tune this cutoff point.
3707  lck = (kmp_user_lock_p)__kmp_allocate(__kmp_user_lock_size);
3708  } else {
3709  lck = __kmp_lock_block_allocate();
3710  }
3711  ANNOTATE_IGNORE_WRITES_END();
3712 
3713  // Insert the lock into the table so that it can be freed in __kmp_cleanup,
3714  // and so the debugger has info on all allocated locks.
3715  index = __kmp_lock_table_insert(lck);
3716  } else {
3717  // Pick up lock from pool.
3718  lck = __kmp_lock_pool;
3719  index = __kmp_lock_pool->pool.index;
3720  __kmp_lock_pool = __kmp_lock_pool->pool.next;
3721  }
3722 
3723  // We could potentially differentiate between nested and regular locks
3724  // here, and do the lock table lookup for regular locks only.
3725  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3726  *((kmp_lock_index_t *)user_lock) = index;
3727  } else {
3728  *((kmp_user_lock_p *)user_lock) = lck;
3729  }
3730 
3731  // Mark the lock if it is a critical section lock.
3732  __kmp_set_user_lock_flags(lck, flags);
3733 
3734  __kmp_release_lock(&__kmp_global_lock, gtid); // AC: TODO: move this line up
3735 
3736  return lck;
3737 }
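
// A hedged sketch of the caller side: an omp_init_lock-style entry point would
// hand the address of the user's lock variable to __kmp_user_lock_allocate()
// and then initialize the returned lock through the bound vptr. The real entry
// points live in kmp_csupport.cpp; __sketch_init_user_lock is hypothetical.
#if 0 // illustrative sketch only; not compiled
static void __sketch_init_user_lock(void **user_lock, kmp_int32 gtid) {
  // On builds where OMP_LOCK_T_SIZE < sizeof(void *), the 4-byte user variable
  // receives a lock-table index; otherwise it receives the lock pointer.
  kmp_user_lock_p lck = __kmp_user_lock_allocate(user_lock, gtid, 0);
  (*__kmp_init_user_lock_with_checks_)(lck);
}
#endif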
3738 
3739 // Return the lock's memory to the pool for reuse.
3740 void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
3741  kmp_user_lock_p lck) {
3742  KMP_DEBUG_ASSERT(user_lock != NULL);
3743  KMP_DEBUG_ASSERT(lck != NULL);
3744 
3745  __kmp_acquire_lock(&__kmp_global_lock, gtid);
3746 
3747  lck->pool.next = __kmp_lock_pool;
3748  __kmp_lock_pool = lck;
3749  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3750  kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock);
3751  KMP_DEBUG_ASSERT(0 < index && index <= __kmp_user_lock_table.used);
3752  lck->pool.index = index;
3753  }
3754 
3755  __kmp_release_lock(&__kmp_global_lock, gtid);
3756 }
3757 
3758 kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock, char const *func) {
3759  kmp_user_lock_p lck = NULL;
3760 
3761  if (__kmp_env_consistency_check) {
3762  if (user_lock == NULL) {
3763  KMP_FATAL(LockIsUninitialized, func);
3764  }
3765  }
3766 
3767  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3768  kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock);
3769  if (__kmp_env_consistency_check) {
3770  if (!(0 < index && index < __kmp_user_lock_table.used)) {
3771  KMP_FATAL(LockIsUninitialized, func);
3772  }
3773  }
3774  KMP_DEBUG_ASSERT(0 < index && index < __kmp_user_lock_table.used);
3775  KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0);
3776  lck = __kmp_user_lock_table.table[index];
3777  } else {
3778  lck = *((kmp_user_lock_p *)user_lock);
3779  }
3780 
3781  if (__kmp_env_consistency_check) {
3782  if (lck == NULL) {
3783  KMP_FATAL(LockIsUninitialized, func);
3784  }
3785  }
3786 
3787  return lck;
3788 }
3789 
3790 void __kmp_cleanup_user_locks(void) {
3791  // Reset the lock pool. Don't worry about locks in the pool--we free them
3792  // while iterating through the lock table (it includes all locks, dead or alive).
3793  __kmp_lock_pool = NULL;
3794 
3795 #define IS_CRITICAL(lck) \
3796  ((__kmp_get_user_lock_flags_ != NULL) && \
3797  ((*__kmp_get_user_lock_flags_)(lck)&kmp_lf_critical_section))
3798 
3799  // Loop through lock table, free all locks.
3800  // Do not free item [0]; it is reserved for the lock tables list.
3801  //
3802  // FIXME - we are iterating through a list of (pointers to) objects of type
3803  // union kmp_user_lock, but we have no way of knowing whether the base type is
3804  // currently "pool" or whatever the global user lock type is.
3805  //
3806  // We are relying on the fact that for all of the user lock types
3807  // (except "tas"), the first field in the lock struct is the "initialized"
3808  // field, which is set to the address of the lock object itself when
3809  // the lock is initialized. When the union is of type "pool", the
3810  // first field is a pointer to the next object in the free list, which
3811  // will not be the same address as the object itself.
3812  //
3813  // This means that the check (*__kmp_is_user_lock_initialized_)(lck) will fail
3814  // for "pool" objects on the free list. This must happen as the "location"
3815  // field of real user locks overlaps the "index" field of "pool" objects.
3816  //
3817  // It would be better to run through the free list, and remove all "pool"
3818  // objects from the lock table before executing this loop. However,
3819  // "pool" objects do not always have their index field set (only on
3820  // lin_32e), and I don't want to search the lock table for the address
3821  // of every "pool" object on the free list.
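  //
  // In concrete terms, for the non-tas kinds the "is initialized" check below
  // conceptually compares the first word of the object against the object's
  // own address, roughly:
  //   lck->lk.initialized == lck    -> a real, initialized user lock
  //   lck->pool.next      == other  -> a recycled "pool" entry, check fails
  // so pool entries are silently skipped and just have their memory freed.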
3822  while (__kmp_user_lock_table.used > 1) {
3823  const ident *loc;
3824 
3825  // Reduce __kmp_user_lock_table.used before freeing the lock,
3826  // so that the state of the lock table stays consistent.
3827  kmp_user_lock_p lck =
3828  __kmp_user_lock_table.table[--__kmp_user_lock_table.used];
3829 
3830  if ((__kmp_is_user_lock_initialized_ != NULL) &&
3831  (*__kmp_is_user_lock_initialized_)(lck)) {
3832  // Issue a warning if: KMP_CONSISTENCY_CHECK is on AND the lock is
3833  // initialized AND it is NOT a critical section (the user is not responsible
3834  // for destroying criticals) AND we know the source location to report.
3835  if (__kmp_env_consistency_check && (!IS_CRITICAL(lck)) &&
3836  ((loc = __kmp_get_user_lock_location(lck)) != NULL) &&
3837  (loc->psource != NULL)) {
3838  kmp_str_loc_t str_loc = __kmp_str_loc_init(loc->psource, 0);
3839  KMP_WARNING(CnsLockNotDestroyed, str_loc.file, str_loc.line);
3840  __kmp_str_loc_free(&str_loc);
3841  }
3842 
3843 #ifdef KMP_DEBUG
3844  if (IS_CRITICAL(lck)) {
3845  KA_TRACE(
3846  20,
3847  ("__kmp_cleanup_user_locks: free critical section lock %p (%p)\n",
3848  lck, *(void **)lck));
3849  } else {
3850  KA_TRACE(20, ("__kmp_cleanup_user_locks: free lock %p (%p)\n", lck,
3851  *(void **)lck));
3852  }
3853 #endif // KMP_DEBUG
3854 
3855  // Clean up the lock's internal dynamic resources (particularly for drdpa locks).
3856  __kmp_destroy_user_lock(lck);
3857  }
3858 
3859  // Free the lock if block allocation of locks is not used.
3860  if (__kmp_lock_blocks == NULL) {
3861  __kmp_free(lck);
3862  }
3863  }
3864 
3865 #undef IS_CRITICAL
3866 
3867  // delete lock table(s).
3868  kmp_user_lock_p *table_ptr = __kmp_user_lock_table.table;
3869  __kmp_user_lock_table.table = NULL;
3870  __kmp_user_lock_table.allocated = 0;
3871 
3872  while (table_ptr != NULL) {
3873  // In the first element we saved the pointer to the previous
3874  // (smaller) lock table.
3875  kmp_user_lock_p *next = (kmp_user_lock_p *)(table_ptr[0]);
3876  __kmp_free(table_ptr);
3877  table_ptr = next;
3878  }
3879 
3880  // Free buffers allocated for blocks of locks.
3881  kmp_block_of_locks_t *block_ptr = __kmp_lock_blocks;
3882  __kmp_lock_blocks = NULL;
3883 
3884  while (block_ptr != NULL) {
3885  kmp_block_of_locks_t *next = block_ptr->next_block;
3886  __kmp_free(block_ptr->locks);
3887  // *block_ptr itself was allocated at the end of the locks vector.
3888  block_ptr = next;
3889  }
3890 
3891  TCW_4(__kmp_init_user_locks, FALSE);
3892 }
3893 
3894 #endif // KMP_USE_DYNAMIC_LOCK