LLVM OpenMP* Runtime Library
kmp_threadprivate.cpp
/*
 * kmp_threadprivate.cpp -- OpenMP threadprivate support library
 */


//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"

#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1

void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size);

struct shared_table __kmp_threadprivate_d_table;

static
#ifdef KMP_INLINE_SUBR
    __forceinline
#endif
    struct private_common *
    __kmp_threadprivate_find_task_common(struct common_table *tbl, int gtid,
                                         void *pc_addr)

{
  struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, called with "
                "address %p\n",
                gtid, pc_addr));
  dump_list();
#endif

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, found "
                    "node %p on list\n",
                    gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}

static
#ifdef KMP_INLINE_SUBR
    __forceinline
#endif
    struct shared_common *
    __kmp_find_shared_task_common(struct shared_table *tbl, int gtid,
                                  void *pc_addr) {
  struct shared_common *tn;

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(
          10,
          ("__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
           gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}

// Create a template for the data initialized storage. Either the template is
// NULL indicating zero fill, or the template is a copy of the original data.
static struct private_data *__kmp_init_common_data(void *pc_addr,
                                                   size_t pc_size) {
  struct private_data *d;
  size_t i;
  char *p;

  d = (struct private_data *)__kmp_allocate(sizeof(struct private_data));
  /*
  d->data = 0; // AC: commented out because __kmp_allocate zeroes the memory
  d->next = 0;
  */
  d->size = pc_size;
  d->more = 1;

  p = (char *)pc_addr;

  for (i = pc_size; i > 0; --i) {
    if (*p++ != '\0') {
      d->data = __kmp_allocate(pc_size);
      KMP_MEMCPY(d->data, pc_addr, pc_size);
      break;
    }
  }

  return d;
}
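
// Example (illustrative, not part of the original source): for a block whose
// initial image is all zeros, e.g. `static int counter;`, the loop above never
// sees a nonzero byte, d->data stays NULL, and later copies are zero-filled
// with memset. For `static int counter = 42;`, the first nonzero byte triggers
// a heap copy of the original bytes, and later copies use KMP_MEMCPY.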

// Initialize the data area from the template.
static void __kmp_copy_common_data(void *pc_addr, struct private_data *d) {
  char *addr = (char *)pc_addr;
  int i, offset;

  for (offset = 0; d != 0; d = d->next) {
    for (i = d->more; i > 0; --i) {
      if (d->data == 0)
        memset(&addr[offset], '\0', d->size);
      else
        KMP_MEMCPY(&addr[offset], d->data, d->size);
      offset += d->size;
    }
  }
}

/* We are called from __kmp_serial_initialize() with __kmp_initz_lock held. */
void __kmp_common_initialize(void) {
  if (!TCR_4(__kmp_init_common)) {
    int q;
#ifdef KMP_DEBUG
    int gtid;
#endif

    __kmp_threadpriv_cache_list = NULL;

#ifdef KMP_DEBUG
    /* Verify the uber masters were initialized. */
    for (gtid = 0; gtid < __kmp_threads_capacity; gtid++)
      if (__kmp_root[gtid]) {
        KMP_DEBUG_ASSERT(__kmp_root[gtid]->r.r_uber_thread);
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
          KMP_DEBUG_ASSERT(
              !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q]);
        /* __kmp_root[ gtid ]-> r.r_uber_thread ->
         * th.th_pri_common -> data[ q ] = 0; */
      }
#endif /* KMP_DEBUG */

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
      __kmp_threadprivate_d_table.data[q] = 0;

    TCW_4(__kmp_init_common, TRUE);
  }
}

/* Call all destructors for threadprivate data belonging to all threads.
   Currently unused! */
void __kmp_common_destroy(void) {
  if (TCR_4(__kmp_init_common)) {
    int q;

    TCW_4(__kmp_init_common, FALSE);

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      int gtid;
      struct private_common *tn;
      struct shared_common *d_tn;

      /* C++ destructors need to be called once per thread before exiting.
         Don't call destructors for master thread though unless we used copy
         constructor */

      for (d_tn = __kmp_threadprivate_d_table.data[q]; d_tn;
           d_tn = d_tn->next) {
        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
            }
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtor)(tn->par_addr);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtor)(d_tn->obj_init);
            }
          }
        }
      }
      __kmp_threadprivate_d_table.data[q] = 0;
    }
  }
}

/* Call all destructors for threadprivate data belonging to this thread */
void __kmp_common_destroy_gtid(int gtid) {
  struct private_common *tn;
  struct shared_common *d_tn;

  if (!TCR_4(__kmp_init_gtid)) {
    // This is possible when one of multiple roots initiates early library
    // termination in a sequential region while other teams are active, and its
    // child threads are about to end.
    return;
  }

  KC_TRACE(10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid));
  if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid)) : (!KMP_UBER_GTID(gtid))) {

    if (TCR_4(__kmp_init_common)) {

      /* Cannot do this here since not all threads have destroyed their data */
      /* TCW_4(__kmp_init_common, FALSE); */

      for (tn = __kmp_threads[gtid]->th.th_pri_head; tn; tn = tn->link) {

        d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                             tn->gbl_addr);

        KMP_DEBUG_ASSERT(d_tn);

        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            (void)(*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            (void)(*d_tn->dt.dtor)(tn->par_addr);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtor)(d_tn->obj_init);
          }
        }
      }
      KC_TRACE(30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors "
                    "complete\n",
                    gtid));
    }
  }
}

#ifdef KMP_TASK_COMMON_DEBUG
static void dump_list(void) {
  int p, q;

  for (p = 0; p < __kmp_all_nth; ++p) {
    if (!__kmp_threads[p])
      continue;
    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      if (__kmp_threads[p]->th.th_pri_common->data[q]) {
        struct private_common *tn;

        KC_TRACE(10, ("\tdump_list: gtid:%d addresses\n", p));

        for (tn = __kmp_threads[p]->th.th_pri_common->data[q]; tn;
             tn = tn->next) {
          KC_TRACE(10,
                   ("\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                    tn->gbl_addr, tn->par_addr));
        }
      }
    }
  }
}
#endif /* KMP_TASK_COMMON_DEBUG */

// NOTE: this routine is to be called only from the serial part of the program.
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size) {
  struct shared_common **lnk_tn, *d_tn;
  KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
                   __kmp_threads[gtid]->th.th_root->r.r_active == 0);

  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                       pc_addr);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));

    d_tn->gbl_addr = pc_addr;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    /*
    d_tn->obj_init = 0; // AC: commented out because __kmp_allocate zeroes
                        // the memory
    d_tn->ct.ctor = 0;
    d_tn->cct.cctor = 0;
    d_tn->dt.dtor = 0;
    d_tn->is_vec = FALSE;
    d_tn->vec_len = 0L;
    */
    d_tn->cmn_size = pc_size;

    __kmp_acquire_lock(&__kmp_global_lock, gtid);

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;

    __kmp_release_lock(&__kmp_global_lock, gtid);
  }
}

struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size) {
  struct private_common *tn, **tt;
  struct shared_common *d_tn;

  /* +++++++++ START OF CRITICAL SECTION +++++++++ */
  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  tn = (struct private_common *)__kmp_allocate(sizeof(struct private_common));

  tn->gbl_addr = pc_addr;

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, gtid,
      pc_addr); /* Only the MASTER data table exists. */

  if (d_tn != 0) {
    /* This threadprivate variable has already been seen. */

    if (d_tn->pod_init == 0 && d_tn->obj_init == 0) {
      d_tn->cmn_size = pc_size;

      if (d_tn->is_vec) {
        if (d_tn->ct.ctorv != 0) {
          /* Construct from scratch so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctorv != 0) {
          /* Now data initialize the prototype since it was previously
           * registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctorv)(d_tn->obj_init, pc_addr, d_tn->vec_len);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      } else {
        if (d_tn->ct.ctor != 0) {
          /* Construct from scratch so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctor != 0) {
          /* Now data initialize the prototype since it was previously
             registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctor)(d_tn->obj_init, pc_addr);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      }
    }
  } else {
    struct shared_common **lnk_tn;

    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = pc_addr;
    d_tn->cmn_size = pc_size;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    /*
    d_tn->obj_init = 0; // AC: commented out because __kmp_allocate zeroes
                        // the memory
    d_tn->ct.ctor = 0;
    d_tn->cct.cctor = 0;
    d_tn->dt.dtor = 0;
    d_tn->is_vec = FALSE;
    d_tn->vec_len = 0L;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }

  tn->cmn_size = d_tn->cmn_size;

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid))) {
    tn->par_addr = (void *)pc_addr;
  } else {
    tn->par_addr = (void *)__kmp_allocate(tn->cmn_size);
  }

  __kmp_release_lock(&__kmp_global_lock, gtid);
/* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
  if (pc_size > d_tn->cmn_size) {
    KC_TRACE(
        10, ("__kmp_threadprivate_insert: THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
             " ,%" KMP_UINTPTR_SPEC ")\n",
             pc_addr, pc_size, d_tn->cmn_size));
    KMP_FATAL(TPCommonBlocksInconsist);
  }
#endif /* USE_CHECKS_COMMON */

  tt = &(__kmp_threads[gtid]->th.th_pri_common->data[KMP_HASH(pc_addr)]);

#ifdef KMP_TASK_COMMON_DEBUG
  if (*tt != 0) {
    KC_TRACE(
        10,
        ("__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
         gtid, pc_addr));
  }
#endif
  tn->next = *tt;
  *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10,
           ("__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
            gtid, pc_addr));
  dump_list();
#endif

  /* Link the node into a simple list */

  tn->link = __kmp_threads[gtid]->th.th_pri_head;
  __kmp_threads[gtid]->th.th_pri_head = tn;

#ifdef BUILD_TV
  __kmp_tv_threadprivate_store(__kmp_threads[gtid], tn->gbl_addr, tn->par_addr);
#endif

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid)))
    return tn;

  /* If this is a C++ object with a copy constructor, use it;
   * else if a C++ object with a constructor, use it for the non-master copies
   * only;
   * else use pod_init and memcpy.
   *
   * C++ constructors need to be called once for each non-master thread on
   * allocate.
   * C++ copy constructors need to be called once for each thread on allocate. */

  /* C++ object with constructors/destructors; don't call constructors for
     master thread though */
  if (d_tn->is_vec) {
    if (d_tn->ct.ctorv != 0) {
      (void)(*d_tn->ct.ctorv)(tn->par_addr, d_tn->vec_len);
    } else if (d_tn->cct.cctorv != 0) {
      (void)(*d_tn->cct.cctorv)(tn->par_addr, d_tn->obj_init, d_tn->vec_len);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  } else {
    if (d_tn->ct.ctor != 0) {
      (void)(*d_tn->ct.ctor)(tn->par_addr);
    } else if (d_tn->cct.cctor != 0) {
      (void)(*d_tn->cct.cctor)(tn->par_addr, d_tn->obj_init);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  }
  /* !BUILD_OPENMP_C
  if (tn->par_addr != tn->gbl_addr)
    __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); */

  return tn;
}

/* ------------------------------------------------------------------------ */
/* We are currently parallel, and we know the thread id.                     */
/* ------------------------------------------------------------------------ */

void __kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor,
                                   kmpc_cctor cctor, kmpc_dtor dtor) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  /* Only the global data table exists. */
  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, -1, data);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctor = ctor;
    d_tn->cct.cctor = cctor;
    d_tn->dt.dtor = dtor;
    /*
    d_tn->is_vec = FALSE; // AC: commented out because __kmp_allocate zeroes
                          // the memory
    d_tn->vec_len = 0L;
    d_tn->obj_init = 0;
    d_tn->pod_init = 0;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}
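
// Usage sketch (assumed typical compiler lowering, not taken from this file):
// for a C++ threadprivate object with a nontrivial constructor/destructor, the
// compiler registers per-object thunks once before the object is first used.
// The type `TObj`, the object `tobj`, and the thunk names are hypothetical.
//
//   static void *tobj_ctor(void *addr) { return new (addr) TObj(); }
//   static void tobj_dtor(void *addr) { static_cast<TObj *>(addr)->~TObj(); }
//
//   __kmpc_threadprivate_register(&loc, &tobj, tobj_ctor, /* cctor = */ NULL,
//                                 tobj_dtor);
//
// Note that the USE_CHECKS_COMMON assert above requires cctor == 0 with the
// current code generation.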

void *__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data,
                           size_t size) {
  void *ret;
  struct private_common *tn;

  KC_TRACE(10, ("__kmpc_threadprivate: T#%d called\n", global_tid));

#ifdef USE_CHECKS_COMMON
  if (!__kmp_init_serial)
    KMP_FATAL(RTLNotInitialized);
#endif /* USE_CHECKS_COMMON */

  if (!__kmp_threads[global_tid]->th.th_root->r.r_active && !__kmp_foreign_tp) {
    /* The parallel address will NEVER overlap with the data_address */
    /* dkp: 3rd arg to kmp_threadprivate_insert_private_data() is the
     * data_address; use data_address = data */

    KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting private data\n",
                  global_tid));
    kmp_threadprivate_insert_private_data(global_tid, data, data, size);

    ret = data;
  } else {
    KC_TRACE(
        50,
        ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
         global_tid, data));
    tn = __kmp_threadprivate_find_task_common(
        __kmp_threads[global_tid]->th.th_pri_common, global_tid, data);

    if (tn) {
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d found data\n", global_tid));
#ifdef USE_CHECKS_COMMON
      if ((size_t)size > tn->cmn_size) {
        KC_TRACE(10, ("THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
                      " ,%" KMP_UINTPTR_SPEC ")\n",
                      data, size, tn->cmn_size));
        KMP_FATAL(TPCommonBlocksInconsist);
      }
#endif /* USE_CHECKS_COMMON */
    } else {
      /* The parallel address will NEVER overlap with the data_address */
      /* dkp: 3rd arg to kmp_threadprivate_insert() is the data_address; use
       * data_address = data */
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid));
      tn = kmp_threadprivate_insert(global_tid, data, data, size);
    }

    ret = tn->par_addr;
  }
  KC_TRACE(10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                global_tid, ret));

  return ret;
}
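
// Usage sketch (assumed, not taken from this file): a direct, uncached lookup
// of the calling thread's copy; `counter` and `loc` are hypothetical.
//
//   int *p = (int *)__kmpc_threadprivate(&loc, __kmpc_global_thread_num(&loc),
//                                        &counter, sizeof(counter));
//
// Each such call repeats the hash-table lookup, which is why compilers usually
// emit the cached variant below instead.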

void *
__kmpc_threadprivate_cached(ident_t *loc, // In source code location.
                            kmp_int32 global_tid, // gtid.
                            void *data, // Pointer to original global variable.
                            size_t size, // Size of original global variable.
                            void ***cache) {
  KC_TRACE(10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, "
                "address: %p, size: %" KMP_SIZE_T_SPEC "\n",
                global_tid, *cache, data, size));

  if (TCR_PTR(*cache) == 0) {
    __kmp_acquire_lock(&__kmp_global_lock, global_tid);

    if (TCR_PTR(*cache) == 0) {
      __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
      __kmp_tp_cached = 1;
      __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
      void **my_cache;
      KMP_ITT_IGNORE(
          my_cache = (void **)__kmp_allocate(
              sizeof(void *) * __kmp_tp_capacity + sizeof(kmp_cached_addr_t)););
      // No need to zero the allocated memory; __kmp_allocate does that.
      KC_TRACE(
          50,
          ("__kmpc_threadprivate_cached: T#%d allocated cache at address %p\n",
           global_tid, my_cache));

      /* TODO: free all this memory in __kmp_common_destroy using
       * __kmp_threadpriv_cache_list */
      /* Add address of mycache to linked list for cleanup later */
      kmp_cached_addr_t *tp_cache_addr;

      tp_cache_addr = (kmp_cached_addr_t *)&my_cache[__kmp_tp_capacity];
      tp_cache_addr->addr = my_cache;
      tp_cache_addr->next = __kmp_threadpriv_cache_list;
      __kmp_threadpriv_cache_list = tp_cache_addr;

      KMP_MB();

      TCW_PTR(*cache, my_cache);

      KMP_MB();
    }

    __kmp_release_lock(&__kmp_global_lock, global_tid);
  }

  void *ret;
  if ((ret = TCR_PTR((*cache)[global_tid])) == 0) {
    ret = __kmpc_threadprivate(loc, global_tid, data, (size_t)size);

    TCW_PTR((*cache)[global_tid], ret);
  }
  KC_TRACE(10,
           ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
            global_tid, ret));

  return ret;
}
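
// Usage sketch (assumed typical compiler lowering, not taken from this file):
// references to a threadprivate variable inside a parallel region go through a
// per-variable cache indexed by gtid, so the lookup in __kmpc_threadprivate
// runs at most once per thread. The names `counter` and `counter_cache` are
// hypothetical.
//
//   static int counter;          // the threadprivate variable
//   static void **counter_cache; // compiler-generated cache, one slot per gtid
//
//   kmp_int32 gtid = __kmpc_global_thread_num(&loc);
//   int *p = (int *)__kmpc_threadprivate_cached(&loc, gtid, &counter,
//                                               sizeof(counter),
//                                               &counter_cache);
//   ++*p; // updates only this thread's private copy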

void __kmpc_threadprivate_register_vec(ident_t *loc, void *data,
                                       kmpc_ctor_vec ctor, kmpc_cctor_vec cctor,
                                       kmpc_dtor_vec dtor,
                                       size_t vector_length) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register_vec: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, -1,
      data); /* Only the global data table exists. */

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctorv = ctor;
    d_tn->cct.cctorv = cctor;
    d_tn->dt.dtorv = dtor;
    d_tn->is_vec = TRUE;
    d_tn->vec_len = (size_t)vector_length;
    /*
    d_tn->obj_init = 0; // AC: commented out because __kmp_allocate zeroes
                        // the memory
    d_tn->pod_init = 0;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}
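
// Usage sketch (assumed, not taken from this file): the vector form registers
// constructor/destructor thunks that also receive the registered length, e.g.
// for a threadprivate array of C++ objects. The runtime forwards vector_length
// unchanged to the thunks, so caller and thunks only have to agree on its
// meaning (element count in this sketch); `TObj`, `tarr`, and the thunk names
// are hypothetical.
//
//   static TObj tarr[8];
//   static void *tarr_ctor(void *addr, size_t n) {
//     for (size_t i = 0; i < n; ++i)
//       new (static_cast<TObj *>(addr) + i) TObj();
//     return addr;
//   }
//   static void tarr_dtor(void *addr, size_t n) {
//     for (size_t i = 0; i < n; ++i)
//       (static_cast<TObj *>(addr) + i)->~TObj();
//   }
//
//   __kmpc_threadprivate_register_vec(&loc, tarr, tarr_ctor, /* cctor = */ NULL,
//                                     tarr_dtor, 8);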