/* kmp_threadprivate.cpp -- OpenMP threadprivate support */

#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1

void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size);

struct shared_table __kmp_threadprivate_d_table;

#ifdef KMP_INLINE_SUBR
static inline
#endif
struct private_common *
__kmp_threadprivate_find_task_common(struct common_table *tbl, int gtid,
                                     void *pc_addr) {
  /* Look up this thread's private copy descriptor for pc_addr. */
  struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, called with "
                "address %p\n",
                gtid, pc_addr));
  dump_list();
#endif

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, found "
                    "node %p on list\n",
                    gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}

#ifdef KMP_INLINE_SUBR
static inline
#endif
struct shared_common *
__kmp_find_shared_task_common(struct shared_table *tbl, int gtid,
                              void *pc_addr) {
  /* Look up the global descriptor (constructors, size, etc.) for pc_addr. */
  struct shared_common *tn;

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(
          10,
          ("__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
           gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}

/* Create a snapshot of the initial (serial) image of a threadprivate block;
   the copy is only made if the block is not all zeros. */
static struct private_data *__kmp_init_common_data(void *pc_addr,
                                                   size_t pc_size) {
  struct private_data *d;
  size_t i;
  char *p;

  d = (struct private_data *)__kmp_allocate(sizeof(struct private_data));
  d->size = pc_size;
  d->more = 1;

  p = (char *)pc_addr;

  for (i = pc_size; i > 0; --i) {
    if (*p++ != '\0') {
      d->data = __kmp_allocate(pc_size);
      KMP_MEMCPY(d->data, pc_addr, pc_size);
      break;
    }
  }

  return d;
}

/* Initialize a thread's private copy from the saved serial image. */
static void __kmp_copy_common_data(void *pc_addr, struct private_data *d) {
  char *addr = (char *)pc_addr;
  size_t i, offset;

  for (offset = 0; d != 0; d = d->next) {
    for (i = d->more; i > 0; --i) {
      if (d->data == 0)
        memset(&addr[offset], '\0', d->size);
      else
        KMP_MEMCPY(&addr[offset], d->data, d->size);
      offset += d->size;
    }
  }
}

void __kmp_common_initialize(void) {
  /* Initialize the global table of threadprivate descriptors once. */
  if (!TCR_4(__kmp_init_common)) {
    int q;
#ifdef KMP_DEBUG
    int gtid;
#endif

    __kmp_threadpriv_cache_list = NULL;

#ifdef KMP_DEBUG
    /* Verify that the uber masters were initialized. */
    for (gtid = 0; gtid < __kmp_threads_capacity; gtid++)
      if (__kmp_root[gtid]) {
        KMP_DEBUG_ASSERT(__kmp_root[gtid]->r.r_uber_thread);
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
          KMP_DEBUG_ASSERT(
              !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q]);
      }
#endif /* KMP_DEBUG */

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
      __kmp_threadprivate_d_table.data[q] = 0;

    TCW_4(__kmp_init_common, TRUE);
  }
}

/* Call all destructors for threadprivate data belonging to all threads. */
void __kmp_common_destroy(void) {
  if (TCR_4(__kmp_init_common)) {
    int q;

    TCW_4(__kmp_init_common, FALSE);

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      int gtid;
      struct private_common *tn;
      struct shared_common *d_tn;

      /* C++ destructors need to be called once per thread before exiting.
         Don't call destructors for the initial thread unless a copy
         constructor was used. */
      for (d_tn = __kmp_threadprivate_d_table.data[q]; d_tn;
           d_tn = d_tn->next) {
        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
            }
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtor)(tn->par_addr);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtor)(d_tn->obj_init);
            }
          }
        }
      }
      __kmp_threadprivate_d_table.data[q] = 0;
    }
  }
}

/* Call the destructors for the threadprivate data of one exiting thread. */
void __kmp_common_destroy_gtid(int gtid) {
  struct private_common *tn;
  struct shared_common *d_tn;

  if (!TCR_4(__kmp_init_gtid)) {
    /* Possible when one root initiates early library termination in a serial
       region while other teams are still active and their workers are about
       to end. */
    return;
  }

  KC_TRACE(10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid));
  if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid)) : (!KMP_UBER_GTID(gtid))) {

    if (TCR_4(__kmp_init_common)) {

      for (tn = __kmp_threads[gtid]->th.th_pri_head; tn; tn = tn->link) {

        d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                             tn->gbl_addr);

        KMP_DEBUG_ASSERT(d_tn);

        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            (void)(*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            (void)(*d_tn->dt.dtor)(tn->par_addr);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtor)(d_tn->obj_init);
          }
        }
      }
      KC_TRACE(30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors "
                    "complete\n",
                    gtid));
    }
  }
}

#ifdef KMP_TASK_COMMON_DEBUG
static void dump_list(void) {
  int p, q;

  for (p = 0; p < __kmp_all_nth; ++p) {
    if (!__kmp_threads[p])
      continue;
    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      if (__kmp_threads[p]->th.th_pri_common->data[q]) {
        struct private_common *tn;

        KC_TRACE(10, ("\tdump_list: gtid:%d addresses\n", p));

        for (tn = __kmp_threads[p]->th.th_pri_common->data[q]; tn;
             tn = tn->next) {
          KC_TRACE(10,
                   ("\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                    tn->gbl_addr, tn->par_addr));
        }
      }
    }
  }
}
#endif /* KMP_TASK_COMMON_DEBUG */

/* Register a threadprivate block for a thread that is executing serially
   (the serial data address can be used directly). Called from
   __kmpc_threadprivate when the root is not active. */
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size) {
  struct shared_common **lnk_tn, *d_tn;
  KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
                   __kmp_threads[gtid]->th.th_root->r.r_active == 0);

  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                       pc_addr);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));

    d_tn->gbl_addr = pc_addr;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    d_tn->cmn_size = pc_size;

    __kmp_acquire_lock(&__kmp_global_lock, gtid);

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;

    __kmp_release_lock(&__kmp_global_lock, gtid);
  }
}

struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size) {
  struct private_common *tn, **tt;
  struct shared_common *d_tn;

  /* +++++++++ START OF CRITICAL SECTION +++++++++ */
  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  tn = (struct private_common *)__kmp_allocate(sizeof(struct private_common));

  tn->gbl_addr = pc_addr;

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, gtid,
      pc_addr); /* Only the MASTER data table exists. */

  if (d_tn != 0) {
    /* This threadprivate variable has already been seen. */

    if (d_tn->pod_init == 0 && d_tn->obj_init == 0) {
      d_tn->cmn_size = pc_size;

      if (d_tn->is_vec) {
        if (d_tn->ct.ctorv != 0) {
          /* Construct from scratch so no prototype exists. */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctorv != 0) {
          /* Now data-initialize the prototype since it was previously
             registered. */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctorv)(d_tn->obj_init, pc_addr, d_tn->vec_len);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      } else {
        if (d_tn->ct.ctor != 0) {
          /* Construct from scratch so no prototype exists. */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctor != 0) {
          /* Now data-initialize the prototype since it was previously
             registered. */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctor)(d_tn->obj_init, pc_addr);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      }
    }
  } else {
    struct shared_common **lnk_tn;

    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = pc_addr;
    d_tn->cmn_size = pc_size;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }

  tn->cmn_size = d_tn->cmn_size;

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid))) {
    tn->par_addr = (void *)pc_addr;
  } else {
    tn->par_addr = (void *)__kmp_allocate(tn->cmn_size);
  }

  __kmp_release_lock(&__kmp_global_lock, gtid);
  /* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
  if (pc_size > d_tn->cmn_size) {
    KC_TRACE(
        10, ("__kmp_threadprivate_insert: THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
             " ,%" KMP_UINTPTR_SPEC ")\n",
             pc_addr, pc_size, d_tn->cmn_size));
    KMP_FATAL(TPCommonBlocksInconsist);
  }
#endif /* USE_CHECKS_COMMON */

  tt = &(__kmp_threads[gtid]->th.th_pri_common->data[KMP_HASH(pc_addr)]);

#ifdef KMP_TASK_COMMON_DEBUG
  if (*tt != 0) {
    KC_TRACE(
        10,
        ("__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
         gtid, pc_addr));
  }
#endif
  tn->next = *tt;
  *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10,
           ("__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
            gtid, pc_addr));
  dump_list();
#endif

  /* Link the node into a simple list for per-thread cleanup. */
  tn->link = __kmp_threads[gtid]->th.th_pri_head;
  __kmp_threads[gtid]->th.th_pri_head = tn;

#ifdef USE_TV
  __kmp_tv_threadprivate_store(__kmp_threads[gtid], tn->gbl_addr, tn->par_addr);
#endif

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid)))
    return tn;

  /* For worker threads, initialize the private copy: use the copy constructor
     if one exists, else the constructor, else copy the serial image. */
  if (d_tn->is_vec) {
    if (d_tn->ct.ctorv != 0) {
      (void)(*d_tn->ct.ctorv)(tn->par_addr, d_tn->vec_len);
    } else if (d_tn->cct.cctorv != 0) {
      (void)(*d_tn->cct.cctorv)(tn->par_addr, d_tn->obj_init, d_tn->vec_len);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  } else {
    if (d_tn->ct.ctor != 0) {
      (void)(*d_tn->ct.ctor)(tn->par_addr);
    } else if (d_tn->cct.cctor != 0) {
      (void)(*d_tn->cct.cctor)(tn->par_addr, d_tn->obj_init);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  }

  return tn;
}

/* Register constructor/copy-constructor/destructor callbacks for a
   threadprivate variable (scalar form). */
void __kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor,
                                   kmpc_cctor cctor, kmpc_dtor dtor) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register: called\n"));

#ifdef USE_CHECKS_COMMON
  /* Copy constructor must be zero for current code generation. */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  /* Only the global data table exists. */
  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, -1, data);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctor = ctor;
    d_tn->cct.cctor = cctor;
    d_tn->dt.dtor = dtor;

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}

void *__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data,
                           size_t size) {
  void *ret;
  struct private_common *tn;

  KC_TRACE(10, ("__kmpc_threadprivate: T#%d called\n", global_tid));

#ifdef USE_CHECKS_COMMON
  if (!__kmp_init_serial)
    KMP_FATAL(RTLNotInitialized);
#endif /* USE_CHECKS_COMMON */

  if (!__kmp_threads[global_tid]->th.th_root->r.r_active && !__kmp_foreign_tp) {
    /* The root is not active, so the serial address can be reused as the
       private copy for this thread. */
    KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting private data\n",
                  global_tid));
    kmp_threadprivate_insert_private_data(global_tid, data, data, size);

    ret = data;
  } else {
    KC_TRACE(
        50,
        ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
         global_tid, data));
    tn = __kmp_threadprivate_find_task_common(
        __kmp_threads[global_tid]->th.th_pri_common, global_tid, data);

    if (tn) {
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d found data\n", global_tid));
#ifdef USE_CHECKS_COMMON
      if ((size_t)size > tn->cmn_size) {
        KC_TRACE(10, ("THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
                      " ,%" KMP_UINTPTR_SPEC ")\n",
                      data, size, tn->cmn_size));
        KMP_FATAL(TPCommonBlocksInconsist);
      }
#endif /* USE_CHECKS_COMMON */
    } else {
      /* The parallel address will be the same as the serial address. */
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid));
      tn = kmp_threadprivate_insert(global_tid, data, data, size);
    }

    ret = tn->par_addr;
  }
  KC_TRACE(10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                global_tid, ret));

  return ret;
}

/* Allocate private storage for threadprivate data, using a per-variable
   cache indexed by global thread id. */
void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid,
                                  void *data, size_t size, void ***cache) {
  KC_TRACE(10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, "
                "address: %p, size: %" KMP_SIZE_T_SPEC "\n",
                global_tid, *cache, data, size));

  if (TCR_PTR(*cache) == 0) {
    __kmp_acquire_lock(&__kmp_global_lock, global_tid);

    if (TCR_PTR(*cache) == 0) {
      __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
      __kmp_tp_cached = 1;
      __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
      void **my_cache;
      KMP_ITT_IGNORE(
          my_cache = (void **)__kmp_allocate(
              sizeof(void *) * __kmp_tp_capacity + sizeof(kmp_cached_addr_t)););
      /* No need to zero the allocated memory; __kmp_allocate does that. */
      KC_TRACE(
          50,
          ("__kmpc_threadprivate_cached: T#%d allocated cache at address %p\n",
           global_tid, my_cache));

      /* Add the cache address to a linked list for later cleanup. */
      kmp_cached_addr_t *tp_cache_addr;

      tp_cache_addr = (kmp_cached_addr_t *)&my_cache[__kmp_tp_capacity];
      tp_cache_addr->addr = my_cache;
      tp_cache_addr->next = __kmp_threadpriv_cache_list;
      __kmp_threadpriv_cache_list = tp_cache_addr;

      KMP_MB();

      TCW_PTR(*cache, my_cache);

      KMP_MB();
    }

    __kmp_release_lock(&__kmp_global_lock, global_tid);
  }

  void *ret;
  if ((ret = TCR_PTR((*cache)[global_tid])) == 0) {
    ret = __kmpc_threadprivate(loc, global_tid, data, (size_t)size);

    TCW_PTR((*cache)[global_tid], ret);
  }
  KC_TRACE(10,
           ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
            global_tid, ret));

  return ret;
}

/* Register constructor/copy-constructor/destructor callbacks for a
   threadprivate array (vector form). */
void __kmpc_threadprivate_register_vec(ident_t *loc, void *data,
                                       kmpc_ctor_vec ctor,
                                       kmpc_cctor_vec cctor,
                                       kmpc_dtor_vec dtor,
                                       size_t vector_length) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register_vec: called\n"));

#ifdef USE_CHECKS_COMMON
  /* Copy constructor must be zero for current code generation. */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, -1,
      data); /* Only the global data table exists. */

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctorv = ctor;
    d_tn->cct.cctorv = cctor;
    d_tn->dt.dtorv = dtor;
    d_tn->is_vec = TRUE;
    d_tn->vec_len = (size_t)vector_length;

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}

/* Public interface summary: callback typedefs and entry points. */
typedef void *(*kmpc_ctor)(void *);
typedef void *(*kmpc_cctor)(void *, void *);
typedef void (*kmpc_dtor)(void *);
typedef void *(*kmpc_ctor_vec)(void *, size_t);
typedef void *(*kmpc_cctor_vec)(void *, void *, size_t);
typedef void (*kmpc_dtor_vec)(void *, size_t);

void __kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor,
                                   kmpc_cctor cctor, kmpc_dtor dtor);
void __kmpc_threadprivate_register_vec(ident_t *loc, void *data,
                                       kmpc_ctor_vec ctor, kmpc_cctor_vec cctor,
                                       kmpc_dtor_vec dtor,
                                       size_t vector_length);
void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid,
                                  void *data, size_t size, void ***cache);
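
For orientation, here is a minimal user-level sketch of the behavior these entry points implement, assuming a compiler lowers the threadprivate pragma onto __kmpc_threadprivate_register / __kmpc_threadprivate_cached as listed above; the variable names below are illustrative, not part of the runtime.

#include <omp.h>
#include <stdio.h>

static int counter = 10; /* master (serial) copy; its address is the gbl_addr key */
#pragma omp threadprivate(counter)

int main(void) {
  /* copyin broadcasts the master value into every thread's private copy,
     the path served by __kmp_copy_common_data / the registered constructors. */
#pragma omp parallel copyin(counter)
  {
    counter += omp_get_thread_num(); /* each thread updates its own copy */
    printf("T#%d counter=%d at %p\n", omp_get_thread_num(), counter,
           (void *)&counter);
  }
  return 0;
}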