reference, declaration → definition
definition → references, declarations, derived classes, virtual overrides
reference to multiple definitions → definitions
unreferenced

References

projects/openmp/runtime/src/kmp.h
 3062   return __kmp_threads[gtid]->th.th_info.ds.ds_tid;
 3067   return team->t.t_threads[tid]->th.th_info.ds.ds_gtid;
 3072   return thr->th.th_info.ds.ds_gtid;
 3082   return __kmp_threads[gtid]->th.th_team;
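
The kmp.h hits above are the bodies of the inline gtid/tid accessors: every lookup funnels through the global __kmp_threads[] table and the th member of the thread record. A minimal sketch of that access pattern, using simplified "_mock" stand-ins rather than the real kmp.h types (the names below are illustrative, not the library's definitions), is:

  // Illustrative sketch only -- not the real kmp_info_t / kmp.h layout.
  struct kmp_team_mock;                          // hypothetical team type

  struct kmp_desc_mock {                         // mirrors th.th_info.ds.*
    int ds_tid;                                  // team-local thread id
    int ds_gtid;                                 // global thread id
  };

  struct kmp_base_info_mock {
    struct { kmp_desc_mock ds; } th_info;
    kmp_team_mock *th_team;
  };

  struct kmp_info_mock {                         // plays the role of kmp_info_t
    kmp_base_info_mock th;                       // all accesses go through ->th.
  };

  static kmp_info_mock *threads_mock[64];        // stand-in for __kmp_threads

  // Counterparts of the returns quoted from kmp.h:3062-3082.
  static inline int tid_from_gtid(int gtid) {
    return threads_mock[gtid]->th.th_info.ds.ds_tid;
  }
  static inline kmp_team_mock *team_from_gtid(int gtid) {
    return threads_mock[gtid]->th.th_team;
  }
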
projects/openmp/runtime/src/kmp_affinity.cpp
 4710   if (th->th.th_affin_mask == NULL) {
 4711     KMP_CPU_ALLOC(th->th.th_affin_mask);
 4713     KMP_CPU_ZERO(th->th.th_affin_mask);
 4759   th->th.th_current_place = i;
 4761     th->th.th_new_place = i;
 4762     th->th.th_first_place = 0;
 4763     th->th.th_last_place = __kmp_affinity_num_masks - 1;
 4767     th->th.th_first_place = 0;
 4768     th->th.th_last_place = __kmp_affinity_num_masks - 1;
 4779   KMP_CPU_COPY(th->th.th_affin_mask, mask);
 4787                               th->th.th_affin_mask);
 4800     __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
 4816   KMP_ASSERT(th->th.th_new_place >= 0);
 4817   KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity_num_masks);
 4818   if (th->th.th_first_place <= th->th.th_last_place) {
 4819     KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
 4820                (th->th.th_new_place <= th->th.th_last_place));
 4822     KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
 4823                (th->th.th_new_place >= th->th.th_last_place));
 4829       KMP_CPU_INDEX(__kmp_affinity_masks, th->th.th_new_place);
 4830   KMP_CPU_COPY(th->th.th_affin_mask, mask);
 4831   th->th.th_current_place = th->th.th_new_place;
 4836                               th->th.th_affin_mask);
 4840   __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
 4894     KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
 4897   th->th.th_current_place = KMP_PLACE_UNDEFINED;
 4898   th->th.th_new_place = KMP_PLACE_UNDEFINED;
 4899   th->th.th_first_place = 0;
 4900   th->th.th_last_place = __kmp_affinity_num_masks - 1;
 4903   th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;
 5066   int tid = th->th.th_info.ds.ds_tid;
 5114     kmp_affin_mask_t *mask = th->th.th_affin_mask;
 5136     kmp_affin_mask_t *mask = th->th.th_affin_mask;
projects/openmp/runtime/src/kmp_alloc.cpp
  231   data = (thr_data_t *)((!th->th.th_local.bget_data)
  233                             : th->th.th_local.bget_data);
  242   th->th.th_local.bget_data = data;
  243   th->th.th_local.bget_list = 0;
  256   data = (thr_data_t *)th->th.th_local.bget_data;
  265   void *p = TCR_SYNC_PTR(th->th.th_local.bget_list);
  270       volatile void *old_value = TCR_SYNC_PTR(th->th.th_local.bget_list);
  271       while (!KMP_COMPARE_AND_STORE_PTR(&th->th.th_local.bget_list,
  274         old_value = TCR_SYNC_PTR(th->th.th_local.bget_list);
  332     volatile void *old_value = TCR_PTR(th->th.th_local.bget_list);
  337     while (!KMP_COMPARE_AND_STORE_PTR(&th->th.th_local.bget_list,
  340       old_value = TCR_PTR(th->th.th_local.bget_list);
 1008   thr = (thr_data_t *)th->th.th_local.bget_data;
 1037   if (th->th.th_local.bget_data != NULL) {
 1038     __kmp_free(th->th.th_local.bget_data);
 1039     th->th.th_local.bget_data = NULL;
 1429   __kmp_threads[gtid]->th.th_def_allocator = allocator;
 1433   return __kmp_threads[gtid]->th.th_def_allocator;
 1449     allocator = __kmp_threads[gtid]->th.th_def_allocator;
 1844   ptr = this_thr->th.th_free_lists[index].th_free_list_self;
 1847     this_thr->th.th_free_lists[index].th_free_list_self = *((void **)ptr);
 1854   ptr = TCR_SYNC_PTR(this_thr->th.th_free_lists[index].th_free_list_sync);
 1860         &this_thr->th.th_free_lists[index].th_free_list_sync, ptr, nullptr)) {
 1862       ptr = TCR_SYNC_PTR(this_thr->th.th_free_lists[index].th_free_list_sync);
 1866     this_thr->th.th_free_lists[index].th_free_list_self = *((void **)ptr);
 1940     *((void **)ptr) = this_thr->th.th_free_lists[index].th_free_list_self;
 1941     this_thr->th.th_free_lists[index].th_free_list_self = ptr;
 1943     void *head = this_thr->th.th_free_lists[index].th_free_list_other;
 1946       this_thr->th.th_free_lists[index].th_free_list_other = ptr;
 1961         this_thr->th.th_free_lists[index].th_free_list_other = ptr;
 1982         old_ptr = TCR_PTR(q_th->th.th_free_lists[index].th_free_list_sync);
 1988             &q_th->th.th_free_lists[index].th_free_list_sync, old_ptr, head)) {
 1990           old_ptr = TCR_PTR(q_th->th.th_free_lists[index].th_free_list_sync);
 1995         this_thr->th.th_free_lists[index].th_free_list_other = ptr;
 2019   memset(this_thr->th.th_free_lists, 0, NUM_LISTS * sizeof(kmp_free_list_t));
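
The kmp_alloc.cpp entries cluster around two per-thread structures reached through th: the bget bookkeeping (th.th_local.bget_data / bget_list, lines 231-340) and the fast-memory free lists (th.th_free_lists, lines 1844-2019). The bget_list accesses are a lock-free hand-off: other threads push freed blocks onto the owner's list with a compare-and-store retry loop, and the owner later drains it. A minimal sketch of that idiom with std::atomic (the runtime itself uses its TCR_SYNC_PTR and KMP_COMPARE_AND_STORE_PTR macros; the names below are illustrative):

  #include <atomic>

  struct block { block *next; };

  // Per-thread free list; foreign threads push, the owning thread drains.
  static std::atomic<block *> bget_list{nullptr};

  // Push one freed block -- the shape of the CAS loop at kmp_alloc.cpp:332-340.
  void push_free_block(block *b) {
    block *old_head = bget_list.load(std::memory_order_relaxed);
    do {
      b->next = old_head;  // link in front of the current head
    } while (!bget_list.compare_exchange_weak(old_head, b,
                                              std::memory_order_release,
                                              std::memory_order_relaxed));
  }

  // Owner detaches the whole list at once (cf. the loop at kmp_alloc.cpp:265-274).
  block *drain_free_list() {
    return bget_list.exchange(nullptr, std::memory_order_acquire);
  }
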
projects/openmp/runtime/src/kmp_barrier.cpp
   52   kmp_team_t *team = this_thr->th.th_team;
   53   kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
   65     this_thr->th.th_bar_arrive_time = this_thr->th.th_bar_min_time =
   87     int nproc = this_thr->th.th_team_nproc;
  106       kmp_flag_64 flag(&other_threads[i]->th.th_bar[bt].bb.b_arrived,
  121         this_thr->th.th_bar_min_time = KMP_MIN(
  122             this_thr->th.th_bar_min_time, other_threads[i]->th.th_bar_min_time);
  131         (*reduce)(this_thr->th.th_local.reduce_data,
  132                   other_threads[i]->th.th_local.reduce_data);
  156   kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
  161     kmp_uint32 nproc = this_thr->th.th_team_nproc;
  164     team = __kmp_threads[gtid]->th.th_team;
  205         kmp_flag_64 flag(&other_threads[i]->th.th_bar[bt].bb.b_go,
  297   kmp_team_t *team = this_thr->th.th_team;
  298   kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
  300   kmp_uint32 nproc = this_thr->th.th_team_nproc;
  315     this_thr->th.th_bar_arrive_time = this_thr->th.th_bar_min_time =
  328       kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
  348         this_thr->th.th_bar_min_time = KMP_MIN(this_thr->th.th_bar_min_time,
  349                                                child_thr->th.th_bar_min_time);
  358         (*reduce)(this_thr->th.th_local.reduce_data,
  359                   child_thr->th.th_local.reduce_data);
  406   kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
  444     team = __kmp_threads[gtid]->th.th_team;
  454     team = __kmp_threads[gtid]->th.th_team;
  460   nproc = this_thr->th.th_team_nproc;
  469       kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
  514   kmp_team_t *team = this_thr->th.th_team;
  515   kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
  518   kmp_uint32 num_threads = this_thr->th.th_team_nproc;
  533     this_thr->th.th_bar_arrive_time = this_thr->th.th_bar_min_time =
  572       kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
  593         this_thr->th.th_bar_min_time = KMP_MIN(this_thr->th.th_bar_min_time,
  594                                                child_thr->th.th_bar_min_time);
  603         (*reduce)(this_thr->th.th_local.reduce_data,
  604                   child_thr->th.th_local.reduce_data);
  634   kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
  649     team = __kmp_threads[gtid]->th.th_team;
  688     team = __kmp_threads[gtid]->th.th_team;
  698   num_threads = this_thr->th.th_team_nproc;
  739         kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
  839         &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb;
  844         &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb;
  865   kmp_team_t *team = this_thr->th.th_team;
  866   kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
  867   kmp_uint32 nproc = this_thr->th.th_team_nproc;
  873           ->th.th_teams_microtask) // are we inside the teams construct?
  874     if (this_thr->th.th_teams_size.nteams > 1)
  889     this_thr->th.th_bar_arrive_time = __itt_get_timestamp();
  923             (*reduce)(this_thr->th.th_local.reduce_data,
  924                       other_threads[child_tid]->th.th_local.reduce_data);
  941           kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
  958             (*reduce)(this_thr->th.th_local.reduce_data,
  959                       child_thr->th.th_local.reduce_data);
  974           kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
  991             (*reduce)(this_thr->th.th_local.reduce_data,
  992                       child_thr->th.th_local.reduce_data);
 1043   kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
 1048     team = __kmp_threads[gtid]->th.th_team;
 1087     team = __kmp_threads[gtid]->th.th_team;
 1098   nproc = this_thr->th.th_team_nproc;
 1101           ->th.th_teams_microtask) { // are we inside the teams construct?
 1103         this_thr->th.th_teams_level == level)
 1105     if (this_thr->th.th_teams_size.nteams > 1)
 1168               &team->t.t_threads[child_tid]->th.th_bar[bt].bb;
 1199             kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
 1226           kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
 1289   kmp_team_t *team = this_thr->th.th_team;
 1306     my_task_data = OMPT_CUR_TASK_DATA(this_thr);
 1307     my_parallel_data = OMPT_CUR_TEAM_DATA(this_thr);
 1324     this_thr->th.ompt_thread_info.state = ompt_state_wait_barrier;
 1355       this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL(team, tid);
 1373       this_thr->th.th_local.reduce_data = reduce_data;
 1444           this_thr->th.th_teams_microtask == NULL &&
 1446         ident_t *loc = __kmp_threads[gtid]->th.th_ident;
 1449         int nproc = this_thr->th.th_team_nproc;
 1453           __kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0,
 1455           this_thr->th.th_frame_time = cur_time;
 1459           __kmp_itt_frame_submit(gtid, this_thr->th.th_bar_min_time, cur_time,
 1465             kmp_uint64 delta = cur_time - this_thr->th.th_bar_arrive_time;
 1468             this_thr->th.th_bar_arrive_time = 0;
 1470               delta += (cur_time - other_threads[i]->th.th_bar_arrive_time);
 1471               other_threads[i]->th.th_bar_arrive_time = 0;
 1473             __kmp_itt_metadata_imbalance(gtid, this_thr->th.th_frame_time,
 1477           __kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0,
 1479           this_thr->th.th_frame_time = cur_time;
 1535       if (this_thr->th.th_task_team != NULL) {
 1574     this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
 1605         this_thr->th.th_bar[bs_plain_barrier].bb.b_arrived -=
 1621   kmp_team_t *team = this_thr->th.th_team;
 1679   team = this_thr->th.th_team;
 1680   nproc = this_thr->th.th_team_nproc;
 1686   master_thread = this_thr->th.th_team_master;
 1710     int ds_tid = this_thr->th.th_info.ds.ds_tid;
 1715     my_task_data = OMPT_CUR_TASK_DATA(this_thr);
 1716     my_parallel_data = OMPT_CUR_TEAM_DATA(this_thr);
 1728       this_thr->th.ompt_thread_info.task_data = *OMPT_CUR_TASK_DATA(this_thr);
 1730     this_thr->th.ompt_thread_info.state = ompt_state_wait_barrier_implicit;
 1763     this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL(team, tid);
 1830         __kmp_forkjoin_frames_mode && this_thr->th.th_teams_microtask == NULL &&
 1835       int nproc = this_thr->th.th_team_nproc;
 1839         __kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0,
 1843         __kmp_itt_frame_submit(gtid, this_thr->th.th_bar_min_time, cur_time, 1,
 1849           kmp_uint64 delta = cur_time - this_thr->th.th_bar_arrive_time;
 1852           this_thr->th.th_bar_arrive_time = 0;
 1854             delta += (cur_time - other_threads[i]->th.th_bar_arrive_time);
 1855             other_threads[i]->th.th_bar_arrive_time = 0;
 1857           __kmp_itt_metadata_imbalance(gtid, this_thr->th.th_frame_time,
 1860         __kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0,
 1862         this_thr->th.th_frame_time = cur_time;
 1898   kmp_team_t *team = (tid == 0) ? this_thr->th.th_team : NULL;
 1957       this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL(team, tid);
 1988       this_thr->th.ompt_thread_info.state == ompt_state_wait_barrier_implicit) {
 1989     int ds_tid = this_thr->th.th_info.ds.ds_tid;
 1991                                  ? OMPT_CUR_TASK_DATA(this_thr)
 1992                                  : &(this_thr->th.ompt_thread_info.task_data);
 1993     this_thr->th.ompt_thread_info.state = ompt_state_overhead;
 2020     this_thr->th.th_task_team = NULL;
 2039   team = (kmp_team_t *)TCR_PTR(this_thr->th.th_team);
 2079     if (this_thr->th.th_new_place == this_thr->th.th_current_place) {
 2097       this_thr->th.th_prev_num_threads = team->t.t_nproc;
 2098       this_thr->th.th_prev_level = team->t.t_level;
 2102     KMP_CHECK_UPDATE(this_thr->th.th_def_allocator, team->t.t_def_allocator);
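
In kmp_barrier.cpp the references follow one shape across the barrier variants: each thread's barrier state lives in th.th_bar[bt].bb, and during the gather the master folds every worker's th.th_local.reduce_data into its own through the user-supplied reduce callback (lines 87-132 above belong to the linear gather). A much-simplified master-side sketch, with atomics standing in for the runtime's kmp_flag_64 waits (names are illustrative):

  #include <atomic>

  struct thread_mock {
    std::atomic<bool> b_arrived{false};  // stands in for th_bar[bt].bb.b_arrived
    void *reduce_data = nullptr;         // stands in for th.th_local.reduce_data
  };

  // Master waits for each worker, then folds its contribution into slot 0.
  void linear_gather_master(thread_mock **team, int nproc,
                            void (*reduce)(void *dst, void *src)) {
    for (int i = 1; i < nproc; ++i) {
      while (!team[i]->b_arrived.load(std::memory_order_acquire)) {
        // spin until worker i reports arrival (real code blocks/yields here)
      }
      if (reduce)
        reduce(team[0]->reduce_data, team[i]->reduce_data);
    }
  }
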
projects/openmp/runtime/src/kmp_cancel.cpp
   49         kmp_team_t *this_team = this_thr->th.th_team;
   84         task = this_thr->th.th_current_task;
  157         kmp_team_t *this_team = this_thr->th.th_team;
  197         task = this_thr->th.th_current_task;
  246   kmp_team_t *this_team = this_thr->th.th_team;
  317       kmp_team_t *this_team = this_thr->th.th_team;
  323       task = this_thr->th.th_current_task;
projects/openmp/runtime/src/kmp_csupport.cpp
  149   return __kmp_entry_thread()->th.th_team->t.t_nproc;
  221   return __kmp_entry_thread()->th.th_root->r.r_active;
  294       kmp_team_t *parent_team = master_th->th.th_team;
  304       OMPT_STORE_RETURN_ADDRESS(gtid);
  391   this_thr->th.th_teams_microtask = microtask;
  392   this_thr->th.th_teams_level =
  393       this_thr->th.th_team->t.t_level; // AC: can be >0 on host
  396   kmp_team_t *parent_team = this_thr->th.th_team;
  402   OMPT_STORE_RETURN_ADDRESS(gtid);
  407   if (this_thr->th.th_teams_size.nteams == 0) {
  433   kmp_cg_root_t *tmp = this_thr->th.th_cg_roots;
  434   this_thr->th.th_cg_roots = tmp->up;
  445   this_thr->th.th_current_task->td_icvs.thread_limit =
  446       this_thr->th.th_cg_roots->cg_thread_limit;
  448   this_thr->th.th_teams_microtask = NULL;
  449   this_thr->th.th_teams_level = 0;
  450   *(kmp_int64 *)(&this_thr->th.th_teams_size) = 0L;
  484   OMPT_STORE_RETURN_ADDRESS(global_tid);
  516   serial_team = this_thr->th.th_serial_team;
  518   kmp_task_team_t *task_team = this_thr->th.th_task_team;
  533       this_thr->th.ompt_thread_info.state != ompt_state_overhead) {
  534     OMPT_CUR_TASK_INFO(this_thr)->frame.exit_frame = ompt_data_none;
  537           ompt_scope_end, NULL, OMPT_CUR_TASK_DATA(this_thr), 1,
  538           OMPT_CUR_TASK_INFO(this_thr)->thread_num, ompt_task_implicit);
  552     this_thr->th.ompt_thread_info.state = ompt_state_overhead;
  560     copy_icvs(&serial_team->t.t_threads[0]->th.th_current_task->td_icvs, top);
  577   this_thr->th.th_def_allocator = serial_team->t.t_def_allocator; // restore
  592     this_thr->th.th_team = serial_team->t.t_parent;
  593     this_thr->th.th_info.ds.ds_tid = serial_team->t.t_master_tid;
  596     this_thr->th.th_team_nproc = serial_team->t.t_parent->t.t_nproc; /*  JPH */
  597     this_thr->th.th_team_master =
  599     this_thr->th.th_team_serialized = this_thr->th.th_team->t.t_serialized;
  602     this_thr->th.th_dispatch =
  603         &this_thr->th.th_team->t.t_dispatch[serial_team->t.t_master_tid];
  607     KMP_ASSERT(this_thr->th.th_current_task->td_flags.executing == 0);
  608     this_thr->th.th_current_task->td_flags.executing = 1;
  612       this_thr->th.th_task_team =
  613           this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state];
  631     this_thr->th.ompt_thread_info.state =
  632         ((this_thr->th.th_team_serialized) ? ompt_state_work_serial
  738     OMPT_STORE_RETURN_ADDRESS(global_tid);
  741   __kmp_threads[global_tid]->th.th_ident = loc;
  784       kmp_team_t *team = this_thr->th.th_team;
  828   kmp_team_t *team = this_thr->th.th_team;
  878     OMPT_STORE_RETURN_ADDRESS(gtid);
  882     th->th.ompt_thread_info.wait_id = lck;
  883     th->th.ompt_thread_info.state = ompt_state_wait_ordered;
  895   if (th->th.th_dispatch->th_deo_fcn != 0)
  896     (*th->th.th_dispatch->th_deo_fcn)(&gtid, &cid, loc);
  903     th->th.ompt_thread_info.state = ompt_state_work_parallel;
  904     th->th.ompt_thread_info.wait_id = 0;
  939   if (th->th.th_dispatch->th_dxo_fcn != 0)
  940     (*th->th.th_dispatch->th_dxo_fcn)(&gtid, &cid, loc);
  945   OMPT_STORE_RETURN_ADDRESS(gtid);
 1164   OMPT_STORE_RETURN_ADDRESS(global_tid);
 1425       ti = __kmp_threads[global_tid]->th.ompt_thread_info;
 1464       ti = __kmp_threads[global_tid]->th.ompt_thread_info;
 1590   OMPT_STORE_RETURN_ADDRESS(global_tid);
 1630     OMPT_STORE_RETURN_ADDRESS(global_tid);
 1634   __kmp_threads[global_tid]->th.th_ident = loc;
 1694     OMPT_STORE_RETURN_ADDRESS(global_tid);
 1698   __kmp_threads[global_tid]->th.th_ident = loc;
 1750   kmp_team_t *team = this_thr->th.th_team;
 1797   kmp_team_t *team = this_thr->th.th_team;
 1865   set__dynamic(thread, flag ? TRUE : FALSE);
 1876   set__max_active_levels(thread, flag ? __kmp_dflt_max_active_levels : 1);
 2095     OMPT_STORE_RETURN_ADDRESS(gtid);
 2100   __kmp_threads[gtid]->th.th_ident = loc;
 2112     OMPT_STORE_RETURN_ADDRESS(gtid);
 2116   __kmp_threads[gtid]->th.th_ident = loc; // TODO: check if it is needed (e.g.
 3330   if (th->th.th_teams_microtask) {
 3331     *team_p = team = th->th.th_team;
 3332     if (team->t.t_level == th->th.th_teams_level) {
 3336       th->th.th_info.ds.ds_tid = team->t.t_master_tid;
 3337       th->th.th_team = team->t.t_parent;
 3338       th->th.th_team_nproc = th->th.th_team->t.t_nproc;
 3339       th->th.th_task_team = th->th.th_team->t.t_task_team[0];
 3340       *task_state = th->th.th_task_state;
 3341       th->th.th_task_state = 0;
 3352   th->th.th_info.ds.ds_tid = 0;
 3353   th->th.th_team = team;
 3354   th->th.th_team_nproc = team->t.t_nproc;
 3355   th->th.th_task_team = team->t.t_task_team[task_state];
 3356   th->th.th_task_state = task_state;
 3430   __KMP_SET_REDUCTION_METHOD(global_tid, packed_reduction_method);
 3482       OMPT_STORE_RETURN_ADDRESS(global_tid);
 3486     __kmp_threads[global_tid]->th.th_ident = loc;
 3537   packed_reduction_method = __KMP_GET_REDUCTION_METHOD(global_tid);
 3630   __KMP_SET_REDUCTION_METHOD(global_tid, packed_reduction_method);
 3659       OMPT_STORE_RETURN_ADDRESS(global_tid);
 3663     __kmp_threads[global_tid]->th.th_ident =
 3722   packed_reduction_method = __KMP_GET_REDUCTION_METHOD(global_tid);
 3737       OMPT_STORE_RETURN_ADDRESS(global_tid);
 3741     __kmp_threads[global_tid]->th.th_ident = loc;
 3761       OMPT_STORE_RETURN_ADDRESS(global_tid);
 3765     __kmp_threads[global_tid]->th.th_ident = loc;
 3782       OMPT_STORE_RETURN_ADDRESS(global_tid);
 3787     __kmp_threads[global_tid]->th.th_ident = loc;
 3836   return thread->th.th_current_task->td_task_id;
 3851   parent_task = thread->th.th_current_task->td_parent;
 3872   kmp_team_t *team = th->th.th_team;
 3874   kmp_disp_t *pr_buf = th->th.th_dispatch;
 3992   kmp_team_t *team = th->th.th_team;
 4003   pr_buf = th->th.th_dispatch;
 4085   kmp_team_t *team = th->th.th_team;
 4097   pr_buf = th->th.th_dispatch;
 4137   kmp_team_t *team = th->th.th_team;
 4138   kmp_disp_t *pr_buf = th->th.th_dispatch;
 4146   if (num_done == th->th.th_team_nproc) {
projects/openmp/runtime/src/kmp_dispatch.cpp
   49     if (th->th.th_root->r.r_active &&
   50         (th->th.th_dispatch->th_dispatch_pr_current->pushed_ws != ct_none)) {
   65     if (th->th.th_dispatch->th_dispatch_pr_current->pushed_ws != ct_none) {
  132   team = th->th.th_team;
  138       KMP_MASTER_GTID(gtid) && th->th.th_teams_microtask == NULL &&
  376         th->th.th_dispatch->th_steal_lock =
  378         __kmp_init_lock(th->th.th_dispatch->th_steal_lock);
  799   team = th->th.th_team;
  801   th->th.th_ident = loc;
  847       KMP_MASTER_GTID(gtid) && th->th.th_teams_microtask == NULL &&
  852         th->th.th_dispatch->th_disp_buffer); /* top of the stack */
  857     my_buffer_index = th->th.th_dispatch->th_disp_index++;
  861         &th->th.th_dispatch
  873                                 chunk, (T)th->th.th_team_nproc,
  874                                 (T)th->th.th_info.ds.ds_tid);
  877       th->th.th_dispatch->th_deo_fcn = __kmp_dispatch_deo_error;
  878       th->th.th_dispatch->th_dxo_fcn = __kmp_dispatch_dxo_error;
  880       th->th.th_dispatch->th_deo_fcn = __kmp_dispatch_deo<UT>;
  881       th->th.th_dispatch->th_dxo_fcn = __kmp_dispatch_dxo<UT>;
  901     th->th.th_dispatch->th_dispatch_pr_current = (dispatch_private_info_t *)pr;
  902     th->th.th_dispatch->th_dispatch_sh_current =
 1002   if (!th->th.th_team->t.t_serialized) {
 1006             th->th.th_dispatch->th_dispatch_pr_current);
 1009             th->th.th_dispatch->th_dispatch_sh_current);
 1065   if (!th->th.th_team->t.t_serialized) {
 1069             th->th.th_dispatch->th_dispatch_pr_current);
 1072             th->th.th_dispatch->th_dispatch_sh_current);
 1154   kmp_team_t *team = th->th.th_team;
 1197       kmp_lock_t *lck = th->th.th_dispatch->th_steal_lock;
 1221                       ->th.th_dispatch->th_dispatch_pr_current);
 1229                     ->th.th_dispatch->th_dispatch_pr_current);
 1242           lck = other_threads[victimIdx]->th.th_dispatch->th_steal_lock;
 1270           __kmp_acquire_lock(th->th.th_dispatch->th_steal_lock, gtid);
 1273           __kmp_release_lock(th->th.th_dispatch->th_steal_lock, gtid);
 1321                       ->th.th_dispatch->th_dispatch_pr_current);
 1329                     ->th.th_dispatch->th_dispatch_pr_current);
 1902   kmp_team_t *team = th->th.th_team;
 1913         th->th.th_dispatch->th_disp_buffer); /* top of the stack */
 2031         th->th.th_dispatch->th_dispatch_pr_current);
 2034         th->th.th_dispatch->th_dispatch_sh_current);
 2043                                                 p_st, th->th.th_team_nproc,
 2044                                                 th->th.th_info.ds.ds_tid);
 2065       if ((ST)num_done == th->th.th_team_nproc - 1) {
 2072           for (i = 0; i < th->th.th_team_nproc; ++i) {
 2073             kmp_lock_t *lck = other_threads[i]->th.th_dispatch->th_steal_lock;
 2077             other_threads[i]->th.th_dispatch->th_steal_lock = NULL;
 2108       th->th.th_dispatch->th_deo_fcn = NULL;
 2109       th->th.th_dispatch->th_dxo_fcn = NULL;
 2110       th->th.th_dispatch->th_dispatch_sh_current = NULL;
 2111       th->th.th_dispatch->th_dispatch_pr_current = NULL;
 2189   team = th->th.th_team;
 2191   nteams = th->th.th_teams_size.nteams;
 2285   OMPT_STORE_RETURN_ADDRESS(gtid);
 2297   OMPT_STORE_RETURN_ADDRESS(gtid);
 2310   OMPT_STORE_RETURN_ADDRESS(gtid);
 2323   OMPT_STORE_RETURN_ADDRESS(gtid);
 2343   OMPT_STORE_RETURN_ADDRESS(gtid);
 2355   OMPT_STORE_RETURN_ADDRESS(gtid);
 2367   OMPT_STORE_RETURN_ADDRESS(gtid);
 2379   OMPT_STORE_RETURN_ADDRESS(gtid);
 2401   OMPT_STORE_RETURN_ADDRESS(gtid);
 2418   OMPT_STORE_RETURN_ADDRESS(gtid);
 2434   OMPT_STORE_RETURN_ADDRESS(gtid);
 2451   OMPT_STORE_RETURN_ADDRESS(gtid);
projects/openmp/runtime/src/kmp_dispatch.h
  330         th->th.th_dispatch->th_dispatch_pr_current);
  340   if (!th->th.th_team->t.t_serialized) {
  343             th->th.th_dispatch->th_dispatch_sh_current);
  348           th->th.th_dispatch->th_dispatch_pr_current);
  406         th->th.th_dispatch->th_dispatch_pr_current);
  412   if (!th->th.th_team->t.t_serialized) {
  415             th->th.th_dispatch->th_dispatch_sh_current);
  419           th->th.th_dispatch->th_dispatch_pr_current);
projects/openmp/runtime/src/kmp_error.cpp
  189   struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  207   struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  229   struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  249   struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  336   struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  359   struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  380   struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  407   struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  432   struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
projects/openmp/runtime/src/kmp_ftn_entry.h
  127   team = __kmp_threads[gtid]->th.th_team;
  136   else if (__kmp_zero_bt && !get__bt_set(team, tid)) {
  145     return get__blocktime(team, tid);
  346   return thread->th.th_current_task->td_icvs.nproc;
  354   OMPT_STORE_RETURN_ADDRESS(__kmp_entry_gtid());
  359   ompt_task_info_t *parent_task_info = OMPT_CUR_TASK_INFO(this_thr);
  604   int max_active_levels = get__max_active_levels(thread);
  607   set__max_active_levels(thread, (KMP_DEREF flag) ? max_active_levels : 1);
  618   return get__max_active_levels(thread) > 1;
  631   set__dynamic(thread, KMP_DEREF flag ? TRUE : FALSE);
  641   return get__dynamic(thread);
  650   if (th->th.th_teams_microtask) {
  655     return (th->th.th_team->t.t_active_level ? 1 : 0);
  657     return (th->th.th_root->r.r_in_parallel ? FTN_TRUE : FTN_FALSE);
  704   return __kmp_entry_thread()->th.th_team->t.t_active_level;
  713   return __kmp_entry_thread()->th.th_team->t.t_level;
  746   return thread->th.th_current_task->td_icvs.thread_limit;
  757   return __kmp_entry_thread()->th.th_current_task->td_flags.final;
  765   return get__proc_bind(__kmp_entry_thread());
  845   if (thread->th.th_current_place < 0)
  847   return thread->th.th_current_place;
  864   first_place = thread->th.th_first_place;
  865   last_place = thread->th.th_last_place;
  890   first_place = thread->th.th_first_place;
  891   last_place = thread->th.th_last_place;
  927   return __kmp_entry_thread()->th.th_current_task->td_icvs.default_device;
  935   __kmp_entry_thread()->th.th_current_task->td_icvs.default_device =
 1026   OMPT_STORE_RETURN_ADDRESS(gtid);
 1039   OMPT_STORE_RETURN_ADDRESS(gtid);
 1053   OMPT_STORE_RETURN_ADDRESS(gtid);
 1066   OMPT_STORE_RETURN_ADDRESS(gtid);
 1078   OMPT_STORE_RETURN_ADDRESS(gtid);
 1090   OMPT_STORE_RETURN_ADDRESS(gtid);
 1108   OMPT_STORE_RETURN_ADDRESS(gtid);
 1123   OMPT_STORE_RETURN_ADDRESS(gtid);
 1141   OMPT_STORE_RETURN_ADDRESS(gtid);
 1159   OMPT_STORE_RETURN_ADDRESS(gtid);
 1178   OMPT_STORE_RETURN_ADDRESS(gtid);
 1193   OMPT_STORE_RETURN_ADDRESS(gtid);
projects/openmp/runtime/src/kmp_gsupport.cpp
   38     OMPT_STORE_RETURN_ADDRESS(gtid);
   65   OMPT_STORE_RETURN_ADDRESS(gtid);
   75   OMPT_STORE_RETURN_ADDRESS(gtid);
  130   kmp_team_t *team = this_thr->th.th_team;
  186     OMPT_STORE_RETURN_ADDRESS(gtid);
  196     OMPT_STORE_RETURN_ADDRESS(gtid);
  222     OMPT_STORE_RETURN_ADDRESS(gtid);
  228     OMPT_STORE_RETURN_ADDRESS(gtid);
  244   OMPT_STORE_RETURN_ADDRESS(gtid);
  254   OMPT_STORE_RETURN_ADDRESS(gtid);
  296     enclosing_state = thr->th.ompt_thread_info.state;
  297     thr->th.ompt_thread_info.state = ompt_state_work_parallel;
  313     thr->th.ompt_thread_info.state = enclosing_state;
  341     enclosing_state = thr->th.ompt_thread_info.state;
  342     thr->th.ompt_thread_info.state = ompt_state_work_parallel;
  359     thr->th.ompt_thread_info.state = enclosing_state;
  372   kmp_team_t *team = thr->th.th_team;
  407     thr->th.ompt_thread_info.state = ompt_state_work_parallel;
  415   OMPT_STORE_RETURN_ADDRESS(gtid);
  431     OMPT_STORE_RETURN_ADDRESS(gtid);
  466   if (!thr->th.th_team->t.t_serialized) {
  468                                  thr->th.th_team);
  475       OMPT_CUR_TASK_INFO(thr)->frame.exit_frame = ompt_data_none;
  772     OMPT_STORE_RETURN_ADDRESS(gtid);
 1170     OMPT_STORE_RETURN_ADDRESS(gtid);
 1171     current_task = __kmp_threads[gtid]->th.th_current_task;
 1202       oldInfo = thread->th.ompt_thread_info;
 1203       thread->th.ompt_thread_info.wait_id = 0;
 1204       thread->th.ompt_thread_info.state = ompt_state_work_parallel;
 1206       OMPT_STORE_RETURN_ADDRESS(gtid);
 1216       thread->th.ompt_thread_info = oldInfo;
 1236     OMPT_STORE_RETURN_ADDRESS(gtid);
 1286   OMPT_STORE_RETURN_ADDRESS(gtid);
 1313     OMPT_STORE_RETURN_ADDRESS(gtid);
 1352     OMPT_STORE_RETURN_ADDRESS(gtid);
 1388     OMPT_STORE_RETURN_ADDRESS(gtid);
 1413     OMPT_STORE_RETURN_ADDRESS(gtid);
 1435   OMPT_STORE_RETURN_ADDRESS(gtid);
 1454   OMPT_STORE_RETURN_ADDRESS(gtid);
 1524     OMPT_STORE_RETURN_ADDRESS(gtid);
 1539     OMPT_STORE_RETURN_ADDRESS(gtid);
 1754   kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0];
 1777   kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0];
projects/openmp/runtime/src/kmp_itt.inl
   90   ident_t *loc = __kmp_thread_from_gtid(gtid)->th.th_ident;
  443   ident_t *loc = __kmp_thread_from_gtid(gtid)->th.th_ident;
  491   kmp_team_t *team = thr->th.th_team;
  525         loc = thr->th.th_ident;
  634     kmp_taskdata_t *taskdata = thread->th.th_current_task;
  646   kmp_taskdata_t *taskdata = thread->th.th_current_task;
  868     ident_t *loc = thr->th.th_ident;
  874     thr->th.th_itt_mark_single = __itt_mark_create(name.str);
  879     __itt_mark(thr->th.th_itt_mark_single, NULL);
  888   __itt_mark_type mark = __kmp_thread_from_gtid(gtid)->th.th_itt_mark_single;
  912     ident_t const *loc = thr->th.th_ident;
  914     __itt_sync_create(thr->th.th_dispatch->th_dispatch_sh_current,
  926       __itt_sync_prepare(th->th.th_dispatch->th_dispatch_sh_current);
  938       __itt_sync_acquired(th->th.th_dispatch->th_dispatch_sh_current);
  950       __itt_sync_releasing(th->th.th_dispatch->th_dispatch_sh_current);
projects/openmp/runtime/src/kmp_lock.cpp
 1105   spin_here_p = &this_thr->th.th_spin_here;
 1207           this_thr->th.ompt_thread_info.state = prev_state;
 1208           this_thr->th.ompt_thread_info.wait_id = 0;
 1222       prev_state = this_thr->th.ompt_thread_info.state;
 1223       this_thr->th.ompt_thread_info.wait_id = (uint64_t)lck;
 1224       this_thr->th.ompt_thread_info.state = ompt_state_wait_lock;
 1232         tail_thr->th.th_next_waiting = gtid + 1;
 1260       this_thr->th.ompt_thread_info.state = prev_state;
 1261       this_thr->th.ompt_thread_info.wait_id = 0;
 1438         waiting_id_p = &head_thr->th.th_next_waiting;
 1473       head_thr->th.th_next_waiting = 0;
 1480       head_thr->th.th_spin_here = FALSE;
projects/openmp/runtime/src/kmp_runtime.cpp
  152     stack_size = (size_t)TCR_PTR(thr->th.th_info.ds.ds_stacksize);
  153     stack_base = (char *)TCR_PTR(thr->th.th_info.ds.ds_stackbase);
  183   if (!TCR_4(other_threads[i]->th.th_info.ds.ds_stackgrow)) {
  187   stack_base = (char *)other_threads[i]->th.th_info.ds.ds_stackbase;
  189     TCW_PTR(other_threads[i]->th.th_info.ds.ds_stackbase, stack_addr);
  190     TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize,
  191             other_threads[i]->th.th_info.ds.ds_stacksize + stack_addr -
  194     TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize,
  200     char *stack_end = (char *)other_threads[i]->th.th_info.ds.ds_stackbase;
  201     char *stack_beg = stack_end - other_threads[i]->th.th_info.ds.ds_stacksize;
  203                                  other_threads[i]->th.th_info.ds.ds_stacksize,
  260     stack_end = (char *)th->th.th_info.ds.ds_stackbase;
  261     stack_beg = stack_end - th->th.th_info.ds.ds_stacksize;
  267           gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize,
  269           (th->th.th_info.ds.ds_stackgrow) ? "initial" : "actual");
  272           gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize,
  274           (th->th.th_info.ds.ds_stackgrow) ? "initial" : "actual");
  285       stack_end = (char *)th->th.th_info.ds.ds_stackbase;
  286       stack_beg = stack_end - th->th.th_info.ds.ds_stacksize;
  294             (char *)TCR_PTR(f_th->th.th_info.ds.ds_stackbase);
  296             other_stack_end - (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize);
  304                 (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize),
  456   __kmp_print_storage_map_gtid(gtid, &thr->th.th_info, &thr->th.th_team,
  459   __kmp_print_storage_map_gtid(gtid, &thr->th.th_local, &thr->th.th_pri_head,
  463       gtid, &thr->th.th_bar[0], &thr->th.th_bar[bs_last_barrier],
  466   __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_plain_barrier],
  467                                &thr->th.th_bar[bs_plain_barrier + 1],
  471   __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_forkjoin_barrier],
  472                                &thr->th.th_bar[bs_forkjoin_barrier + 1],
  477   __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_reduction_barrier],
  478                                &thr->th.th_bar[bs_reduction_barrier + 1],
  663     if (__kmp_threads[gtid]->th.th_root->r.r_active)
  689     if (__kmp_threads[gtid]->th.th_root->r.r_active)
  718   team = th->th.th_team;
  721   th->th.th_ident = id_ref;
  726     kmp_int32 old_this = th->th.th_local.this_construct;
  728     ++th->th.th_local.this_construct;
  734                                               th->th.th_local.this_construct);
  738         KMP_MASTER_GTID(gtid) && th->th.th_teams_microtask == NULL &&
  787   if (!get__dynamic_2(parent_team, master_tid)) {
  853     if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
  872   int cg_nthreads = this_thr->th.th_cg_roots->cg_nthreads;
  873   int max_cg_threads = this_thr->th.th_cg_roots->cg_thread_limit;
  884     if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
  923       if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
  967   master_th->th.th_info.ds.ds_tid = 0;
  968   master_th->th.th_team = team;
  969   master_th->th.th_team_nproc = team->t.t_nproc;
  970   master_th->th.th_team_master = master_th;
  971   master_th->th.th_team_serialized = FALSE;
  972   master_th->th.th_dispatch = &team->t.t_dispatch[0];
  977   kmp_hot_team_ptr_t *hot_teams = master_th->th.th_hot_teams;
  981     if (master_th->th.th_teams_microtask) { // are we inside the teams?
  982       if (master_th->th.th_teams_size.nteams > 1) {
  987           master_th->th.th_teams_level == team->t.t_level) {
 1030       thr->th.th_teams_microtask = master_th->th.th_teams_microtask;
 1031       thr->th.th_teams_level = master_th->th.th_teams_level;
 1032       thr->th.th_teams_size = master_th->th.th_teams_size;
 1035         kmp_balign_t *balign = team->t.t_threads[i]->th.th_bar;
 1054       if (thr->th.th_prev_num_threads != team->t.t_nproc ||
 1055           thr->th.th_prev_level != team->t.t_level) {
 1148   serial_team = this_thr->th.th_serial_team;
 1163     this_thr->th.th_task_team = NULL;
 1166   kmp_proc_bind_t proc_bind = this_thr->th.th_set_proc_bind;
 1167   if (this_thr->th.th_current_task->td_icvs.proc_bind == proc_bind_false) {
 1172     proc_bind = this_thr->th.th_current_task->td_icvs.proc_bind;
 1175   this_thr->th.th_set_proc_bind = proc_bind_default;
 1182       this_thr->th.ompt_thread_info.state != ompt_state_overhead) {
 1185     parent_task_info = OMPT_CUR_TASK_INFO(this_thr);
 1199   if (this_thr->th.th_team != serial_team) {
 1201     int level = this_thr->th.th_team->t.t_level;
 1211           __kmp_allocate_team(this_thr->th.th_root, 1, 1,
 1215                               proc_bind, &this_thr->th.th_current_task->td_icvs,
 1222       new_team->t.t_parent = this_thr->th.th_team;
 1224       this_thr->th.th_serial_team = serial_team;
 1248     serial_team->t.t_parent = this_thr->th.th_team;
 1249     serial_team->t.t_sched.sched = this_thr->th.th_team->t.t_sched.sched;
 1250     this_thr->th.th_team = serial_team;
 1251     serial_team->t.t_master_tid = this_thr->th.th_info.ds.ds_tid;
 1255     KMP_ASSERT(this_thr->th.th_current_task->td_flags.executing == 1);
 1256     this_thr->th.th_current_task->td_flags.executing = 0;
 1263     copy_icvs(&this_thr->th.th_current_task->td_icvs,
 1264               &this_thr->th.th_current_task->td_parent->td_icvs);
 1269       this_thr->th.th_current_task->td_icvs.nproc =
 1275       this_thr->th.th_current_task->td_icvs.proc_bind =
 1282     this_thr->th.th_info.ds.ds_tid = 0;
 1285     this_thr->th.th_team_nproc = 1;
 1286     this_thr->th.th_team_master = this_thr;
 1287     this_thr->th.th_team_serialized = 1;
 1291     serial_team->t.t_def_allocator = this_thr->th.th_def_allocator; // save
 1302     this_thr->th.th_dispatch = serial_team->t.t_dispatch;
 1313     this_thr->th.th_team_serialized = serial_team->t.t_serialized;
 1316     int level = this_thr->th.th_team->t.t_level;
 1320       this_thr->th.th_current_task->td_icvs.nproc =
 1337     this_thr->th.th_dispatch = serial_team->t.t_dispatch;
 1346     if (this_thr->th.th_prev_level != serial_team->t.t_level ||
 1347         this_thr->th.th_prev_num_threads != 1) {
 1350       this_thr->th.th_prev_level = serial_team->t.t_level;
 1351       this_thr->th.th_prev_num_threads = 1;
 1360       this_thr->th.ompt_thread_info.state != ompt_state_overhead) {
 1361     OMPT_CUR_TASK_INFO(this_thr)->frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
 1371     implicit_task_data = OMPT_CUR_TASK_DATA(this_thr);
 1374           ompt_scope_begin, OMPT_CUR_TEAM_DATA(this_thr),
 1375           OMPT_CUR_TASK_DATA(this_thr), 1, __kmp_tid_from_gtid(global_tid), ompt_task_implicit); // TODO: Can this be ompt_task_initial?
 1376       OMPT_CUR_TASK_INFO(this_thr)
 1381     this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
 1382     OMPT_CUR_TASK_INFO(this_thr)->frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
 1440     parent_team = master_th->th.th_team;
 1441     master_tid = master_th->th.th_info.ds.ds_tid;
 1442     master_this_cons = master_th->th.th_local.this_construct;
 1443     root = master_th->th.th_root;
 1445     master_set_numthreads = master_th->th.th_set_nproc;
 1466     teams_level = master_th->th.th_teams_level;
 1468     p_hot_teams = &master_th->th.th_hot_teams;
 1483                             : get__nproc_2(parent_team, master_tid);
 1492       master_th->th.ompt_thread_info.state = ompt_state_overhead;
 1496     master_th->th.th_ident = loc;
 1498     if (master_th->th.th_teams_microtask && ap &&
 1515       if (parent_team == master_th->th.th_serial_team) {
 1535           implicit_task_data = OMPT_CUR_TASK_DATA(master_th);
 1537             OMPT_CUR_TASK_INFO(master_th)
 1540                 ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
 1542                 OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit);
 1546           master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
 1569           OMPT_CUR_TASK_INFO(master_th)->frame.exit_frame = ompt_data_none;
 1573                 OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit);
 1575           ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
 1579                 &ompt_parallel_data, OMPT_CUR_TASK_DATA(master_th),
 1583           master_th->th.ompt_thread_info.state = ompt_state_overhead;
 1594       parent_team->t.t_def_allocator = master_th->th.th_def_allocator; // save
 1607         if (master_set_numthreads < master_th->th.th_teams_size.nth) {
 1612             other_threads[i]->th.th_team_nproc = master_set_numthreads;
 1616         master_th->th.th_set_nproc = 0;
 1660         master_th->th.th_current_task->td_icvs.max_active_levels) {
 1668               : get__nproc_2(
 1676         if ((get__max_active_levels(master_th) == 1 &&
 1705     master_th->th.th_set_nproc = 0;
 1725         master_th->th.th_serial_team->t.t_ident = loc;
 1728           master_th->th.th_serial_team->t.t_level--;
 1745             task_info = OMPT_CUR_TASK_INFO(master_th);
 1748               OMPT_CUR_TASK_INFO(master_th)
 1751                   ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
 1753                   OMPT_CUR_TASK_INFO(master_th)->thread_num,
 1758             master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
 1782                   OMPT_CUR_TASK_INFO(master_th)->thread_num,
 1785             ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
 1793             master_th->th.ompt_thread_info.state = ompt_state_overhead;
 1799           team = master_th->th.th_team;
 1825             ompt_task_info_t *task_info = OMPT_CUR_TASK_INFO(master_th);
 1829                   OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_initial);
 1837             master_th->th.ompt_thread_info.state = ompt_state_overhead;
 1863             task_info = OMPT_CUR_TASK_INFO(master_th);
 1867             implicit_task_data = OMPT_CUR_TASK_DATA(master_th);
 1870                   ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
 1873               OMPT_CUR_TASK_INFO(master_th)
 1878             master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
 1901                   OMPT_CUR_TASK_INFO(master_th)->thread_num,
 1905             ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
 1913             master_th->th.ompt_thread_info.state = ompt_state_overhead;
 1951     master_th->th.th_current_task->td_flags.executing = 0;
 1953     if (!master_th->th.th_teams_microtask || level > teams_level) {
 1959     int nthreads_icv = master_th->th.th_current_task->td_icvs.nproc;
 1968     kmp_proc_bind_t proc_bind = master_th->th.th_set_proc_bind;
 1971     if (master_th->th.th_current_task->td_icvs.proc_bind == proc_bind_false) {
 1977         proc_bind = master_th->th.th_current_task->td_icvs.proc_bind;
 1985            master_th->th.th_current_task->td_icvs.proc_bind)) {
 1991     master_th->th.th_set_proc_bind = proc_bind_default;
 1995       copy_icvs(&new_icvs, &master_th->th.th_current_task->td_icvs);
 2020                                  &master_th->th.th_current_task->td_icvs,
 2038     if (!master_th->th.th_teams_microtask || level > teams_level) {
 2050     kmp_r_sched_t new_sched = get__sched_2(parent_team, master_tid);
 2055     KMP_CHECK_UPDATE(team->t.t_def_allocator, master_th->th.th_def_allocator);
 2071       if (active_level || master_th->th.th_task_team) {
 2074         if (master_th->th.th_task_state_top >=
 2075             master_th->th.th_task_state_stack_sz) { // increase size
 2076           kmp_uint32 new_size = 2 * master_th->th.th_task_state_stack_sz;
 2080           for (i = 0; i < master_th->th.th_task_state_stack_sz; ++i) {
 2081             new_stack[i] = master_th->th.th_task_state_memo_stack[i];
 2083           for (i = master_th->th.th_task_state_stack_sz; i < new_size;
 2087           old_stack = master_th->th.th_task_state_memo_stack;
 2088           master_th->th.th_task_state_memo_stack = new_stack;
 2089           master_th->th.th_task_state_stack_sz = new_size;
 2093         master_th->th
 2094             .th_task_state_memo_stack[master_th->th.th_task_state_top] =
 2095             master_th->th.th_task_state;
 2096         master_th->th.th_task_state_top++;
 2098         if (master_th->th.th_hot_teams &&
 2100             team == master_th->th.th_hot_teams[active_level].hot_team) {
 2102           master_th->th.th_task_state =
 2103               master_th->th
 2104                   .th_task_state_memo_stack[master_th->th.th_task_state_top];
 2107           master_th->th.th_task_state = 0;
 2156                          &master_th->th.th_current_task->td_icvs, loc);
 2159     master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
 2166         && !master_th->th.th_teams_microtask) { // not in teams construct
 2175         master_th->th.th_frame_time = tmp_time;
 2250     master_th->th.ompt_thread_info.state = ompt_state_overhead;
 2261   thread->th.ompt_thread_info.state =
 2298   root = master_th->th.th_root;
 2299   team = master_th->th.th_team;
 2302   master_th->th.th_ident = loc;
 2307     master_th->th.ompt_thread_info.state = ompt_state_overhead;
 2324     if (master_th->th.th_teams_microtask) {
 2327       int tlevel = master_th->th.th_teams_level;
 2357     master_th->th.th_task_state =
 2377       !master_th->th.th_teams_microtask) { /* not in teams construct */
 2378     master_th->th.th_ident = loc;
 2384                              master_th->th.th_frame_time, 0, loc,
 2385                              master_th->th.th_team_nproc, 1);
 2392   if (master_th->th.th_teams_microtask && !exit_teams &&
 2394       team->t.t_level == master_th->th.th_teams_level + 1) {
 2406             OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit);
 2410       ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
 2423     if (master_th->th.th_team_nproc < master_th->th.th_teams_size.nth) {
 2424       int old_num = master_th->th.th_team_nproc;
 2425       int new_num = master_th->th.th_teams_size.nth;
 2429         other_threads[i]->th.th_team_nproc = new_num;
 2435         kmp_balign_t *balign = other_threads[i]->th.th_bar;
 2445           other_threads[i]->th.th_task_state = master_th->th.th_task_state;
 2461   master_th->th.th_info.ds.ds_tid = team->t.t_master_tid;
 2462   master_th->th.th_local.this_construct = team->t.t_master_this_cons;
 2464   master_th->th.th_dispatch = &parent_team->t.t_dispatch[team->t.t_master_tid];
 2471   if (!master_th->th.th_teams_microtask ||
 2472       team->t.t_level > master_th->th.th_teams_level) {
 2488           OMPT_CUR_TASK_INFO(master_th)->thread_num, flags);
 2501   master_th->th.th_first_place = team->t.t_first_place;
 2502   master_th->th.th_last_place = team->t.t_last_place;
 2504   master_th->th.th_def_allocator = team->t.t_def_allocator;
 2519   master_th->th.th_team = parent_team;
 2520   master_th->th.th_team_nproc = parent_team->t.t_nproc;
 2521   master_th->th.th_team_master = parent_team->t.t_threads[0];
 2522   master_th->th.th_team_serialized = parent_team->t.t_serialized;
 2526       parent_team != master_th->th.th_serial_team &&
 2529                     master_th->th.th_serial_team USE_NESTED_HOT_ARG(NULL));
 2530     master_th->th.th_serial_team = parent_team;
 2534     if (master_th->th.th_task_state_top >
 2538       master_th->th.th_task_state_memo_stack[master_th->th.th_task_state_top] =
 2539           master_th->th.th_task_state;
 2540       --master_th->th.th_task_state_top; // pop
 2542       master_th->th.th_task_state =
 2543           master_th->th
 2544               .th_task_state_memo_stack[master_th->th.th_task_state_top];
 2547     master_th->th.th_task_team =
 2548         parent_team->t.t_task_team[master_th->th.th_task_state];
 2558   master_th->th.th_current_task->td_flags.executing = 1;
 2581   if (thread->th.th_team != thread->th.th_serial_team) {
 2584   if (thread->th.th_team->t.t_serialized > 1) {
 2587     if (thread->th.th_team->t.t_control_stack_top == NULL) {
 2590       if (thread->th.th_team->t.t_control_stack_top->serial_nesting_level !=
 2591           thread->th.th_team->t.t_serialized) {
 2600       copy_icvs(control, &thread->th.th_current_task->td_icvs);
 2602       control->serial_nesting_level = thread->th.th_team->t.t_serialized;
 2604       control->next = thread->th.th_team->t.t_control_stack_top;
 2605       thread->th.th_team->t.t_control_stack_top = control;
 2625   if (thread->th.th_current_task->td_icvs.nproc == new_nth)
 2630   set__nproc(thread, new_nth);
 2635   root = thread->th.th_root;
 2653         hot_team->t.t_threads[f]->th.th_task_team = NULL;
 2660     if (thread->th.th_hot_teams) {
 2662       thread->th.th_hot_teams[0].hot_team_nth = new_nth;
 2671       hot_team->t.t_threads[f]->th.th_team_nproc = new_nth;
 2720   set__max_active_levels(thread, max_active_levels);
 2736   return thread->th.th_current_task->td_icvs.max_active_levels;
 2777       thread->th.th_current_task->td_icvs.sched.r_sched_type = kmp_sch_static;
 2779       thread->th.th_current_task->td_icvs.sched.r_sched_type =
 2785     thread->th.th_current_task->td_icvs.sched.r_sched_type =
 2790       orig_kind, &(thread->th.th_current_task->td_icvs.sched.r_sched_type));
 2793     thread->th.th_current_task->td_icvs.sched.chunk = KMP_DEFAULT_CHUNK;
 2795     thread->th.th_current_task->td_icvs.sched.chunk = chunk;
 2809   th_type = thread->th.th_current_task->td_icvs.sched.r_sched_type;
 2845   *chunk = thread->th.th_current_task->td_icvs.sched.chunk;
 2863   team = thr->th.th_team;
 2868   if (thr->th.th_teams_microtask) {
 2870     int tlevel = thr->th.th_teams_level; // the level of the teams construct
 2921   team = thr->th.th_team;
 2926   if (thr->th.th_teams_microtask) {
 2928     int tlevel = thr->th.th_teams_level; // the level of the teams construct
 3135   copy_icvs(&gx_icvs, &team->t.t_threads[0]->th.th_current_task->td_icvs);
 3443   unsigned x = thread->th.th_x;
 3446   thread->th.th_x = x * thread->th.th_a + 1;
 3456   unsigned seed = thread->th.th_info.ds.ds_tid;
 3458   thread->th.th_a =
 3460   thread->th.th_x = (seed + 1) * thread->th.th_a + 1;
 3691     root_thread->th.th_info.ds.ds_gtid = gtid;
 3693     root_thread->th.ompt_thread_info.thread_data = ompt_data_none;
 3695     root_thread->th.th_root = root;
 3697       root_thread->th.th_cons = __kmp_allocate_cons_stack(gtid);
 3711   if (!root_thread->th.th_serial_team) {
 3714     root_thread->th.th_serial_team = __kmp_allocate_team(
 3721   KMP_ASSERT(root_thread->th.th_serial_team);
 3730   root_thread->th.th_serial_team->t.t_threads[0] = root_thread;
 3732   root_thread->th.th_serial_team->t.t_serialized = 0;
 3760       root_thread->th.th_bar[b].bb.b_arrived = KMP_INIT_BARRIER_STATE;
 3770   root_thread->th.th_current_place = KMP_PLACE_UNDEFINED;
 3771   root_thread->th.th_new_place = KMP_PLACE_UNDEFINED;
 3772   root_thread->th.th_first_place = KMP_PLACE_UNDEFINED;
 3773   root_thread->th.th_last_place = KMP_PLACE_UNDEFINED;
 3778   root_thread->th.th_def_allocator = __kmp_def_allocator;
 3779   root_thread->th.th_prev_level = 0;
 3780   root_thread->th.th_prev_num_threads = 1;
 3790   root_thread->th.th_cg_roots = tmp;
 3827   kmp_hot_team_ptr_t *hot_teams = thr->th.th_hot_teams;
 3839       if (i > 0 && th->th.th_hot_teams) {
 3840         __kmp_free(th->th.th_hot_teams);
 3841         th->th.th_hot_teams = NULL;
 3873       if (th->th.th_hot_teams) {
 3874         __kmp_free(th->th.th_hot_teams);
 3875         th->th.th_hot_teams = NULL;
 3909         &(root->r.r_uber_thread->th.ompt_thread_info.thread_data));
 3915   i = root->r.r_uber_thread->th.th_cg_roots->cg_nthreads--;
 3925     __kmp_free(root->r.r_uber_thread->th.th_cg_roots);
 3926     root->r.r_uber_thread->th.th_cg_roots = NULL;
 3956   KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root);
 3962   kmp_team_t *team = thread->th.th_team;
 3963   kmp_task_team_t *task_team = thread->th.th_task_team;
 3969     thread->th.ompt_thread_info.state = ompt_state_undefined;
 4046   TCW_SYNC_PTR(this_thr->th.th_team, team);
 4048   this_thr->th.th_info.ds.ds_tid = tid;
 4049   this_thr->th.th_set_nproc = 0;
 4053     this_thr->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
 4055     this_thr->th.th_reap_state = KMP_SAFE_TO_REAP;
 4056   this_thr->th.th_set_proc_bind = proc_bind_default;
 4058   this_thr->th.th_new_place = this_thr->th.th_current_place;
 4060   this_thr->th.th_root = master->th.th_root;
 4063   this_thr->th.th_team_nproc = team->t.t_nproc;
 4064   this_thr->th.th_team_master = master;
 4065   this_thr->th.th_team_serialized = team->t.t_serialized;
 4066   TCW_PTR(this_thr->th.th_sleep_loc, NULL);
 4073   __kmp_init_implicit_task(this_thr->th.th_team_master->th.th_ident, this_thr,
 4082   this_thr->th.th_dispatch = &team->t.t_dispatch[tid];
 4084   this_thr->th.th_local.this_construct = 0;
 4086   if (!this_thr->th.th_pri_common) {
 4087     this_thr->th.th_pri_common =
 4091           gtid, this_thr->th.th_pri_common, this_thr->th.th_pri_common + 1,
 4094     this_thr->th.th_pri_head = NULL;
 4098       this_thr->th.th_cg_roots != master->th.th_cg_roots) { // CG root not set
 4101     kmp_cg_root_t *tmp = this_thr->th.th_cg_roots;
 4112     this_thr->th.th_cg_roots = master->th.th_cg_roots;
 4114     this_thr->th.th_cg_roots->cg_nthreads++;
 4120     this_thr->th.th_current_task->td_icvs.thread_limit =
 4121         this_thr->th.th_cg_roots->cg_thread_limit;
 4126     volatile kmp_disp_t *dispatch = this_thr->th.th_dispatch;
 4164   this_thr->th.th_next_pool = NULL;
 4166   if (!this_thr->th.th_task_state_memo_stack) {
 4168     this_thr->th.th_task_state_memo_stack =
 4170     this_thr->th.th_task_state_top = 0;
 4171     this_thr->th.th_task_state_stack_sz = 4;
 4172     for (i = 0; i < this_thr->th.th_task_state_stack_sz;
 4174       this_thr->th.th_task_state_memo_stack[i] = 0;
 4204     __kmp_thread_pool = (volatile kmp_info_t *)new_thr->th.th_next_pool;
 4208     TCW_4(new_thr->th.th_in_pool, FALSE);
 4211     if (new_thr->th.th_active_in_pool == TRUE) {
 4214       new_thr->th.th_active_in_pool = FALSE;
 4220     KMP_ASSERT(!new_thr->th.th_team);
 4225                           new_thr->th.th_info.ds.ds_gtid);
 4230     new_thr->th.th_task_state = 0;
 4231     new_thr->th.th_task_state_top = 0;
 4232     new_thr->th.th_task_state_stack_sz = 4;
 4312     new_thr->th.th_serial_team = serial_team =
 4348   kmp_balign_t *balign = new_thr->th.th_bar;
 4356   new_thr->th.th_spin_here = FALSE;
 4357   new_thr->th.th_next_waiting = 0;
 4359   new_thr->th.th_blocking = false;
 4363   new_thr->th.th_current_place = KMP_PLACE_UNDEFINED;
 4364   new_thr->th.th_new_place = KMP_PLACE_UNDEFINED;
 4365   new_thr->th.th_first_place = KMP_PLACE_UNDEFINED;
 4366   new_thr->th.th_last_place = KMP_PLACE_UNDEFINED;
 4368   new_thr->th.th_def_allocator = __kmp_def_allocator;
 4369   new_thr->th.th_prev_level = 0;
 4370   new_thr->th.th_prev_num_threads = 1;
 4372   TCW_4(new_thr->th.th_in_pool, FALSE);
 4373   new_thr->th.th_active_in_pool = FALSE;
 4374   TCW_4(new_thr->th.th_active, TRUE);
 4526   int first_place = master_th->th.th_first_place;
 4527   int last_place = master_th->th.th_last_place;
 4528   int masters_place = master_th->th.th_current_place;
 4551       th->th.th_first_place = first_place;
 4552       th->th.th_last_place = last_place;
 4553       th->th.th_new_place = masters_place;
 4554       if (__kmp_display_affinity && masters_place != th->th.th_current_place &&
 4588         th->th.th_first_place = first_place;
 4589         th->th.th_last_place = last_place;
 4590         th->th.th_new_place = place;
 4591         if (__kmp_display_affinity && place != th->th.th_current_place &&
 4613         th->th.th_first_place = first_place;
 4614         th->th.th_last_place = last_place;
 4615         th->th.th_new_place = place;
 4616         if (__kmp_display_affinity && place != th->th.th_current_place &&
 4686           th->th.th_first_place = place;
 4687           th->th.th_new_place = place;
 4688           if (__kmp_display_affinity && place != th->th.th_current_place &&
 4714           th->th.th_last_place = place;
 4780             th->th.th_first_place = first;
 4781             th->th.th_new_place = place;
 4782             th->th.th_last_place = last;
 4783             if (__kmp_display_affinity && place != th->th.th_current_place &&
 4812         th->th.th_first_place = place;
 4813         th->th.th_last_place = place;
 4814         th->th.th_new_place = place;
 4815         if (__kmp_display_affinity && place != th->th.th_current_place &&
 4890     team = master->th.th_team;
 4892     if (master->th.th_teams_microtask) { // in teams construct?
 4893       if (master->th.th_teams_size.nteams > 1 &&
 4897               master->th.th_teams_level <
 4903     hot_teams = master->th.th_hot_teams;
 4948                               root->r.r_uber_thread->th.th_ident);
 4991             team->t.t_threads[f]->th.th_task_team = NULL;
 5003           kmp_balign_t *balign = team->t.t_threads[f]->th.th_bar;
 5017                               root->r.r_uber_thread->th.th_ident);
 5021         team->t.t_threads[f]->th.th_team_nproc = new_nproc;
 5066         kmp_balign_t *balign = other_threads[f]->th.th_bar;
 5116             kmp_balign_t *balign = new_worker->th.th_bar;
 5142                             root->r.r_uber_thread->th.th_ident);
 5157           team->t.t_threads[f]->th.th_task_state =
 5158               team->t.t_threads[0]->th.th_task_state_memo_stack[level];
 5161             team->t.t_threads[0]->th.th_task_state; // copy master's state
 5163           team->t.t_threads[f]->th.th_task_state = old_state;
 5181     if (master->th.th_teams_microtask) {
 5185         thr->th.th_teams_microtask = master->th.th_teams_microtask;
 5185         thr->th.th_teams_microtask = master->th.th_teams_microtask;
 5186         thr->th.th_teams_level = master->th.th_teams_level;
 5186         thr->th.th_teams_level = master->th.th_teams_level;
 5187         thr->th.th_teams_size = master->th.th_teams_size;
 5187         thr->th.th_teams_size = master->th.th_teams_size;
 5197         kmp_balign_t *balign = thr->th.th_bar;
 5373     if (master->th.th_teams_microtask) { // in teams construct?
 5374       if (master->th.th_teams_size.nteams > 1) {
 5379           master->th.th_teams_level == team->t.t_level) {
 5384     hot_teams = master->th.th_hot_teams;
 5407         volatile kmp_uint32 *state = &th->th.th_reap_state;
 5418           kmp_flag_64 fl(&th->th.th_bar[bs_forkjoin_barrier].bb.b_go, th);
 5432             team->t.t_threads[f]->th.th_task_team = NULL;
 5466     if (team->t.t_threads[1]->th.th_cg_roots->cg_root == team->t.t_threads[1]) {
 5473         kmp_cg_root_t *tmp = thr->th.th_cg_roots;
 5474         thr->th.th_cg_roots = tmp->up;
 5483         if (thr->th.th_cg_roots)
 5484           thr->th.th_current_task->td_icvs.thread_limit =
 5485               thr->th.th_cg_roots->cg_thread_limit;
 5552   kmp_balign_t *balign = this_th->th.th_bar;
 5559   this_th->th.th_task_state = 0;
 5560   this_th->th.th_reap_state = KMP_SAFE_TO_REAP;
 5563   TCW_PTR(this_th->th.th_team, NULL);
 5564   TCW_PTR(this_th->th.th_root, NULL);
 5565   TCW_PTR(this_th->th.th_dispatch, NULL); /* NOT NEEDED */
 5567   while (this_th->th.th_cg_roots) {
 5568     this_th->th.th_cg_roots->cg_nthreads--;
 5574     kmp_cg_root_t *tmp = this_th->th.th_cg_roots;
 5579       this_th->th.th_cg_roots = tmp->up;
 5585       this_th->th.th_cg_roots = NULL;
 5596   this_th->th.th_current_task = NULL;
 5600   gtid = this_th->th.th_info.ds.ds_gtid;
 5603     if (__kmp_thread_pool_insert_pt->th.th_info.ds.ds_gtid > gtid) {
 5614     scan = &(__kmp_thread_pool_insert_pt->th.th_next_pool);
 5618   for (; (*scan != NULL) && ((*scan)->th.th_info.ds.ds_gtid < gtid);
 5619        scan = &((*scan)->th.th_next_pool))
 5624   TCW_PTR(this_th->th.th_next_pool, *scan);
 5629   TCW_4(this_th->th.th_in_pool, TRUE);
 5632   if (this_th->th.th_active == TRUE) {
 5634     this_th->th.th_active_in_pool = TRUE;
 5662   int gtid = this_thr->th.th_info.ds.ds_gtid;
 5670     this_thr->th.th_cons = __kmp_allocate_cons_stack(gtid); // ATT: Memory leak?
 5676     thread_data = &(this_thr->th.ompt_thread_info.thread_data);
 5679     this_thr->th.ompt_thread_info.state = ompt_state_overhead;
 5680     this_thr->th.ompt_thread_info.wait_id = 0;
 5681     this_thr->th.ompt_thread_info.idle_frame = OMPT_GET_FRAME_ADDRESS(0);
 5682     this_thr->th.ompt_thread_info.parallel_flags = 0;
 5687     this_thr->th.ompt_thread_info.state = ompt_state_idle;
 5704       this_thr->th.ompt_thread_info.state = ompt_state_overhead;
 5708     pteam = &this_thr->th.th_team;
 5724           this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
 5741         this_thr->th.ompt_thread_info.state = ompt_state_overhead;
 5756   this_thr->th.th_task_team = NULL;
 5854   gtid = thread->th.th_info.ds.ds_gtid;
 5865       kmp_flag_64 flag(&thread->th.th_bar[bs_forkjoin_barrier].bb.b_go, thread);
 5882     if (thread->th.th_active_in_pool) {
 5883       thread->th.th_active_in_pool = FALSE;
 5917     if (thread->th.th_cons) {
 5918       __kmp_free_cons_stack(thread->th.th_cons);
 5919       thread->th.th_cons = NULL;
 5923   if (thread->th.th_pri_common != NULL) {
 5924     __kmp_free(thread->th.th_pri_common);
 5925     thread->th.th_pri_common = NULL;
 5928   if (thread->th.th_task_state_memo_stack != NULL) {
 5929     __kmp_free(thread->th.th_task_state_memo_stack);
 5930     thread->th.th_task_state_memo_stack = NULL;
 5934   if (thread->th.th_local.bget_data != NULL) {
 5940   if (thread->th.th_affin_mask != NULL) {
 5941     KMP_CPU_FREE(thread->th.th_affin_mask);
 5942     thread->th.th_affin_mask = NULL;
 5953   __kmp_reap_team(thread->th.th_serial_team);
 5954   thread->th.th_serial_team = NULL;
 6023       __kmp_thread_pool = thread->th.th_next_pool;
 6026       thread->th.th_next_pool = NULL;
 6027       thread->th.th_in_pool = FALSE;
 6051       while (thr && KMP_ATOMIC_LD_ACQ(&thr->th.th_blocking))
 6255         __kmp_threads[gtid]->th.th_task_team = NULL;
 6850       if (thread->th.th_current_task->td_icvs.nproc != 0)
 6853       set__nproc(__kmp_threads[i], __kmp_dflt_team_nth);
 6971   this_thr->th.th_local.this_construct = 0;
 6975   dispatch = (kmp_disp_t *)TCR_PTR(this_thr->th.th_dispatch);
 7001   kmp_team_t *team = this_thr->th.th_team;
 7037     OMPT_CUR_TASK_INFO(this_thr)->thread_num = __kmp_tid_from_gtid(gtid);
 7060    this_thr->th.ompt_thread_info.parallel_flags |= ompt_parallel_team;
 7085   kmp_team_t *team = thr->th.th_team;
 7087   thr->th.th_set_nproc = thr->th.th_teams_size.nth;
 7087   thr->th.th_set_nproc = thr->th.th_teams_size.nth;
 7097   tmp->cg_thread_limit = thr->th.th_current_task->td_icvs.thread_limit;
 7102   tmp->up = thr->th.th_cg_roots;
 7103   thr->th.th_cg_roots = tmp;
 7111                   (microtask_t)thr->th.th_teams_microtask, // "wrapped" task
 7117   if (thr->th.th_team_nproc < thr->th.th_teams_size.nth)
 7117   if (thr->th.th_team_nproc < thr->th.th_teams_size.nth)
 7118     thr->th.th_teams_size.nth = thr->th.th_team_nproc;
 7118     thr->th.th_teams_size.nth = thr->th.th_team_nproc;
 7132   kmp_team_t *team = this_thr->th.th_team;
 7148     OMPT_CUR_TASK_INFO(this_thr)->thread_num = tid;
 7153   this_thr->th.ompt_thread_info.parallel_flags |= ompt_parallel_league;
 7168     thr->th.th_set_nproc = num_threads;
 7192   thr->th.th_set_nproc = thr->th.th_teams_size.nteams = num_teams;
 7192   thr->th.th_set_nproc = thr->th.th_teams_size.nteams = num_teams;
 7207     if (num_threads > thr->th.th_current_task->td_icvs.thread_limit) {
 7208       num_threads = thr->th.th_current_task->td_icvs.thread_limit;
 7216     thr->th.th_current_task->td_icvs.thread_limit = num_threads;
 7232   thr->th.th_teams_size.nth = num_threads;
 7238   thr->th.th_set_proc_bind = proc_bind;
 7273   KMP_ASSERT(this_thr->th.th_team == team);
 7314       this_thr->th.ompt_thread_info.state == ompt_state_wait_barrier_implicit) {
 7315     int ds_tid = this_thr->th.th_info.ds.ds_tid;
 7316     ompt_data_t *task_data = OMPT_CUR_TASK_DATA(this_thr);
 7317     this_thr->th.ompt_thread_info.state = ompt_state_overhead;
 7323       codeptr = OMPT_CUR_TEAM_INFO(this_thr)->master_return_address;
 7344   KMP_ASSERT(this_thr->th.th_team == team);
 7369     if (hot_team->t.t_threads[i]->th.th_active) {
 7572   root = __kmp_threads[gtid]->th.th_root;
 7600   root = thread->th.th_root;
 7612     thread->th.th_set_nproc = 0;
 7613     set__nproc(thread, 1);
 7616     thread->th.th_set_nproc = 0;
 7617     set__nproc(thread, __kmp_dflt_team_nth ? __kmp_dflt_team_nth
 7621     thread->th.th_set_nproc = 0;
 7622     set__nproc(thread, __kmp_dflt_team_nth ? __kmp_dflt_team_nth
 7689   if (thr->th.th_teams_microtask) {
 7690     kmp_team_t *team = thr->th.th_team;
 7691     int tlevel = thr->th.th_teams_level; // the level of the teams construct
 7906     rc = __kmp_str_buf_print(field_buffer, format, th->th.th_team->t.t_level);
 7924     rc = __kmp_str_buf_print(field_buffer, format, th->th.th_team->t.t_nproc);
 7928         __kmp_get_ancestor_thread_num(gtid, th->th.th_team->t.t_level - 1);
 7935     __kmp_affinity_str_buf_mask(&buf, th->th.th_affin_mask);
 8032   set__blocktime_team(thread->th.th_team, tid, blocktime);
 8032   set__blocktime_team(thread->th.th_team, tid, blocktime);
 8033   set__blocktime_team(thread->th.th_serial_team, 0, blocktime);
 8033   set__blocktime_team(thread->th.th_serial_team, 0, blocktime);
 8046   set__bt_set_team(thread->th.th_team, tid, bt_set);
 8046   set__bt_set_team(thread->th.th_team, tid, bt_set);
 8047   set__bt_set_team(thread->th.th_serial_team, 0, bt_set);
 8047   set__bt_set_team(thread->th.th_serial_team, 0, bt_set);
 8104   team_size = __kmp_get_team_num_threads(global_tid);
 8229   return ((__kmp_entry_thread()->th.th_local.packed_reduction_method) >> 8);
 8251         kmp_flag_64 fl(&thread->th.th_bar[bs_forkjoin_barrier].bb.b_go, thread);
projects/openmp/runtime/src/kmp_sched.cpp
  189     tid = th->th.th_team->t.t_master_tid;
  190     team = th->th.th_team->t.t_parent;
  193     team = th->th.th_team;
  381       __kmp_forkjoin_frames_mode == 3 && th->th.th_teams_microtask == NULL &&
  477   nth = th->th.th_team_nproc;
  478   team = th->th.th_team;
  480   nteams = th->th.th_teams_size.nteams;
  719   team = th->th.th_team;
  721   nteams = th->th.th_teams_size.nteams;
projects/openmp/runtime/src/kmp_taskdeps.cpp
  516   kmp_taskdata_t *current_task = thread->th.th_current_task;
  520     OMPT_STORE_RETURN_ADDRESS(gtid);
  591   kmp_task_team_t *task_team = thread->th.th_task_team;
  669   kmp_taskdata_t *current_task = thread->th.th_current_task;
  677   ignore = ignore && thread->th.th_task_team != NULL &&
  678            thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
projects/openmp/runtime/src/kmp_tasking.cpp
  327   kmp_task_team_t *task_team = thread->th.th_task_team;
  376                               thread->th.th_current_task)) {
  396                                 thread->th.th_current_task)) {
  441   this_thr->th.th_current_task = this_thr->th.th_current_task->td_parent;
  441   this_thr->th.th_current_task = this_thr->th.th_current_task->td_parent;
  469     if (this_thr->th.th_current_task != &team->t.t_implicit_task_taskdata[0]) {
  471           this_thr->th.th_current_task;
  472       this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[0];
  477     this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[tid];
  516   thread->th.th_current_task = taskdata;
  561   if (__kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded) {
  563     __kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded = 0;
  604   kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
  669     OMPT_STORE_RETURN_ADDRESS(gtid);
  808       thread->th.th_task_team; // might be NULL for serial teams...
  840       thread->th.th_current_task = resumed_task; // restore current_task
  943   thread->th.th_current_task = resumed_task;
 1097   kmp_taskdata_t *task = thread->th.th_current_task;
 1123   kmp_taskdata_t *task = thread->th.th_current_task;
 1161   kmp_team_t *team = thread->th.th_team;
 1162   kmp_taskdata_t *parent_task = thread->th.th_current_task;
 1182     KMP_CHECK_UPDATE(thread->th.th_task_team->tt.tt_untied_task_encountered, 1);
 1195     if ((thread->th.th_task_team) == NULL) {
 1205       thread->th.th_task_team = team->t.t_task_team[thread->th.th_task_state];
 1205       thread->th.th_task_team = team->t.t_task_team[thread->th.th_task_state];
 1207     kmp_task_team_t *task_team = thread->th.th_task_team;
 1215       kmp_int32 tid = thread->th.th_info.ds.ds_tid;
 1291   taskdata->td_task_team = thread->th.th_task_team;
 1449     oldInfo = thread->th.ompt_thread_info;
 1450     thread->th.ompt_thread_info.wait_id = 0;
 1451     thread->th.ompt_thread_info.state = (thread->th.th_team_serialized)
 1451     thread->th.ompt_thread_info.state = (thread->th.th_team_serialized)
 1469     kmp_team_t *this_team = thread->th.th_team;
 1536       if (thread->th.th_bar_arrive_time)
 1556       thread->th.th_bar_arrive_time += (__itt_get_timestamp() - cur_time);
 1568       thread->th.ompt_thread_info = oldInfo;
 1622     kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
 1662     kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
 1699       OMPT_STORE_RETURN_ADDRESS(gtid);
 1805     taskdata = thread->th.th_current_task;
 1813       my_parallel_data = OMPT_CUR_TEAM_DATA(thread);
 1849     must_wait = must_wait || (thread->th.th_task_team != NULL &&
 1850                               thread->th.th_task_team->tt.tt_found_proxy_tasks);
 1911     OMPT_STORE_RETURN_ADDRESS(gtid);
 1933     taskdata = thread->th.th_current_task;
 1950       kmp_task_team_t *task_team = thread->th.th_task_team;
 1955             thread->th.ompt_thread_info.ompt_task_yielded = 1;
 1963             thread->th.ompt_thread_info.ompt_task_yielded = 0;
 2086   kmp_taskgroup_t *tg = thread->th.th_current_task->td_taskgroup;
 2087   kmp_int32 nth = thread->th.th_team_nproc;
 2201   kmp_int32 nth = thread->th.th_team_nproc;
 2207     tg = thread->th.th_current_task->td_taskgroup;
 2211   kmp_int32 tid = thread->th.th_info.ds.ds_tid;
 2257   kmp_int32 nth = th->th.th_team_nproc;
 2306   kmp_int32 nth = thr->th.th_team_nproc;
 2312     return (void *)thr->th.th_current_task->td_taskgroup;
 2314   kmp_team_t *team = thr->th.th_team;
 2338     tg = thr->th.th_current_task->td_taskgroup;
 2401   kmp_taskdata_t *taskdata = thread->th.th_current_task;
 2417     kmp_team_t *team = thread->th.th_team;
 2433   kmp_taskdata_t *taskdata = thread->th.th_current_task;
 2443     team = thread->th.th_team;
 2479         (thread->th.th_task_team != NULL &&
 2480          thread->th.th_task_team->tt.tt_found_proxy_tasks)) {
 2509     kmp_team_t *t = thread->th.th_team;
 2517       if (cnt == thread->th.th_team_nproc - 1) {
 2536       if (cnt == thread->th.th_team_nproc - 1) {
 2616                              thread->th.th_current_task)) {
 2661   victim_tid = victim_thr->th.th_info.ds.ds_tid;
 2693   current = __kmp_threads[gtid]->th.th_current_task;
 2787   kmp_task_team_t *task_team = thread->th.th_task_team;
 2791   kmp_taskdata_t *current_task = thread->th.th_current_task;
 2794                       tid = thread->th.th_info.ds.ds_tid;
 2806   thread->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
 2857                 (TCR_PTR(CCAST(void *, other_thread->th.th_sleep_loc)) !=
 2861                                         other_thread->th.th_sleep_loc);
 2922       if (thread->th.th_task_team == NULL) {
 2971     if (thread->th.th_task_team == NULL) {
 3057       if (i == this_thr->th.th_info.ds.ds_tid) {
 3066       if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
 3198     kmp_team_t *team = thread->th.th_team;
 3406          thread = thread->th.th_next_pool) {
 3410       if (TCR_PTR(thread->th.th_task_team) == NULL) {
 3432         if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
 3460   if (team->t.t_task_team[this_thr->th.th_task_state] == NULL &&
 3462     team->t.t_task_team[this_thr->th.th_task_state] =
 3480     int other_team = 1 - this_thr->th.th_task_state;
 3520   this_thr->th.th_task_state = 1 - this_thr->th.th_task_state;
 3520   this_thr->th.th_task_state = 1 - this_thr->th.th_task_state;
 3523   TCW_PTR(this_thr->th.th_task_team,
 3524           team->t.t_task_team[this_thr->th.th_task_state]);
 3542   kmp_task_team_t *task_team = team->t.t_task_team[this_thr->th.th_task_state];
 3574     TCW_PTR(this_thr->th.th_task_team, NULL);
 3586       &team->t.t_task_team[thread->th.th_task_state]->tt.tt_unfinished_threads);
 3870         if (thread->th.th_team == team) {
 3892   kmp_taskdata_t *parent_task = thread->th.th_current_task;
 3947     __ompt_task_init(taskdata, thread->th.th_info.ds.ds_gtid);
 4097   kmp_taskdata_t *current_task = thread->th.th_current_task;
 4390     OMPT_STORE_RETURN_ADDRESS(gtid);
 4406   kmp_taskdata_t *current_task = thread->th.th_current_task;
 4442         KMP_MIN(thread->th.th_team_nproc * 10, INITIAL_TASK_DEQUE_SIZE);
 4448     grainsize = thread->th.th_team_nproc * 10;
 4527     OMPT_STORE_RETURN_ADDRESS(gtid);
projects/openmp/runtime/src/kmp_threadprivate.cpp
  185                       __kmp_threads[gtid]->th.th_pri_common, gtid,
  204                       __kmp_threads[gtid]->th.th_pri_common, gtid,
  243       for (tn = __kmp_threads[gtid]->th.th_pri_head; tn; tn = tn->link) {
  427   tt = &(__kmp_threads[gtid]->th.th_pri_common->data[KMP_HASH(pc_addr)]);
  449   tn->link = __kmp_threads[gtid]->th.th_pri_head;
  450   __kmp_threads[gtid]->th.th_pri_head = tn;
  552   if (!__kmp_threads[global_tid]->th.th_root->r.r_active && !__kmp_foreign_tp) {
  568         __kmp_threads[global_tid]->th.th_pri_common, global_tid, data);
projects/openmp/runtime/src/kmp_wait_release.h
  125   int ds_tid = this_thr->th.th_info.ds.ds_tid;
  127     this_thr->th.ompt_thread_info.state = ompt_state_overhead;
  143         int flags = this_thr->th.ompt_thread_info.parallel_flags;
  150       this_thr->th.ompt_thread_info.state = ompt_state_idle;
  152       this_thr->th.ompt_thread_info.state = ompt_state_overhead;
  187   th_gtid = this_thr->th.th_info.ds.ds_gtid;
  189     kmp_team_t *team = this_thr->th.th_team;
  195     KMP_ATOMIC_ST_REL(&this_thr->th.th_blocking, true);
  257     ompt_entry_state = this_thr->th.ompt_thread_info.state;
  259         KMP_MASTER_TID(this_thr->th.th_info.ds.ds_tid)) {
  261           this_thr->th.th_team->t.ompt_serialized_team_info;
  265         tId = OMPT_CUR_TASK_DATA(this_thr);
  268       tId = &(this_thr->th.ompt_thread_info.task_data);
  271                        this_thr->th.th_task_team == NULL)) {
  314       hibernate_goal = KMP_NOW() + this_thr->th.th_team_bt_intervals;
  326       task_team = this_thr->th.th_task_team;
  341             this_thr->th.th_reap_state = KMP_SAFE_TO_REAP;
  349           this_thr->th.th_task_team = NULL;
  350           this_thr->th.th_reap_state = KMP_SAFE_TO_REAP;
  353         this_thr->th.th_reap_state = KMP_SAFE_TO_REAP;
  379       kmp_team_t *team = this_thr->th.th_team;
  414       KMP_ATOMIC_ST_REL(&this_thr->th.th_blocking, false);
  419       KMP_ATOMIC_ST_REL(&this_thr->th.th_blocking, true);
  427                this_thr->th.th_reap_state == KMP_SAFE_TO_REAP) {
  428       this_thr->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
  434   ompt_state_t ompt_exit_state = this_thr->th.ompt_thread_info.state;
  439       ompt_exit_state = this_thr->th.ompt_thread_info.state;
  443       this_thr->th.ompt_thread_info.state = ompt_state_overhead;
  458     KMP_ATOMIC_ST_REL(&this_thr->th.th_blocking, false);
  462     kmp_team_t *team = this_thr->th.th_team;
  467         kmp_task_team_t *task_team = this_thr->th.th_task_team;
  503           int wait_gtid = waiter->th.th_info.ds.ds_gtid;
  856     if (this_thr->th.th_bar[bt].bb.wait_flag == KMP_BARRIER_SWITCH_TO_OWN_FLAG)
  861       this_thr->th.th_bar[bt].bb.wait_flag = KMP_BARRIER_SWITCHING;
  862       kmp_flag_64 flag(&this_thr->th.th_bar[bt].bb.b_go,
projects/openmp/runtime/src/ompt-general.cpp
  600   if (thread == NULL || thread->th.th_current_place < 0)
  602   return thread->th.th_current_place;
  623   first_place = thread->th.th_first_place;
  624   last_place = thread->th.th_last_place;
projects/openmp/runtime/src/ompt-specific.cpp
   55     kmp_team *team = thr->th.th_team;
  108     kmp_taskdata_t *taskdata = thr->th.th_current_task;
  148     kmp_taskdata_t *taskdata = thr->th.th_current_task;
  201     return &(thread->th.ompt_thread_info.thread_data);
  214     ti->th.ompt_thread_info.wait_id = (ompt_wait_id_t)(uintptr_t)variable;
  222       *omp_wait_id = ti->th.ompt_thread_info.wait_id;
  223     return ti->th.ompt_thread_info.state;
  275       thr->th.th_team->t.t_serialized >
  285     link_lwt->ompt_team_info = *OMPT_CUR_TEAM_INFO(thr);
  286     *OMPT_CUR_TEAM_INFO(thr) = tmp_team;
  289     link_lwt->ompt_task_info = *OMPT_CUR_TASK_INFO(thr);
  290     *OMPT_CUR_TASK_INFO(thr) = tmp_task;
  294         thr->th.th_team->t.ompt_serialized_team_info;
  296     thr->th.th_team->t.ompt_serialized_team_info = link_lwt;
  300     *OMPT_CUR_TEAM_INFO(thr) = lwt->ompt_team_info;
  301     *OMPT_CUR_TASK_INFO(thr) = lwt->ompt_task_info;
  306   ompt_lw_taskteam_t *lwtask = thr->th.th_team->t.ompt_serialized_team_info;
  308     thr->th.th_team->t.ompt_serialized_team_info = lwtask->parent;
  311     lwtask->ompt_team_info = *OMPT_CUR_TEAM_INFO(thr);
  312     *OMPT_CUR_TEAM_INFO(thr) = tmp_team;
  315     lwtask->ompt_task_info = *OMPT_CUR_TASK_INFO(thr);
  316     *OMPT_CUR_TASK_INFO(thr) = tmp_task;
  348     kmp_taskdata_t *taskdata = thr->th.th_current_task;
  351     kmp_team *team = thr->th.th_team, *prev_team = NULL;
  440   kmp_taskdata_t *taskdata = thr->th.th_current_task;
  494   if (!thr->th.th_ident)
  497   kmp_int32 flags = thr->th.th_ident->flags;
projects/openmp/runtime/src/ompt-specific.h
   72   void *return_address = thr->th.ompt_thread_info.return_address;
   73   thr->th.ompt_thread_info.return_address = NULL;
   98   thread->th.ompt_thread_info.state = state;
projects/openmp/runtime/src/z_Linux_util.cpp
  459   status = pthread_cancel(th->th.th_info.ds.ds_thread);
  508     TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
  509     TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
  510     TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
  516   TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  517   TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  518   TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  534   gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  778   th->th.th_info.ds.ds_gtid = gtid;
  801     th->th.th_info.ds.ds_thread = pthread_self();
  884   th->th.th_info.ds.ds_thread = handle;
 1105   status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
 1387   int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
 1394       !__kmp_atomic_compare_store(&th->th.th_suspend_init_count, old_value,
 1396     while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
 1402     status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
 1405     status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
 1408     KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
 1414   if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
 1419     status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
 1423     status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
 1427     --th->th.th_suspend_init_count;
 1435   return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
 1439   int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
 1444   int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
 1462   status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
 1474     status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
 1492     TCW_PTR(th->th.th_sleep_loc, (void *)flag);
 1505         th->th.th_active = FALSE;
 1506         if (th->th.th_active_in_pool) {
 1507           th->th.th_active_in_pool = FALSE;
 1536       status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
 1537                                  &th->th.th_suspend_mx.m_mutex);
 1562       th->th.th_active = TRUE;
 1563       if (TCR_4(th->th.th_in_pool)) {
 1565         th->th.th_active_in_pool = TRUE;
 1578   status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
 1612   status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
 1616     flag = (C *)CCAST(void *, th->th.th_sleep_loc);
 1627     status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
 1638       status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
 1647   TCW_PTR(th->th.th_sleep_loc, NULL);
 1657   status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
 1659   status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);