18#include "ompt-specific.h"
25#define THREAD_LOCAL __declspec(thread)
27#define THREAD_LOCAL __thread
30#define OMPT_WEAK_ATTRIBUTE KMP_WEAK_ATTRIBUTE_INTERNAL
36#define LWT_FROM_TEAM(team) (team)->t.ompt_serialized_team_info
38#define OMPT_THREAD_ID_BITS 16
// __ompt_get_teaminfo: walk `depth` ancestor levels up from the current
// thread's team — including lightweight (serialized) task teams recorded on
// each team via LWT_FROM_TEAM — and return the ompt_team_info_t at that
// level.  When `size` is non-NULL it receives the team's thread count for a
// real (non-lightweight) team.
// NOTE(review): the extraction has dropped the loop/branch lines between the
// statements below (the original file's line numbers are fused into the
// text), so the control flow choosing lwt vs. team is not visible here.
51ompt_team_info_t *__ompt_get_teaminfo(
int depth,
int *size) {
52  kmp_info_t *thr = ompt_get_thread();
// Start at the thread's current team and its serialized-team chain.
55    kmp_team *team = thr->th.th_team;
59    ompt_lw_taskteam_t *next_lwt = LWT_FROM_TEAM(team), *lwt = NULL;
// Ascend one ancestor level: move to the parent team and pick up that
// team's lightweight task-team chain.
73          team = team->t.t_parent;
75            next_lwt = LWT_FROM_TEAM(team);
// A lightweight (serialized) team was found at the requested depth.
89      return &lwt->ompt_team_info;
// Real team at the requested depth: report its size, return its info.
93        *size = team->t.t_nproc;
96      return &team->t.ompt_team_info;
// __ompt_get_task_info_object: return the ompt_task_info_t for the task
// `depth` ancestor levels above the current task, checking lightweight
// (serialized) task teams before falling back to the kmp_taskdata_t chain.
// NOTE(review): interior lines (the descent loop and several branch headers)
// were lost in extraction; original line numbers are fused into the text.
103ompt_task_info_t *__ompt_get_task_info_object(
int depth) {
104  ompt_task_info_t *info = NULL;
105  kmp_info_t *thr = ompt_get_thread();
// Start from the thread's current (innermost) task and the lightweight
// task-team chain of that task's team.
108    kmp_taskdata_t *taskdata = thr->th.th_current_task;
109    ompt_lw_taskteam_t *lwt = NULL,
110                       *next_lwt = LWT_FROM_TEAM(taskdata->td_team);
// No lightweight team at this level: climb the ordinary parent chain.
119      if (!lwt && taskdata) {
124          taskdata = taskdata->td_parent;
126            next_lwt = LWT_FROM_TEAM(taskdata->td_team);
// Prefer the lightweight team's task info when one was found ...
134      info = &lwt->ompt_task_info;
135    }
// ... otherwise use the regular taskdata's task info.
else if (taskdata) {
136      info = &taskdata->ompt_task_info;
// __ompt_get_scheduling_taskinfo: like __ompt_get_task_info_object, but
// ascends via each task's ompt scheduling_parent (the task that scheduled
// it) rather than the structural td_parent, again preferring lightweight
// (serialized) task teams at each level.
// NOTE(review): the extraction dropped the descent loop and some branch
// headers; original file line numbers are fused into the text below.
143ompt_task_info_t *__ompt_get_scheduling_taskinfo(
int depth) {
144  ompt_task_info_t *info = NULL;
145  kmp_info_t *thr = ompt_get_thread();
148    kmp_taskdata_t *taskdata = thr->th.th_current_task;
150    ompt_lw_taskteam_t *lwt = NULL,
151                       *next_lwt = LWT_FROM_TEAM(taskdata->td_team);
160      if (!lwt && taskdata) {
// Prefer the scheduling parent when the task records one ...
162        if (taskdata->ompt_task_info.scheduling_parent) {
163          taskdata = taskdata->ompt_task_info.scheduling_parent;
164        }
// ... otherwise fall back to the structural parent chain.
else if (next_lwt) {
169          taskdata = taskdata->td_parent;
171            next_lwt = LWT_FROM_TEAM(taskdata->td_team);
// Lightweight team found at the requested depth: use its task info.
179      info = &lwt->ompt_task_info;
180    }
else if (taskdata) {
181      info = &taskdata->ompt_task_info;
196ompt_data_t *__ompt_get_thread_data_internal() {
197 if (__kmp_get_gtid() >= 0) {
198 kmp_info_t *thread = ompt_get_thread();
201 return &(thread->th.ompt_thread_info.thread_data);
210void __ompt_thread_assign_wait_id(
void *variable) {
211 kmp_info_t *ti = ompt_get_thread();
214 ti->th.ompt_thread_info.wait_id = (ompt_wait_id_t)(uintptr_t)variable;
217int __ompt_get_state_internal(ompt_wait_id_t *omp_wait_id) {
218 kmp_info_t *ti = ompt_get_thread();
222 *omp_wait_id = ti->th.ompt_thread_info.wait_id;
223 return ti->th.ompt_thread_info.state;
225 return ompt_state_undefined;
// __ompt_get_parallel_info_internal: OMPT entry point backing
// ompt_get_parallel_info — look up the team info `ancestor_level` levels up
// via __ompt_get_teaminfo and hand back a pointer to its parallel_data.
// NOTE(review): the extraction truncated the signature (the team_size
// out-parameter used at line L68 is declared on a missing line) and dropped
// the branch headers and returns; original line numbers are fused in.
232int __ompt_get_parallel_info_internal(
int ancestor_level,
233    ompt_data_t **parallel_data,
// Only meaningful on an OpenMP thread (non-negative gtid).
235  if (__kmp_get_gtid() >= 0) {
236    ompt_team_info_t *info;
// Pass team_size through only when the caller requested it.
238      info = __ompt_get_teaminfo(ancestor_level, team_size);
240      info = __ompt_get_teaminfo(ancestor_level, NULL);
243    *parallel_data = info ? &(info->parallel_data) : NULL;
255void __ompt_lw_taskteam_init(ompt_lw_taskteam_t *lwt, kmp_info_t *thr,
int gtid,
256 ompt_data_t *ompt_pid,
void *codeptr) {
259 lwt->ompt_team_info.parallel_data = *ompt_pid;
260 lwt->ompt_team_info.master_return_address = codeptr;
261 lwt->ompt_task_info.task_data.value = 0;
262 lwt->ompt_task_info.frame.enter_frame = ompt_data_none;
263 lwt->ompt_task_info.frame.exit_frame = ompt_data_none;
264 lwt->ompt_task_info.scheduling_parent = NULL;
// __ompt_lw_taskteam_link: push a lightweight task team onto the current
// team's serialized-team stack.  Depending on nesting (t_serialized) and
// `always`, either swap the lw team/task info with the current team's info
// and link it as the new stack head (possibly heap-copying `lwt` first when
// `on_heap`), or simply overwrite the current team/task info in place.
// NOTE(review): the surrounding if/else structure and the #if OMPD_SUPPORT
// guards are missing from this extraction; original line numbers are fused
// into the text.
269void __ompt_lw_taskteam_link(ompt_lw_taskteam_t *lwt, kmp_info_t *thr,
270    int on_heap,
bool always) {
271  ompt_lw_taskteam_t *link_lwt = lwt;
// Nested serialized region: the descriptor must outlive the caller's frame,
// so copy it to the heap when requested.
273      thr->th.th_team->t.t_serialized >
277        (ompt_lw_taskteam_t *)__kmp_allocate(
sizeof(ompt_lw_taskteam_t));
279    link_lwt->heap = on_heap;
// Swap team info: the lw descriptor keeps the outer team's info while the
// team structure now carries the serialized region's info.
282    ompt_team_info_t tmp_team = lwt->ompt_team_info;
283    link_lwt->ompt_team_info = *OMPT_CUR_TEAM_INFO(thr);
284    *OMPT_CUR_TEAM_INFO(thr) = tmp_team;
// Link onto the head of the team's serialized-team stack.
287    ompt_lw_taskteam_t *my_parent =
288        thr->th.th_team->t.ompt_serialized_team_info;
289    link_lwt->parent = my_parent;
290    thr->th.th_team->t.ompt_serialized_team_info = link_lwt;
// Notify the OMPD breakpoint hook (guarded by OMPD_ENABLE_BP state).
292    if (ompd_state & OMPD_ENABLE_BP) {
293      ompd_bp_parallel_begin();
// Swap task info symmetrically with the team-info swap above.
297    ompt_task_info_t tmp_task = lwt->ompt_task_info;
298    link_lwt->ompt_task_info = *OMPT_CUR_TASK_INFO(thr);
299    *OMPT_CUR_TASK_INFO(thr) = tmp_task;
// Non-nested path: no linking needed, just install the lw info directly.
303    *OMPT_CUR_TEAM_INFO(thr) = lwt->ompt_team_info;
305    if (ompd_state & OMPD_ENABLE_BP) {
306      ompd_bp_parallel_begin();
309    *OMPT_CUR_TASK_INFO(thr) = lwt->ompt_task_info;
// __ompt_lw_taskteam_unlink: inverse of __ompt_lw_taskteam_link — pop the
// head of the current team's serialized-team stack, swapping the saved
// task/team info back into the team structure.
// NOTE(review): the extraction dropped the `if (lwtask)` guard, the
// #if OMPD_SUPPORT guards, and the heap-free of the popped descriptor;
// original line numbers are fused into the text.
313void __ompt_lw_taskteam_unlink(kmp_info_t *thr) {
314  ompt_lw_taskteam_t *lwtask = thr->th.th_team->t.ompt_serialized_team_info;
// Restore the outer task info saved at link time (swap back).
316    ompt_task_info_t tmp_task = lwtask->ompt_task_info;
317    lwtask->ompt_task_info = *OMPT_CUR_TASK_INFO(thr);
318    *OMPT_CUR_TASK_INFO(thr) = tmp_task;
// OMPD breakpoint hook for parallel-region end.
320    if (ompd_state & OMPD_ENABLE_BP) {
321      ompd_bp_parallel_end();
// Pop the stack head.
324    thr->th.th_team->t.ompt_serialized_team_info = lwtask->parent;
// Restore the outer team info saved at link time (swap back).
326    ompt_team_info_t tmp_team = lwtask->ompt_team_info;
327    lwtask->ompt_team_info = *OMPT_CUR_TEAM_INFO(thr);
328    *OMPT_CUR_TEAM_INFO(thr) = tmp_team;
// __ompt_get_task_info_internal: OMPT entry point backing
// ompt_get_task_info — walk `ancestor_level` levels up the task hierarchy
// (via scheduling_parent / td_parent / lightweight task teams) and report
// the task's data, frame, flags (`type`), enclosing parallel_data, and the
// thread number within that team.
// NOTE(review): the extraction truncated the signature (the `thread_num`
// out-parameter used at L157/L159/L160 is declared on a missing line) and
// dropped the walk loop, several branch headers, and the returns; original
// line numbers are fused into the text.
342int __ompt_get_task_info_internal(
int ancestor_level,
int *type,
343    ompt_data_t **task_data,
344    ompt_frame_t **task_frame,
345    ompt_data_t **parallel_data,
// Bail out on non-OpenMP threads and on invalid (negative) levels.
347  if (__kmp_get_gtid() < 0)
350  if (ancestor_level < 0)
354  ompt_task_info_t *info = NULL;
355  ompt_team_info_t *team_info = NULL;
356  kmp_info_t *thr = ompt_get_thread();
357  int level = ancestor_level;
360  kmp_taskdata_t *taskdata = thr->th.th_current_task;
361  if (taskdata == NULL)
// prev_team tracks the team one level below, used for thread_num below.
363  kmp_team *team = thr->th.th_team, *prev_team = NULL;
366  ompt_lw_taskteam_t *lwt = NULL,
367                     *next_lwt = LWT_FROM_TEAM(taskdata->td_team);
// Ascend the requested number of levels.
369  while (ancestor_level > 0) {
376    if (!lwt && taskdata) {
// Prefer the scheduling parent; fall back to the structural parent and
// the parent team.
378      if (taskdata->ompt_task_info.scheduling_parent) {
379        taskdata = taskdata->ompt_task_info.scheduling_parent;
380      }
else if (next_lwt) {
385        taskdata = taskdata->td_parent;
389        team = team->t.t_parent;
391        next_lwt = LWT_FROM_TEAM(taskdata->td_team);
// Lightweight (serialized) team found: it is always an implicit task.
399    info = &lwt->ompt_task_info;
400    team_info = &lwt->ompt_team_info;
402      *type = ompt_task_implicit;
404  }
else if (taskdata) {
405    info = &taskdata->ompt_task_info;
406    team_info = &team->t.ompt_team_info;
// A task with no parent is the initial task; otherwise report
// explicit/implicit plus detail flags.
408    if (taskdata->td_parent) {
409      *type = (taskdata->td_flags.tasktype ? ompt_task_explicit
410                                           : ompt_task_implicit) |
411              TASK_TYPE_DETAILS_FORMAT(taskdata);
413      *type = ompt_task_initial;
// Fill the out-parameters the caller asked for (each may be NULL).
418    *task_data = info ? &info->task_data : NULL;
422    *task_frame = info ? &info->frame : NULL;
425    *parallel_data = team_info ? &(team_info->parallel_data) : NULL;
// thread_num: current tid at level 0 / no previous team, otherwise the
// master tid of the team one level below.
429      *thread_num = __kmp_get_tid();
432    else if (!prev_team) {
440      *thread_num = __kmp_get_tid();
442      *thread_num = prev_team->t.t_master_tid;
// __ompt_get_task_memory_internal: OMPT entry point backing
// ompt_get_task_memory — report the address and size of the private-data
// block of the current explicit task (block `blocknum`).  The usable region
// starts after the runtime's task bookkeeping (after data1 when a
// destructors thunk is present, otherwise after part_id) and runs to the end
// of the task allocation.
// NOTE(review): the extraction dropped the early returns, the else header,
// and the ret_addr declaration; original line numbers are fused in.
450int __ompt_get_task_memory_internal(
void **addr,
size_t *size,
int blocknum) {
454  kmp_info_t *thr = ompt_get_thread();
458  kmp_taskdata_t *taskdata = thr->th.th_current_task;
459  kmp_task_t *task = KMP_TASKDATA_TO_TASK(taskdata);
// Only explicit tasks have a private-data block to report.
461  if (taskdata->td_flags.tasktype != TASK_EXPLICIT)
// Total payload = allocation size minus the taskdata header.
465  int64_t ret_size = taskdata->td_size_alloc -
sizeof(kmp_taskdata_t);
// Private data begins after data1 when a destructors thunk is stored there,
// otherwise directly after part_id.
468  if (taskdata->td_flags.destructors_thunk)
469    ret_addr = &task->data1 + 1;
471    ret_addr = &task->part_id + 1;
// Subtract the kmp_task_t prefix that precedes the private data.
473  ret_size -= (
char *)(ret_addr) - (
char *)(task);
478  *size = (size_t)ret_size;
486void __ompt_team_assign_id(kmp_team_t *team, ompt_data_t ompt_pid) {
487 team->t.ompt_team_info.parallel_data = ompt_pid;
494static uint64_t __ompt_get_unique_id_internal() {
495 static uint64_t thread = 1;
496 static THREAD_LOCAL uint64_t ID = 0;
498 uint64_t new_thread = KMP_TEST_THEN_INC64((kmp_int64 *)&thread);
499 ID = new_thread << (
sizeof(uint64_t) * 8 - OMPT_THREAD_ID_BITS);
504ompt_sync_region_t __ompt_get_barrier_kind(
enum barrier_type bt,
506 if (bt == bs_forkjoin_barrier)
507 return ompt_sync_region_barrier_implicit;
509 if (bt != bs_plain_barrier)
510 return ompt_sync_region_barrier_implementation;
512 if (!thr->th.th_ident)
513 return ompt_sync_region_barrier;
515 kmp_int32 flags = thr->th.th_ident->flags;
518 return ompt_sync_region_barrier_explicit;
521 return ompt_sync_region_barrier_implicit;
523 return ompt_sync_region_barrier_implementation;