#include "kmp_affinity.h"
#include "kmp_wrapper_getpid.h"
#include "kmp_dispatch_hier.h"

#define HWLOC_GROUP_KIND_INTEL_MODULE 102
#define HWLOC_GROUP_KIND_INTEL_TILE 103
#define HWLOC_GROUP_KIND_INTEL_DIE 104
#define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220
// The machine topology
kmp_topology_t *__kmp_topology = nullptr;
// KMP_HW_SUBSET environment variable
kmp_hw_subset_t *__kmp_hw_subset = nullptr;
// Store the real or imagined machine hierarchy here
static hierarchy_info machine_hierarchy;

void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }

void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
  kmp_uint32 depth;
  // The test below is true if affinity is available, but set to "none". Need to
  // init on first use of hierarchical barrier.
  if (TCR_1(machine_hierarchy.uninitialized))
    machine_hierarchy.init(nproc);

  // Adjust the hierarchy in case num threads exceeds original
  if (nproc > machine_hierarchy.base_num_threads)
    machine_hierarchy.resize(nproc);

  depth = machine_hierarchy.depth;
  KMP_DEBUG_ASSERT(depth > 0);

  thr_bar->depth = depth;
  __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
                     &(thr_bar->base_leaf_kids));
  thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}
static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif
const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? KMP_I18N_STR(Sockets) : KMP_I18N_STR(Socket));
  case KMP_HW_DIE:
    return ((plural) ? KMP_I18N_STR(Dice) : KMP_I18N_STR(Die));
  case KMP_HW_MODULE:
    return ((plural) ? KMP_I18N_STR(Modules) : KMP_I18N_STR(Module));
  case KMP_HW_TILE:
    return ((plural) ? KMP_I18N_STR(Tiles) : KMP_I18N_STR(Tile));
  case KMP_HW_NUMA:
    return ((plural) ? KMP_I18N_STR(NumaDomains) : KMP_I18N_STR(NumaDomain));
  case KMP_HW_L3:
    return ((plural) ? KMP_I18N_STR(L3Caches) : KMP_I18N_STR(L3Cache));
  case KMP_HW_L2:
    return ((plural) ? KMP_I18N_STR(L2Caches) : KMP_I18N_STR(L2Cache));
  case KMP_HW_L1:
    return ((plural) ? KMP_I18N_STR(L1Caches) : KMP_I18N_STR(L1Cache));
  case KMP_HW_LLC:
    return ((plural) ? KMP_I18N_STR(LLCaches) : KMP_I18N_STR(LLCache));
  case KMP_HW_CORE:
    return ((plural) ? KMP_I18N_STR(Cores) : KMP_I18N_STR(Core));
  case KMP_HW_THREAD:
    return ((plural) ? KMP_I18N_STR(Threads) : KMP_I18N_STR(Thread));
  case KMP_HW_PROC_GROUP:
    return ((plural) ? KMP_I18N_STR(ProcGroups) : KMP_I18N_STR(ProcGroup));
  }
  return KMP_I18N_STR(Unknown);
}
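
// Illustrative sketch (not part of the runtime): the catalog strings above
// feed user-facing KMP_AFFINITY messages. A verbose line is assembled as in:
//   // assuming a multi-socket machine, so plural=true
//   const char *s = __kmp_hw_get_catalog_string(KMP_HW_SOCKET, /*plural=*/true);
//   // s now points at the localized equivalent of "Sockets"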
const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? "sockets" : "socket");
  case KMP_HW_DIE:
    return ((plural) ? "dice" : "die");
  case KMP_HW_MODULE:
    return ((plural) ? "modules" : "module");
  case KMP_HW_TILE:
    return ((plural) ? "tiles" : "tile");
  case KMP_HW_NUMA:
    return ((plural) ? "numa_domains" : "numa_domain");
  case KMP_HW_L3:
    return ((plural) ? "l3_caches" : "l3_cache");
  case KMP_HW_L2:
    return ((plural) ? "l2_caches" : "l2_cache");
  case KMP_HW_L1:
    return ((plural) ? "l1_caches" : "l1_cache");
  case KMP_HW_LLC:
    return ((plural) ? "ll_caches" : "ll_cache");
  case KMP_HW_CORE:
    return ((plural) ? "cores" : "core");
  case KMP_HW_THREAD:
    return ((plural) ? "threads" : "thread");
  case KMP_HW_PROC_GROUP:
    return ((plural) ? "proc_groups" : "proc_group");
  }
  return ((plural) ? "unknowns" : "unknown");
}
const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type) {
  switch (type) {
  case KMP_HW_CORE_TYPE_UNKNOWN:
    return "unknown";
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  case KMP_HW_CORE_TYPE_ATOM:
    return "Intel Atom(R) processor";
  case KMP_HW_CORE_TYPE_CORE:
    return "Intel(R) Core(TM) processor";
#endif
  }
  return "unknown";
}
#if KMP_AFFINITY_SUPPORTED
// If affinity is supported, check the affinity
// verbose and warning flags before printing warning
#define KMP_AFF_WARNING(...)                                                   \
  if (__kmp_affinity_verbose ||                                                \
      (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {   \
    KMP_WARNING(__VA_ARGS__);                                                  \
  }
#else
#define KMP_AFF_WARNING KMP_WARNING
#endif
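
// Usage sketch (illustrative): the macro forwards its arguments to
// KMP_WARNING() only when verbose/warning output is enabled and affinity is
// not "none". For example,
//   KMP_AFF_WARNING(AffHWSubsetAllFiltered);
// expands to a guarded KMP_WARNING(AffHWSubsetAllFiltered) call.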
int kmp_hw_thread_t::compare_ids(const void *a, const void *b) {
  const kmp_hw_thread_t *ahwthread = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bhwthread = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  for (int level = 0; level < depth; ++level) {
    if (ahwthread->ids[level] < bhwthread->ids[level])
      return -1;
    else if (ahwthread->ids[level] > bhwthread->ids[level])
      return 1;
  }
  if (ahwthread->os_id < bhwthread->os_id)
    return -1;
  else if (ahwthread->os_id > bhwthread->os_id)
    return 1;
  return 0;
}
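
// compare_ids is qsort-compatible; kmp_topology_t::sort_ids() is expected to
// invoke it roughly as in this sketch (illustrative, given the fields above):
//   qsort(hw_threads, num_hw_threads, sizeof(kmp_hw_thread_t),
//         kmp_hw_thread_t::compare_ids);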
#if KMP_AFFINITY_SUPPORTED
int kmp_hw_thread_t::compare_compact(const void *a, const void *b) {
  int i;
  const kmp_hw_thread_t *aa = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bb = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  KMP_DEBUG_ASSERT(__kmp_affinity_compact >= 0);
  KMP_DEBUG_ASSERT(__kmp_affinity_compact <= depth);
  for (i = 0; i < __kmp_affinity_compact; i++) {
    int j = depth - i - 1;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  for (; i < depth; i++) {
    int j = i - __kmp_affinity_compact;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  return 0;
}
#endif
void kmp_hw_thread_t::print() const {
  int depth = __kmp_topology->get_depth();
  printf("%4d ", os_id);
  for (int i = 0; i < depth; ++i) {
    printf("%4d ", ids[i]);
  }
  if (attrs.is_core_type_valid())
    printf(" (%s)", __kmp_hw_get_core_type_string(attrs.get_core_type()));
  if (attrs.is_core_eff_valid())
    printf(" (eff=%d)", attrs.get_core_eff());
  printf("\n");
}
// Add a layer to the topology based on the ids. Assume the topology
// is perfectly nested (i.e., so no object has more than one parent)
void kmp_topology_t::_insert_layer(kmp_hw_t type, const int *ids) {
  // Figure out where the layer should go by comparing the ids of the current
  // layers with the new ids
  int target_layer;
  int previous_id = kmp_hw_thread_t::UNKNOWN_ID;
  int previous_new_id = kmp_hw_thread_t::UNKNOWN_ID;

  // Start from the highest layer and work down to find target layer
  // If new layer is equal to another layer then that is the layer to insert at
  for (target_layer = 0; target_layer < depth; ++target_layer) {
    bool layers_equal = true;
    bool strictly_above_target_layer = false;
    for (int i = 0; i < num_hw_threads; ++i) {
      int id = hw_threads[i].ids[target_layer];
      int new_id = ids[i];
      if (id != previous_id && new_id == previous_new_id) {
        // Found the layer we are strictly above
        strictly_above_target_layer = true;
        layers_equal = false;
        break;
      } else if (id == previous_id && new_id != previous_new_id) {
        // Found a layer we are below. Move to next layer and check.
        layers_equal = false;
        break;
      }
      previous_id = id;
      previous_new_id = new_id;
    }
    if (strictly_above_target_layer || layers_equal)
      break;
  }

  // Insert the new layer at target_layer by shifting ids and types up
  for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
    types[j] = types[i];
  types[target_layer] = type;
  for (int k = 0; k < num_hw_threads; ++k) {
    for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
      hw_threads[k].ids[j] = hw_threads[k].ids[i];
    hw_threads[k].ids[target_layer] = ids[k];
  }
  equivalent[type] = type;
  depth++;
}
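
// Worked example (hypothetical ids): with depth=2, types={SOCKET,THREAD} and
// per-thread ids {0,0},{0,1},{0,2},{0,3}, inserting a CORE layer whose
// ids[] = {0,0,1,1} yields depth=3, types={SOCKET,CORE,THREAD}, and thread
// ids {0,0,0},{0,0,1},{0,1,2},{0,1,3}.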
#if KMP_GROUP_AFFINITY
// Insert the Windows Processor Group structure into the topology
void kmp_topology_t::_insert_windows_proc_groups() {
  // Do not insert the processor group structure for a single group
  if (__kmp_num_proc_groups == 1)
    return;
  kmp_affin_mask_t *mask;
  int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
  KMP_CPU_ALLOC(mask);
  for (int i = 0; i < num_hw_threads; ++i) {
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(hw_threads[i].os_id, mask);
    ids[i] = __kmp_get_proc_group(mask);
  }
  KMP_CPU_FREE(mask);
  _insert_layer(KMP_HW_PROC_GROUP, ids);
  __kmp_free(ids);
}
#endif
// Remove layers that don't add information to the topology.
void kmp_topology_t::_remove_radix1_layers() {
  int preference[KMP_HW_LAST];
  int top_index1, top_index2;
  // Set up preference associative array
  preference[KMP_HW_SOCKET] = 110;
  preference[KMP_HW_PROC_GROUP] = 100;
  preference[KMP_HW_CORE] = 95;
  preference[KMP_HW_THREAD] = 90;
  preference[KMP_HW_NUMA] = 85;
  preference[KMP_HW_DIE] = 80;
  preference[KMP_HW_TILE] = 75;
  preference[KMP_HW_MODULE] = 73;
  preference[KMP_HW_L3] = 70;
  preference[KMP_HW_L2] = 65;
  preference[KMP_HW_L1] = 60;
  preference[KMP_HW_LLC] = 5;
  top_index1 = 0;
  top_index2 = 1;
  while (top_index1 < depth - 1 && top_index2 < depth) {
    kmp_hw_t type1 = types[top_index1];
    kmp_hw_t type2 = types[top_index2];
    KMP_ASSERT_VALID_HW_TYPE(type1);
    KMP_ASSERT_VALID_HW_TYPE(type2);
    // Do not allow the three main topology levels (socket, core, thread) to
    // be compacted down
    if ((type1 == KMP_HW_THREAD || type1 == KMP_HW_CORE ||
         type1 == KMP_HW_SOCKET) &&
        (type2 == KMP_HW_THREAD || type2 == KMP_HW_CORE ||
         type2 == KMP_HW_SOCKET)) {
      top_index1 = top_index2++;
      continue;
    }
    bool radix1 = true;
    bool all_same = true;
    int id1 = hw_threads[0].ids[top_index1];
    int id2 = hw_threads[0].ids[top_index2];
    int pref1 = preference[type1];
    int pref2 = preference[type2];
    for (int hwidx = 1; hwidx < num_hw_threads; ++hwidx) {
      if (hw_threads[hwidx].ids[top_index1] == id1 &&
          hw_threads[hwidx].ids[top_index2] != id2) {
        radix1 = false;
        break;
      }
      if (hw_threads[hwidx].ids[top_index2] != id2)
        all_same = false;
      id1 = hw_threads[hwidx].ids[top_index1];
      id2 = hw_threads[hwidx].ids[top_index2];
    }
    if (radix1) {
      // Select the layer to remove based on preference
      kmp_hw_t remove_type, keep_type;
      int remove_layer, remove_layer_ids;
      if (pref1 > pref2) {
        remove_type = type2;
        remove_layer = remove_layer_ids = top_index2;
        keep_type = type1;
      } else {
        remove_type = type1;
        remove_layer = remove_layer_ids = top_index1;
        keep_type = type2;
      }
      // If all the indexes for the second (deeper) layer are the same,
      // then make sure to keep the first layer's ids
      if (all_same)
        remove_layer_ids = top_index2;
      // Remove radix one type by setting the equivalence, removing the id from
      // the hw threads, and removing the layer from types and depth
      set_equivalent_type(remove_type, keep_type);
      for (int idx = 0; idx < num_hw_threads; ++idx) {
        kmp_hw_thread_t &hw_thread = hw_threads[idx];
        for (int d = remove_layer_ids; d < depth - 1; ++d)
          hw_thread.ids[d] = hw_thread.ids[d + 1];
      }
      for (int idx = remove_layer; idx < depth - 1; ++idx)
        types[idx] = types[idx + 1];
      depth--;
    } else {
      top_index1 = top_index2++;
    }
  }
  KMP_ASSERT(depth > 0);
}
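
// Example (hypothetical machine): if every detected L3 id maps 1:1 onto a
// socket id, the L3 layer carries no extra structure; the loop above removes
// it and records equivalent[KMP_HW_L3] = KMP_HW_SOCKET, so later queries for
// L3 transparently resolve to the socket layer.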
void kmp_topology_t::_set_last_level_cache() {
  if (get_equivalent_type(KMP_HW_L3) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L3);
  else if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
#if KMP_MIC_SUPPORTED
  else if (__kmp_mic_type == mic3) {
    if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
    else if (get_equivalent_type(KMP_HW_TILE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_TILE);
    // L2/Tile wasn't detected so just say L1
    else
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  }
#endif
  else if (get_equivalent_type(KMP_HW_L1) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  // Fallback is to set last level cache to socket or core
  if (get_equivalent_type(KMP_HW_LLC) == KMP_HW_UNKNOWN) {
    if (get_equivalent_type(KMP_HW_SOCKET) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_SOCKET);
    else if (get_equivalent_type(KMP_HW_CORE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_CORE);
  }
  KMP_ASSERT(get_equivalent_type(KMP_HW_LLC) != KMP_HW_UNKNOWN);
}
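
// E.g., on a topology that detected L3 and L2 layers, KMP_HW_LLC becomes an
// alias for KMP_HW_L3; if only sockets and cores were detected, the fallback
// marks the socket (then core) layer as the last-level cache.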
// Gather the count of each topology layer and the ratio
void kmp_topology_t::_gather_enumeration_information() {
  int previous_id[KMP_HW_LAST];
  int max[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
    max[i] = 0;
    count[i] = 0;
    ratio[i] = 0;
  }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    for (int layer = 0; layer < depth; ++layer) {
      int id = hw_thread.ids[layer];
      if (id != previous_id[layer]) {
        // Add an additional increment to each count
        for (int l = layer; l < depth; ++l)
          count[l]++;
        // Keep track of topology layer ratio statistics
        max[layer]++;
        for (int l = layer + 1; l < depth; ++l) {
          if (max[l] > ratio[l])
            ratio[l] = max[l];
          max[l] = 1;
        }
        // Figure out the number of different core types
        // and efficiencies for hybrid CPUs
        if (__kmp_is_hybrid_cpu() && core_level >= 0 && layer <= core_level) {
          if (hw_thread.attrs.is_core_eff_valid() &&
              hw_thread.attrs.core_eff >= num_core_efficiencies) {
            // Because efficiencies can range from 0 to max efficiency - 1,
            // the number of efficiencies is max efficiency + 1
            num_core_efficiencies = hw_thread.attrs.core_eff + 1;
          }
          if (hw_thread.attrs.is_core_type_valid()) {
            bool found = false;
            for (int j = 0; j < num_core_types; ++j) {
              if (hw_thread.attrs.get_core_type() == core_types[j]) {
                found = true;
                break;
              }
            }
            if (!found) {
              KMP_ASSERT(num_core_types < KMP_HW_MAX_NUM_CORE_TYPES);
              core_types[num_core_types++] = hw_thread.attrs.get_core_type();
            }
          }
        }
        break;
      }
    }
    for (int layer = 0; layer < depth; ++layer) {
      previous_id[layer] = hw_thread.ids[layer];
    }
  }
  for (int layer = 0; layer < depth; ++layer) {
    if (max[layer] > ratio[layer])
      ratio[layer] = max[layer];
  }
}
int kmp_topology_t::_get_ncores_with_attr(const kmp_hw_attr_t &attr,
                                          int above_level,
                                          bool find_all) const {
  int current, current_max;
  int previous_id[KMP_HW_LAST];
  for (int i = 0; i < depth; ++i)
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
  int core_level = get_level(KMP_HW_CORE);
  if (find_all)
    above_level = -1;
  KMP_ASSERT(above_level < core_level);
  current_max = 0;
  current = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    if (!find_all && hw_thread.ids[above_level] != previous_id[above_level]) {
      if (current > current_max)
        current_max = current;
      current = hw_thread.attrs.contains(attr);
    } else {
      for (int level = above_level + 1; level <= core_level; ++level) {
        if (hw_thread.ids[level] != previous_id[level]) {
          if (hw_thread.attrs.contains(attr))
            current++;
          break;
        }
      }
    }
    for (int level = 0; level < depth; ++level)
      previous_id[level] = hw_thread.ids[level];
  }
  if (current > current_max)
    current_max = current;
  return current_max;
}
// Find out if the topology is uniform
void kmp_topology_t::_discover_uniformity() {
  int num = 1;
  for (int level = 0; level < depth; ++level)
    num *= ratio[level];
  flags.uniform = (num == count[depth - 1]);
}
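
// Example: ratio[] = {2 sockets, 8 cores/socket, 2 threads/core} gives
// num = 2 * 8 * 2 = 32; the topology is uniform iff count[depth - 1] (the
// total number of hardware threads) is also 32.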
// Set all the sub_ids for each hardware thread
void kmp_topology_t::_set_sub_ids() {
  int previous_id[KMP_HW_LAST];
  int sub_id[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = -1;
    sub_id[i] = -1;
  }
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Setup the sub_id
    for (int j = 0; j < depth; ++j) {
      if (hw_thread.ids[j] != previous_id[j]) {
        sub_id[j]++;
        for (int k = j + 1; k < depth; ++k) {
          sub_id[k] = 0;
        }
        break;
      }
    }
    // Set previous_id
    for (int j = 0; j < depth; ++j) {
      previous_id[j] = hw_thread.ids[j];
    }
    // Set the sub_ids field
    for (int j = 0; j < depth; ++j) {
      hw_thread.sub_ids[j] = sub_id[j];
    }
  }
}
void kmp_topology_t::_set_globals() {
  // Set nCoresPerPkg, nPackages, __kmp_nThreadsPerCore, __kmp_ncores
  int core_level, thread_level, package_level;
  package_level = get_level(KMP_HW_SOCKET);
#if KMP_GROUP_AFFINITY
  if (package_level == -1)
    package_level = get_level(KMP_HW_PROC_GROUP);
#endif
  core_level = get_level(KMP_HW_CORE);
  thread_level = get_level(KMP_HW_THREAD);

  KMP_ASSERT(core_level != -1);
  KMP_ASSERT(thread_level != -1);

  __kmp_nThreadsPerCore = calculate_ratio(thread_level, core_level);
  if (package_level != -1) {
    nCoresPerPkg = calculate_ratio(core_level, package_level);
    nPackages = get_count(package_level);
  } else {
    // Assume one socket
    nCoresPerPkg = get_count(core_level);
    nPackages = 1;
  }
#ifndef KMP_DFLT_NTH_CORES
  __kmp_ncores = get_count(core_level);
#endif
}
kmp_topology_t *kmp_topology_t::allocate(int nproc, int ndepth,
                                         const kmp_hw_t *types) {
  kmp_topology_t *retval;
  // Allocate all data in one large allocation
  size_t size = sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc +
                sizeof(int) * (size_t)KMP_HW_LAST * 3;
  char *bytes = (char *)__kmp_allocate(size);
  retval = (kmp_topology_t *)bytes;
  if (nproc > 0) {
    retval->hw_threads = (kmp_hw_thread_t *)(bytes + sizeof(kmp_topology_t));
  } else {
    retval->hw_threads = nullptr;
  }
  retval->num_hw_threads = nproc;
  retval->depth = ndepth;
  int *arr =
      (int *)(bytes + sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc);
  retval->types = (kmp_hw_t *)arr;
  retval->ratio = arr + (size_t)KMP_HW_LAST;
  retval->count = arr + 2 * (size_t)KMP_HW_LAST;
  retval->num_core_efficiencies = 0;
  retval->num_core_types = 0;
  for (int i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
    retval->core_types[i] = KMP_HW_CORE_TYPE_UNKNOWN;
  KMP_FOREACH_HW_TYPE(type) { retval->equivalent[type] = KMP_HW_UNKNOWN; }
  for (int i = 0; i < ndepth; ++i) {
    retval->types[i] = types[i];
    retval->equivalent[types[i]] = types[i];
  }
  return retval;
}
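
// Allocation/lifetime sketch (illustrative; mirrors how the detection
// routines later in this file use the API):
//   kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD};
//   kmp_topology_t *topo = kmp_topology_t::allocate(nproc, 3, types);
//   // ... fill topo->at(i).ids[] and os_id for each hardware thread ...
//   topo->canonicalize();
//   kmp_topology_t::deallocate(topo);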
void kmp_topology_t::deallocate(kmp_topology_t *topology) {
  if (topology)
    __kmp_free(topology);
}
bool kmp_topology_t::check_ids() const {
  // Assume ids have been sorted
  if (num_hw_threads == 0)
    return true;
  for (int i = 1; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &current_thread = hw_threads[i];
    kmp_hw_thread_t &previous_thread = hw_threads[i - 1];
    bool unique = false;
    for (int j = 0; j < depth; ++j) {
      if (previous_thread.ids[j] != current_thread.ids[j]) {
        unique = true;
        break;
      }
    }
    if (unique)
      continue;
    return false;
  }
  return true;
}
void kmp_topology_t::dump() const {
  printf("***********************\n");
  printf("*** __kmp_topology: ***\n");
  printf("***********************\n");
  printf("* depth: %d\n", depth);

  printf("* types: ");
  for (int i = 0; i < depth; ++i)
    printf("%15s ", __kmp_hw_get_keyword(types[i]));
  printf("\n");

  printf("* ratio: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", ratio[i]);
  }
  printf("\n");

  printf("* count: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", count[i]);
  }
  printf("\n");

  printf("* num_core_eff: %d\n", num_core_efficiencies);
  printf("* num_core_types: %d\n", num_core_types);
  printf("* core_types: ");
  for (int i = 0; i < num_core_types; ++i)
    printf("%3d ", core_types[i]);
  printf("\n");

  printf("* equivalent map:\n");
  KMP_FOREACH_HW_TYPE(i) {
    const char *key = __kmp_hw_get_keyword(i);
    const char *value = __kmp_hw_get_keyword(equivalent[i]);
    printf("%-15s -> %-15s\n", key, value);
  }

  printf("* uniform: %s\n", (is_uniform() ? "Yes" : "No"));

  printf("* num_hw_threads: %d\n", num_hw_threads);
  printf("* hw_threads:\n");
  for (int i = 0; i < num_hw_threads; ++i) {
    hw_threads[i].print();
  }
  printf("***********************\n");
}
void kmp_topology_t::print(const char *env_var) const {
  kmp_str_buf_t buf;
  int print_types_depth;
  __kmp_str_buf_init(&buf);
  kmp_hw_t print_types[KMP_HW_LAST + 2];

  // Num Available Threads
  KMP_INFORM(AvailableOSProc, env_var, num_hw_threads);

  // Uniform or not
  if (is_uniform()) {
    KMP_INFORM(Uniform, env_var);
  } else {
    KMP_INFORM(NonUniform, env_var);
  }

  // Equivalent types
  KMP_FOREACH_HW_TYPE(type) {
    kmp_hw_t eq_type = equivalent[type];
    if (eq_type != KMP_HW_UNKNOWN && eq_type != type) {
      KMP_INFORM(AffEqualTopologyTypes, env_var,
                 __kmp_hw_get_catalog_string(type),
                 __kmp_hw_get_catalog_string(eq_type));
    }
  }

  // Quick topology
  KMP_ASSERT(depth > 0 && depth <= (int)KMP_HW_LAST);
  // Create a print types array that always guarantees printing
  // the core and thread level
  print_types_depth = 0;
  for (int level = 0; level < depth; ++level)
    print_types[print_types_depth++] = types[level];
  if (equivalent[KMP_HW_CORE] != KMP_HW_CORE) {
    // Force in the core level for quick topology
    if (print_types[print_types_depth - 1] == KMP_HW_THREAD) {
      // Force core before thread e.g., 1 socket X 2 threads/socket
      // becomes 1 socket X 1 core/socket X 2 threads/core
      print_types[print_types_depth - 1] = KMP_HW_CORE;
      print_types[print_types_depth++] = KMP_HW_THREAD;
    } else {
      print_types[print_types_depth++] = KMP_HW_CORE;
    }
  }
  // Always put threads at very end of quick topology
  if (equivalent[KMP_HW_THREAD] != KMP_HW_THREAD)
    print_types[print_types_depth++] = KMP_HW_THREAD;

  __kmp_str_buf_clear(&buf);
  kmp_hw_t numerator_type;
  kmp_hw_t denominator_type = KMP_HW_UNKNOWN;
  int core_level = get_level(KMP_HW_CORE);
  int ncores = get_count(core_level);

  for (int plevel = 0, level = 0; plevel < print_types_depth; ++plevel) {
    int c;
    bool plural;
    numerator_type = print_types[plevel];
    KMP_ASSERT_VALID_HW_TYPE(numerator_type);
    if (equivalent[numerator_type] != numerator_type)
      c = 1;
    else
      c = get_ratio(level++);
    plural = (c > 1);
    if (plevel == 0) {
      __kmp_str_buf_print(&buf, "%d %s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural));
    } else {
      __kmp_str_buf_print(&buf, " x %d %s/%s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural),
                          __kmp_hw_get_catalog_string(denominator_type));
    }
    denominator_type = numerator_type;
  }
  KMP_INFORM(TopologyGeneric, env_var, buf.str, ncores);

  // Hybrid topology information
  if (__kmp_is_hybrid_cpu()) {
    for (int i = 0; i < num_core_types; ++i) {
      kmp_hw_core_type_t core_type = core_types[i];
      kmp_hw_attr_t attr;
      attr.clear();
      attr.set_core_type(core_type);
      int ncores = get_ncores_with_attr(attr);
      if (ncores > 0) {
        KMP_INFORM(TopologyHybrid, env_var, ncores,
                   __kmp_hw_get_core_type_string(core_type));
        KMP_ASSERT(num_core_efficiencies <= KMP_HW_MAX_NUM_CORE_EFFS)
        for (int eff = 0; eff < num_core_efficiencies; ++eff) {
          attr.set_core_eff(eff);
          int ncores_with_eff = get_ncores_with_attr(attr);
          if (ncores_with_eff > 0) {
            KMP_INFORM(TopologyHybridCoreEff, env_var, ncores_with_eff, eff);
          }
        }
      }
    }
  }

  if (num_hw_threads <= 0) {
    __kmp_str_buf_free(&buf);
    return;
  }

  // Full OS proc to hardware thread map
  KMP_INFORM(OSProcToPhysicalThreadMap, env_var);
  for (int i = 0; i < num_hw_threads; i++) {
    __kmp_str_buf_clear(&buf);
    for (int level = 0; level < depth; ++level) {
      kmp_hw_t type = types[level];
      __kmp_str_buf_print(&buf, "%s ", __kmp_hw_get_catalog_string(type));
      __kmp_str_buf_print(&buf, "%d ", hw_threads[i].ids[level]);
    }
    if (__kmp_is_hybrid_cpu())
      __kmp_str_buf_print(
          &buf, "(%s)",
          __kmp_hw_get_core_type_string(hw_threads[i].attrs.get_core_type()));
    KMP_INFORM(OSProcMapToPack, env_var, hw_threads[i].os_id, buf.str);
  }

  __kmp_str_buf_free(&buf);
}
void kmp_topology_t::canonicalize() {
#if KMP_GROUP_AFFINITY
  _insert_windows_proc_groups();
#endif
  _remove_radix1_layers();
  _gather_enumeration_information();
  _discover_uniformity();
  _set_sub_ids();
  _set_globals();
  _set_last_level_cache();

#if KMP_MIC_SUPPORTED
  // Manually Add L2 = Tile equivalence
  if (__kmp_mic_type == mic3) {
    if (get_level(KMP_HW_L2) != -1)
      set_equivalent_type(KMP_HW_TILE, KMP_HW_L2);
    else if (get_level(KMP_HW_TILE) != -1)
      set_equivalent_type(KMP_HW_L2, KMP_HW_TILE);
  }
#endif

  // Perform post canonicalization checking
  KMP_ASSERT(depth > 0);
  for (int level = 0; level < depth; ++level) {
    // All counts, ratios, and types must be valid
    KMP_ASSERT(count[level] > 0 && ratio[level] > 0);
    KMP_ASSERT_VALID_HW_TYPE(types[level]);
    // Detected types must point to themselves
    KMP_ASSERT(equivalent[types[level]] == types[level]);
  }

#if KMP_AFFINITY_SUPPORTED
  // Set the number of affinity granularity levels
  if (__kmp_affinity_gran_levels < 0) {
    kmp_hw_t gran_type = get_equivalent_type(__kmp_affinity_gran);
    // Check if the user's granularity request is valid
    if (gran_type == KMP_HW_UNKNOWN) {
      // First try core, then thread, then package
      kmp_hw_t gran_types[3] = {KMP_HW_CORE, KMP_HW_THREAD, KMP_HW_SOCKET};
      for (auto g : gran_types) {
        if (get_equivalent_type(g) != KMP_HW_UNKNOWN) {
          gran_type = g;
          break;
        }
      }
      KMP_ASSERT(gran_type != KMP_HW_UNKNOWN);
      // Warn the user what granularity setting will be used instead
      KMP_AFF_WARNING(AffGranularityBad, "KMP_AFFINITY",
                      __kmp_hw_get_catalog_string(__kmp_affinity_gran),
                      __kmp_hw_get_catalog_string(gran_type));
      __kmp_affinity_gran = gran_type;
    }
#if KMP_GROUP_AFFINITY
    // If more than one processor group exists and the granularity specified by
    // the user is too coarse, then adjust "down" to processor group affinity,
    // because threads can only exist within one processor group.
    if (__kmp_num_proc_groups > 1) {
      int gran_depth = get_level(gran_type);
      int proc_group_depth = get_level(KMP_HW_PROC_GROUP);
      if (gran_depth >= 0 && proc_group_depth >= 0 &&
          gran_depth < proc_group_depth) {
        KMP_AFF_WARNING(AffGranTooCoarseProcGroup, "KMP_AFFINITY",
                        __kmp_hw_get_catalog_string(__kmp_affinity_gran));
        __kmp_affinity_gran = gran_type = KMP_HW_PROC_GROUP;
      }
    }
#endif
    __kmp_affinity_gran_levels = 0;
    for (int i = depth - 1; i >= 0 && get_type(i) != gran_type; --i)
      __kmp_affinity_gran_levels++;
  }
#endif // KMP_AFFINITY_SUPPORTED
}
// Canonicalize an explicit packages X cores/pkg X threads/core topology
void kmp_topology_t::canonicalize(int npackages, int ncores_per_pkg,
                                  int nthreads_per_core, int ncores) {
  int ndepth = 3;
  depth = ndepth;
  KMP_FOREACH_HW_TYPE(i) { equivalent[i] = KMP_HW_UNKNOWN; }
  for (int level = 0; level < depth; ++level) {
    count[level] = 0;
    ratio[level] = 0;
  }
  count[0] = npackages;
  count[1] = ncores;
  count[2] = __kmp_xproc;
  ratio[0] = npackages;
  ratio[1] = ncores_per_pkg;
  ratio[2] = nthreads_per_core;
  equivalent[KMP_HW_SOCKET] = KMP_HW_SOCKET;
  equivalent[KMP_HW_CORE] = KMP_HW_CORE;
  equivalent[KMP_HW_THREAD] = KMP_HW_THREAD;
  types[0] = KMP_HW_SOCKET;
  types[1] = KMP_HW_CORE;
  types[2] = KMP_HW_THREAD;
  _discover_uniformity();
}
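
// Example (hypothetical counts): a flat 2 socket x 4 cores/socket x
// 2 threads/core machine detected without a real enumeration source could be
// canonicalized as
//   __kmp_topology->canonicalize(2, 4, 2, 8);
// which fixes count[] = {2, 8, __kmp_xproc} and ratio[] = {2, 4, 2}.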
// Represents running sub IDs for a single core attribute where
// attribute values have SIZE possibilities.
template <size_t SIZE, typename IndexFunc> struct kmp_sub_ids_t {
  int last_level; // last level in topology to consider for sub_ids
  int sub_id[SIZE]; // The sub ID for a given attribute value
  int prev_sub_id[KMP_HW_LAST];
  IndexFunc indexer;

public:
  kmp_sub_ids_t(int last_level) : last_level(last_level) {
    KMP_ASSERT(last_level < KMP_HW_LAST);
    for (size_t i = 0; i < SIZE; ++i)
      sub_id[i] = -1;
    for (size_t i = 0; i < KMP_HW_LAST; ++i)
      prev_sub_id[i] = -1;
  }
  void update(const kmp_hw_thread_t &hw_thread) {
    int idx = indexer(hw_thread);
    KMP_ASSERT(idx < (int)SIZE);
    for (int level = 0; level <= last_level; ++level) {
      if (hw_thread.sub_ids[level] != prev_sub_id[level]) {
        if (level < last_level)
          sub_id[idx] = -1;
        sub_id[idx]++;
        break;
      }
    }
    for (int level = 0; level <= last_level; ++level)
      prev_sub_id[level] = hw_thread.sub_ids[level];
  }
  int get_sub_id(const kmp_hw_thread_t &hw_thread) const {
    return sub_id[indexer(hw_thread)];
  }
};
static kmp_str_buf_t *
__kmp_hw_get_catalog_core_string(const kmp_hw_attr_t &attr, kmp_str_buf_t *buf,
                                 bool plural) {
  __kmp_str_buf_init(buf);
  if (attr.is_core_type_valid())
    __kmp_str_buf_print(buf, "%s %s",
                        __kmp_hw_get_core_type_string(attr.get_core_type()),
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural));
  else
    __kmp_str_buf_print(buf, "%s eff=%d",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural),
                        attr.get_core_eff());
  return buf;
}
// Apply the KMP_HW_SUBSET envirable to the topology
// Returns true if KMP_HW_SUBSET filtered any processors
// otherwise, returns false
bool kmp_topology_t::filter_hw_subset() {
  // If KMP_HW_SUBSET wasn't requested, then do nothing.
  if (!__kmp_hw_subset)
    return false;

  // First, sort the KMP_HW_SUBSET items by the machine topology
  __kmp_hw_subset->sort();

  // Check to see if KMP_HW_SUBSET is a valid subset of the detected topology
  bool using_core_types = false;
  bool using_core_effs = false;
  int hw_subset_depth = __kmp_hw_subset->get_depth();
  kmp_hw_t specified[KMP_HW_LAST];
  int *topology_levels = (int *)KMP_ALLOCA(sizeof(int) * hw_subset_depth);
  KMP_ASSERT(hw_subset_depth > 0);
  KMP_FOREACH_HW_TYPE(i) { specified[i] = KMP_HW_UNKNOWN; }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < hw_subset_depth; ++i) {
    int max_count;
    const kmp_hw_subset_t::item_t &item = __kmp_hw_subset->at(i);
    int num = item.num[0];
    int offset = item.offset[0];
    kmp_hw_t type = item.type;
    kmp_hw_t equivalent_type = equivalent[type];
    int level = get_level(type);
    topology_levels[i] = level;

    // Check to see if current layer is in detected machine topology
    if (equivalent_type != KMP_HW_UNKNOWN) {
      __kmp_hw_subset->at(i).type = equivalent_type;
    } else {
      KMP_AFF_WARNING(AffHWSubsetNotExistGeneric,
                      __kmp_hw_get_catalog_string(type));
      return false;
    }

    // Check to see if current layer has already been specified either
    // directly or through an equivalent type
    if (specified[equivalent_type] != KMP_HW_UNKNOWN) {
      KMP_AFF_WARNING(AffHWSubsetEqvLayers, __kmp_hw_get_catalog_string(type),
                      __kmp_hw_get_catalog_string(specified[equivalent_type]));
      return false;
    }
    specified[equivalent_type] = type;

    // Check to see if each layer's num & offset parameters are valid
    max_count = get_ratio(level);
    if (max_count < 0 ||
        (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
      bool plural = (num > 1);
      KMP_AFF_WARNING(AffHWSubsetManyGeneric,
                      __kmp_hw_get_catalog_string(type, plural));
      return false;
    }

    // Check to see if core attributes are consistent
    if (core_level == level) {
      // Determine which core attributes are specified
      for (int j = 0; j < item.num_attrs; ++j) {
        if (item.attr[j].is_core_type_valid())
          using_core_types = true;
        if (item.attr[j].is_core_eff_valid())
          using_core_effs = true;
      }

      // Check if using a single core attribute on non-hybrid arch.
      // Do not ignore all of KMP_HW_SUBSET, just ignore the attribute.
      //
      // Check if using multiple core attributes on non-hybrid arch.
      // Ignore all of KMP_HW_SUBSET if this is the case.
      if ((using_core_effs || using_core_types) && !__kmp_is_hybrid_cpu()) {
        if (item.num_attrs == 1) {
          if (using_core_effs) {
            KMP_AFF_WARNING(AffHWSubsetIgnoringAttr, "efficiency");
          } else {
            KMP_AFF_WARNING(AffHWSubsetIgnoringAttr, "core_type");
          }
          using_core_effs = false;
          using_core_types = false;
        } else {
          KMP_AFF_WARNING(AffHWSubsetAttrsNonHybrid);
          return false;
        }
      }

      // Check if using both core types and core efficiencies together
      if (using_core_types && using_core_effs) {
        KMP_AFF_WARNING(AffHWSubsetIncompat, "core_type", "efficiency");
        return false;
      }

      // Check that core efficiency values are valid
      if (using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          if (item.attr[j].is_core_eff_valid()) {
            int core_eff = item.attr[j].get_core_eff();
            if (core_eff < 0 || core_eff >= num_core_efficiencies) {
              kmp_str_buf_t buf;
              __kmp_str_buf_init(&buf);
              __kmp_str_buf_print(&buf, "%d", item.attr[j].get_core_eff());
              __kmp_msg(kmp_ms_warning,
                        KMP_MSG(AffHWSubsetAttrInvalid, "efficiency", buf.str),
                        KMP_HNT(ValidValuesRange, 0, num_core_efficiencies - 1),
                        __kmp_msg_null);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      // Check that the number of requested cores with attributes is valid
      if (using_core_types || using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          int num = item.num[j];
          int offset = item.offset[j];
          int level_above = core_level - 1;
          if (level_above >= 0) {
            max_count = get_ncores_with_attr_per(item.attr[j], level_above);
            if (max_count <= 0 ||
                (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf, num > 0);
              KMP_AFF_WARNING(AffHWSubsetManyGeneric, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      if ((using_core_types || using_core_effs) && item.num_attrs > 1) {
        for (int j = 0; j < item.num_attrs; ++j) {
          // Ambiguous use of specific core attribute + generic core
          // e.g., 4c & 3c:intel_core or 4c & 3c:eff1
          if (!item.attr[j]) {
            kmp_hw_attr_t other_attr;
            for (int k = 0; k < item.num_attrs; ++k) {
              if (item.attr[k] != item.attr[j]) {
                other_attr = item.attr[k];
                break;
              }
            }
            kmp_str_buf_t buf;
            __kmp_hw_get_catalog_core_string(other_attr, &buf, item.num[j] > 0);
            KMP_AFF_WARNING(AffHWSubsetIncompat,
                            __kmp_hw_get_catalog_string(KMP_HW_CORE), buf.str);
            __kmp_str_buf_free(&buf);
            return false;
          }
          // Allow specifying a specific core type or core eff exactly once
          for (int k = 0; k < j; ++k) {
            if (!item.attr[j] || !item.attr[k])
              continue;
            if (item.attr[k] == item.attr[j]) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf,
                                               item.num[j] > 0);
              KMP_AFF_WARNING(AffHWSubsetAttrRepeat, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }
    }
  }

  struct core_type_indexer {
    int operator()(const kmp_hw_thread_t &t) const {
      switch (t.attrs.get_core_type()) {
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
      case KMP_HW_CORE_TYPE_ATOM:
        return 1;
      case KMP_HW_CORE_TYPE_CORE:
        return 2;
#endif
      case KMP_HW_CORE_TYPE_UNKNOWN:
        return 0;
      }
      KMP_ASSERT(0);
      return 0;
    }
  };
  struct core_eff_indexer {
    int operator()(const kmp_hw_thread_t &t) const {
      return t.attrs.get_core_eff();
    }
  };

  kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_TYPES, core_type_indexer> core_type_sub_ids(
      core_level);
  kmp_sub_ids_t<KMP_HW_MAX_NUM_CORE_EFFS, core_eff_indexer> core_eff_sub_ids(
      core_level);

  // Determine which hardware threads should be filtered.
  int num_filtered = 0;
  bool *filtered = (bool *)__kmp_allocate(sizeof(bool) * num_hw_threads);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Update type_sub_id
    if (using_core_types)
      core_type_sub_ids.update(hw_thread);
    if (using_core_effs)
      core_eff_sub_ids.update(hw_thread);

    // Check to see if this hardware thread should be filtered
    bool should_be_filtered = false;
    for (int hw_subset_index = 0; hw_subset_index < hw_subset_depth;
         ++hw_subset_index) {
      const auto &hw_subset_item = __kmp_hw_subset->at(hw_subset_index);
      int level = topology_levels[hw_subset_index];
      if (level == -1)
        continue;
      if ((using_core_effs || using_core_types) && level == core_level) {
        // Look for the core attribute in KMP_HW_SUBSET which corresponds to
        // this hardware thread's core attribute. Use that num,offset plus the
        // running sub_id for the particular core attribute of this hardware
        // thread to determine if the thread should be filtered or not.
        int attr_idx;
        kmp_hw_core_type_t core_type = hw_thread.attrs.get_core_type();
        int core_eff = hw_thread.attrs.get_core_eff();
        for (attr_idx = 0; attr_idx < hw_subset_item.num_attrs; ++attr_idx) {
          if (using_core_types &&
              hw_subset_item.attr[attr_idx].get_core_type() == core_type)
            break;
          if (using_core_effs &&
              hw_subset_item.attr[attr_idx].get_core_eff() == core_eff)
            break;
        }
        // This core attribute isn't in the KMP_HW_SUBSET so always filter it
        if (attr_idx == hw_subset_item.num_attrs) {
          should_be_filtered = true;
          break;
        }
        int sub_id;
        int num = hw_subset_item.num[attr_idx];
        int offset = hw_subset_item.offset[attr_idx];
        if (using_core_types)
          sub_id = core_type_sub_ids.get_sub_id(hw_thread);
        else
          sub_id = core_eff_sub_ids.get_sub_id(hw_thread);
        if (sub_id < offset ||
            (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      } else {
        int num = hw_subset_item.num[0];
        int offset = hw_subset_item.offset[0];
        if (hw_thread.sub_ids[level] < offset ||
            (num != kmp_hw_subset_t::USE_ALL &&
             hw_thread.sub_ids[level] >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      }
    }
    // Collect filtering information
    filtered[i] = should_be_filtered;
    if (should_be_filtered)
      num_filtered++;
  }

  // One last check that we shouldn't allow filtering entire machine
  if (num_filtered == num_hw_threads) {
    KMP_AFF_WARNING(AffHWSubsetAllFiltered);
    __kmp_free(filtered);
    return false;
  }

  // Apply the filter
  int new_index = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    if (!filtered[i]) {
      if (i != new_index)
        hw_threads[new_index] = hw_threads[i];
      new_index++;
    } else {
#if KMP_AFFINITY_SUPPORTED
      KMP_CPU_CLR(hw_threads[i].os_id, __kmp_affin_fullMask);
#endif
      __kmp_avail_proc--;
    }
  }

  KMP_DEBUG_ASSERT(new_index <= num_hw_threads);
  num_hw_threads = new_index;

  // Post hardware subset canonicalization
  _gather_enumeration_information();
  _discover_uniformity();
  _set_globals();
  _set_last_level_cache();
  __kmp_free(filtered);
  return true;
}
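
// Example (hypothetical environment): with KMP_HW_SUBSET=1s,2c,1t on a
// 2 socket x 4 core x 2 thread machine, the filter above keeps only the first
// socket's first two cores with one thread each, clears the filtered os_ids
// from __kmp_affin_fullMask, and shrinks num_hw_threads from 16 to 2.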
bool kmp_topology_t::is_close(int hwt1, int hwt2, int hw_level) const {
  if (hw_level >= depth)
    return true;
  bool retval = true;
  const kmp_hw_thread_t &t1 = hw_threads[hwt1];
  const kmp_hw_thread_t &t2 = hw_threads[hwt2];
  for (int i = 0; i < (depth - hw_level); ++i) {
    if (t1.ids[i] != t2.ids[i])
      return false;
  }
  return retval;
}
#if KMP_AFFINITY_SUPPORTED
class kmp_affinity_raii_t {
  kmp_affin_mask_t *mask;
  bool restored;

public:
  kmp_affinity_raii_t() : restored(false) {
    KMP_CPU_ALLOC(mask);
    KMP_ASSERT(mask != NULL);
    __kmp_get_system_affinity(mask, TRUE);
  }
  void restore() {
    __kmp_set_system_affinity(mask, TRUE);
    KMP_CPU_FREE(mask);
    restored = true;
  }
  ~kmp_affinity_raii_t() {
    if (!restored) {
      __kmp_set_system_affinity(mask, TRUE);
      KMP_CPU_FREE(mask);
    }
  }
};
#endif // KMP_AFFINITY_SUPPORTED
bool KMPAffinity::picked_api = false;

void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
void KMPAffinity::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::pick_api() {
  KMPAffinity *affinity_dispatch;
  if (picked_api)
    return;
#if KMP_USE_HWLOC
  // Only use Hwloc if affinity isn't explicitly disabled and
  // user requests Hwloc topology method
  if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
      __kmp_affinity_type != affinity_disabled) {
    affinity_dispatch = new KMPHwlocAffinity();
  } else
#endif
  {
    affinity_dispatch = new KMPNativeAffinity();
  }
  __kmp_affinity_dispatch = affinity_dispatch;
  picked_api = true;
}

void KMPAffinity::destroy_api() {
  if (__kmp_affinity_dispatch != NULL) {
    delete __kmp_affinity_dispatch;
    __kmp_affinity_dispatch = NULL;
    picked_api = false;
  }
}
#define KMP_ADVANCE_SCAN(scan)                                                 \
  while (*scan != '\0') {                                                      \
    scan++;                                                                    \
  }

// Print the affinity mask to the character array in a pretty format.
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(buf_len >= 40);
  KMP_ASSERT(mask);
  char *scan = buf;
  char *end = buf + buf_len - 1;

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
    KMP_ADVANCE_SCAN(scan);
    KMP_ASSERT(scan <= end);
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
      KMP_ADVANCE_SCAN(scan);
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
      KMP_ADVANCE_SCAN(scan);
      if (previous - start > 0) {
        KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
      }
    }
    KMP_ADVANCE_SCAN(scan);
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
    // Check for overflow
    if (end - scan < 2)
      break;
  }

  // Check for overflow
  KMP_ASSERT(scan <= end);
  return buf;
}
#undef KMP_ADVANCE_SCAN
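
// Example output (illustrative): a mask with bits {0,1,2,3,8,10} set prints
// as "0-3,8,10"; an empty mask prints as "{<empty>}".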
// Print the affinity mask to the string buffer object in a pretty format
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                           kmp_affin_mask_t *mask) {
  int start = 0, finish = 0, previous = 0;
  bool first_range;
  KMP_ASSERT(buf);
  KMP_ASSERT(mask);

  __kmp_str_buf_clear(buf);

  // Check for empty set.
  if (mask->begin() == mask->end()) {
    __kmp_str_buf_print(buf, "%s", "{<empty>}");
    return buf;
  }

  first_range = true;
  start = mask->begin();
  while (1) {
    // Find next range
    // [start, previous] is inclusive range of contiguous bits in mask
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
      previous = finish;
    }

    // The first range does not need a comma printed before it, but the rest
    // of the ranges do need a comma beforehand
    if (!first_range) {
      __kmp_str_buf_print(buf, "%s", ",");
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      __kmp_str_buf_print(buf, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      __kmp_str_buf_print(buf, "%u", start);
      if (previous - start > 0) {
        __kmp_str_buf_print(buf, ",%u", previous);
      }
    }
    // Start over with new start point
    start = finish;
    if (start == mask->end())
      break;
  }
  return buf;
}
// Return (possibly empty) affinity mask representing the offline CPUs
// Caller must free the mask
kmp_affin_mask_t *__kmp_affinity_get_offline_cpus() {
  kmp_affin_mask_t *offline;
  KMP_CPU_ALLOC(offline);
  KMP_CPU_ZERO(offline);
#if KMP_OS_LINUX
  int n, begin_cpu, end_cpu;
  kmp_safe_raii_file_t offline_file;
  auto skip_ws = [](FILE *f) {
    int c;
    do {
      c = fgetc(f);
    } while (isspace(c));
    if (c != EOF)
      ungetc(c, f);
  };
  // File contains CSV of integer ranges representing the offline CPUs
  // e.g., 1,2,4-7,9,11-15
  int status = offline_file.try_open("/sys/devices/system/cpu/offline", "r");
  if (status != 0)
    return offline;
  while (!feof(offline_file)) {
    skip_ws(offline_file);
    n = fscanf(offline_file, "%d", &begin_cpu);
    if (n != 1)
      break;
    skip_ws(offline_file);
    int c = fgetc(offline_file);
    if (c == EOF || c == ',') {
      // Just single CPU
      end_cpu = begin_cpu;
    } else if (c == '-') {
      // Range of CPUs
      skip_ws(offline_file);
      n = fscanf(offline_file, "%d", &end_cpu);
      if (n != 1)
        break;
      skip_ws(offline_file);
      c = fgetc(offline_file); // skip ','
    } else {
      break;
    }
    // Ensure a valid range of CPUs
    if (begin_cpu < 0 || begin_cpu >= __kmp_xproc || end_cpu < 0 ||
        end_cpu >= __kmp_xproc || begin_cpu > end_cpu) {
      continue;
    }
    // Insert [begin_cpu, end_cpu] into offline mask
    for (int cpu = begin_cpu; cpu <= end_cpu; ++cpu) {
      KMP_CPU_SET(cpu, offline);
    }
  }
#endif
  return offline;
}
// Return the number of available procs
int __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) {
  int avail_proc = 0;
  KMP_CPU_ZERO(mask);

#if KMP_GROUP_AFFINITY

  if (__kmp_num_proc_groups > 1) {
    int group;
    KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
    for (group = 0; group < __kmp_num_proc_groups; group++) {
      int i;
      int num = __kmp_GetActiveProcessorCount(group);
      for (i = 0; i < num; i++) {
        KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
        avail_proc++;
      }
    }
  } else

#endif /* KMP_GROUP_AFFINITY */

  {
    int proc;
    kmp_affin_mask_t *offline_cpus = __kmp_affinity_get_offline_cpus();
    for (proc = 0; proc < __kmp_xproc; proc++) {
      // Skip offline CPUs
      if (KMP_CPU_ISSET(proc, offline_cpus))
        continue;
      KMP_CPU_SET(proc, mask);
      avail_proc++;
    }
    KMP_CPU_FREE(offline_cpus);
  }

  return avail_proc;
}
kmp_affin_mask_t *__kmp_affin_fullMask = NULL;
kmp_affin_mask_t *__kmp_affin_origMask = NULL;
#if KMP_USE_HWLOC
static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) {
#if HWLOC_API_VERSION >= 0x00020000
  return hwloc_obj_type_is_cache(obj->type);
#else
  return obj->type == HWLOC_OBJ_CACHE;
#endif
}

// Returns KMP_HW_* type derived from HWLOC_* type
static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) {

  if (__kmp_hwloc_is_cache_type(obj)) {
    if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION)
      return KMP_HW_UNKNOWN;
    switch (obj->attr->cache.depth) {
    case 1:
      return KMP_HW_L1;
    case 2:
#if KMP_MIC_SUPPORTED
      if (__kmp_mic_type == mic3) {
        return KMP_HW_TILE;
      }
#endif
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }

  switch (obj->type) {
  case HWLOC_OBJ_PACKAGE:
    return KMP_HW_SOCKET;
  case HWLOC_OBJ_NUMANODE:
    return KMP_HW_NUMA;
  case HWLOC_OBJ_CORE:
    return KMP_HW_CORE;
  case HWLOC_OBJ_PU:
    return KMP_HW_THREAD;
  case HWLOC_OBJ_GROUP:
    if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_DIE)
      return KMP_HW_DIE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_TILE)
      return KMP_HW_TILE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_MODULE)
      return KMP_HW_MODULE;
    else if (obj->attr->group.kind == HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP)
      return KMP_HW_PROC_GROUP;
    return KMP_HW_UNKNOWN;
#if HWLOC_API_VERSION >= 0x00020100
  case HWLOC_OBJ_DIE:
    return KMP_HW_DIE;
#endif
  }
  return KMP_HW_UNKNOWN;
}
// Returns the number of objects of type 'type' below 'obj' within the topology
// tree structure. e.g., if obj is a HWLOC_OBJ_PACKAGE object and type is
// HWLOC_OBJ_PU, then this will return the number of PU's under the SOCKET
// object.
static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
                                           hwloc_obj_type_t type) {
  int retval = 0;
  hwloc_obj_t first;
  for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
                                           obj->logical_index, type, 0);
       first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology,
                                                       obj->type, first) == obj;
       first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,
                                          first)) {
    ++retval;
  }
  return retval;
}

// This gets the sub_id for a lower object under a higher object in the
// topology tree
static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher,
                                  hwloc_obj_t lower) {
  hwloc_obj_t obj;
  hwloc_obj_type_t ltype = lower->type;
  int lindex = lower->logical_index - 1;
  int sub_id = 0;
  // Get the previous lower object
  obj = hwloc_get_obj_by_type(t, ltype, lindex);
  while (obj && lindex >= 0 &&
         hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) {
    if (obj->userdata) {
      sub_id = (int)(RCAST(kmp_intptr_t, obj->userdata));
      break;
    }
    sub_id++;
    lindex--;
    obj = hwloc_get_obj_by_type(t, ltype, lindex);
  }
  // store sub_id + 1 so that 0 is differed from NULL
  lower->userdata = RCAST(void *, sub_id + 1);
  return sub_id;
}
static bool __kmp_affinity_create_hwloc_map(kmp_i18n_id_t *const msg_id) {
  kmp_hw_t type;
  int hw_thread_index, sub_id;
  int depth;
  hwloc_obj_t pu, obj, root, prev;
  kmp_hw_t types[KMP_HW_LAST];
  hwloc_obj_type_t hwloc_types[KMP_HW_LAST];

  hwloc_topology_t tp = __kmp_hwloc_topology;
  *msg_id = kmp_i18n_null;
  if (__kmp_affinity_verbose) {
    KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
  }

  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from hwloc on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    // hwloc only guarantees existance of PU object, so check PACKAGE and CORE
    hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0);
    if (o != NULL)
      nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE);
    else
      nCoresPerPkg = 1; // no PACKAGE found
    o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0);
    if (o != NULL)
      __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU);
    else
      __kmp_nThreadsPerCore = 1; // no CORE found
    __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
    if (nCoresPerPkg == 0)
      nCoresPerPkg = 1; // to prevent possible division by 0
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    return true;
  }

#if HWLOC_API_VERSION >= 0x00020400
  // Handle multiple types of cores if they exist on the system
  int nr_cpu_kinds = hwloc_cpukinds_get_nr(tp, 0);

  typedef struct kmp_hwloc_cpukinds_info_t {
    int efficiency;
    kmp_hw_core_type_t core_type;
    hwloc_bitmap_t mask;
  } kmp_hwloc_cpukinds_info_t;
  kmp_hwloc_cpukinds_info_t *cpukinds = nullptr;

  if (nr_cpu_kinds > 0) {
    unsigned nr_infos;
    struct hwloc_info_s *infos;
    cpukinds = (kmp_hwloc_cpukinds_info_t *)__kmp_allocate(
        sizeof(kmp_hwloc_cpukinds_info_t) * nr_cpu_kinds);
    for (unsigned idx = 0; idx < (unsigned)nr_cpu_kinds; ++idx) {
      cpukinds[idx].efficiency = -1;
      cpukinds[idx].core_type = KMP_HW_CORE_TYPE_UNKNOWN;
      cpukinds[idx].mask = hwloc_bitmap_alloc();
      if (hwloc_cpukinds_get_info(tp, idx, cpukinds[idx].mask,
                                  &cpukinds[idx].efficiency, &nr_infos, &infos,
                                  0) == 0) {
        for (unsigned i = 0; i < nr_infos; ++i) {
          if (__kmp_str_match("CoreType", 8, infos[i].name)) {
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
            if (__kmp_str_match("IntelAtom", 9, infos[i].value)) {
              cpukinds[idx].core_type = KMP_HW_CORE_TYPE_ATOM;
              break;
            } else if (__kmp_str_match("IntelCore", 9, infos[i].value)) {
              cpukinds[idx].core_type = KMP_HW_CORE_TYPE_CORE;
              break;
            }
#endif
          }
        }
      }
    }
  }
#endif

  root = hwloc_get_root_obj(tp);

  // Figure out the depth and types in the topology
  depth = 0;
  pu = hwloc_get_pu_obj_by_os_index(tp, __kmp_affin_fullMask->begin());
  KMP_ASSERT(pu);
  obj = pu;
  types[depth] = KMP_HW_THREAD;
  hwloc_types[depth] = obj->type;
  depth++;
  while (obj != root && obj != NULL) {
    obj = obj->parent;
#if HWLOC_API_VERSION >= 0x00020000
    if (obj->memory_arity) {
      hwloc_obj_t memory;
      for (memory = obj->memory_first_child; memory;
           memory = hwloc_get_next_child(tp, obj, memory)) {
        if (memory->type == HWLOC_OBJ_NUMANODE)
          break;
      }
      if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
        types[depth] = KMP_HW_NUMA;
        hwloc_types[depth] = memory->type;
        depth++;
      }
    }
#endif
    type = __kmp_hwloc_type_2_topology_type(obj);
    if (type != KMP_HW_UNKNOWN) {
      types[depth] = type;
      hwloc_types[depth] = obj->type;
      depth++;
    }
  }
  KMP_ASSERT(depth > 0);

  // Get the order for the types correct
  for (int i = 0, j = depth - 1; i < j; ++i, --j) {
    hwloc_obj_type_t hwloc_temp = hwloc_types[i];
    kmp_hw_t temp = types[i];
    types[i] = types[j];
    types[j] = temp;
    hwloc_types[i] = hwloc_types[j];
    hwloc_types[j] = hwloc_temp;
  }

  // Allocate the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);

  hw_thread_index = 0;
  pu = NULL;
  while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu))) {
    int index = depth - 1;
    bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask);
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
    if (included) {
      hw_thread.clear();
      hw_thread.ids[index] = pu->logical_index;
      hw_thread.os_id = pu->os_index;
      // If multiple core types, then set that attribute for the hw thread
#if HWLOC_API_VERSION >= 0x00020400
      if (cpukinds) {
        int cpukind_index = -1;
        for (int i = 0; i < nr_cpu_kinds; ++i) {
          if (hwloc_bitmap_isset(cpukinds[i].mask, hw_thread.os_id)) {
            cpukind_index = i;
            break;
          }
        }
        if (cpukind_index >= 0) {
          hw_thread.attrs.set_core_type(cpukinds[cpukind_index].core_type);
          hw_thread.attrs.set_core_eff(cpukinds[cpukind_index].efficiency);
        }
      }
#endif
      index--;
    }
    obj = pu;
    prev = obj;
    while (obj != root && obj != NULL) {
      obj = obj->parent;
#if HWLOC_API_VERSION >= 0x00020000
      // NUMA Nodes are handled differently since they are not within the
      // parent/child structure anymore. They are separate children
      // of obj (memory_first_child points to first memory child)
      if (obj->memory_arity) {
        hwloc_obj_t memory;
        for (memory = obj->memory_first_child; memory;
             memory = hwloc_get_next_child(tp, obj, memory)) {
          if (memory->type == HWLOC_OBJ_NUMANODE)
            break;
        }
        if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
          sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev);
          if (included) {
            hw_thread.ids[index] = memory->logical_index;
            hw_thread.ids[index + 1] = sub_id;
            index--;
          }
          prev = memory;
        }
        prev = obj;
      }
#endif
      type = __kmp_hwloc_type_2_topology_type(obj);
      if (type != KMP_HW_UNKNOWN) {
        sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev);
        if (included) {
          hw_thread.ids[index] = obj->logical_index;
          hw_thread.ids[index + 1] = sub_id;
          index--;
        }
        prev = obj;
      }
    }
    if (included)
      hw_thread_index++;
  }

#if HWLOC_API_VERSION >= 0x00020400
  // Free the core types information
  if (cpukinds) {
    for (int idx = 0; idx < nr_cpu_kinds; ++idx)
      hwloc_bitmap_free(cpukinds[idx].mask);
    __kmp_free(cpukinds);
  }
#endif
  __kmp_topology->sort_ids();
  return true;
}
#endif // KMP_USE_HWLOC
// If we don't know how to retrieve the machine's processor topology, or
// encounter an error in doing so, this routine is called to form a "flat"
// mapping of os thread id's <-> processor id's.
static bool __kmp_affinity_create_flat_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  int depth = 3;
  kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD};

  if (__kmp_affinity_verbose) {
    KMP_INFORM(UsingFlatOS, "KMP_AFFINITY");
  }

  // Even if __kmp_affinity_type == affinity_none, this routine might still
  // be called to set __kmp_ncores, as well as
  // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    __kmp_ncores = nPackages = __kmp_xproc;
    __kmp_nThreadsPerCore = nCoresPerPkg = 1;
    return true;
  }

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
  // Make sure all these vars are set correctly
  __kmp_ncores = nPackages = __kmp_avail_proc;
  __kmp_nThreadsPerCore = nCoresPerPkg = 1;

  // Construct the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct);
    hw_thread.clear();
    hw_thread.os_id = i;
    hw_thread.ids[0] = i;
    hw_thread.ids[1] = 0;
    hw_thread.ids[2] = 0;
    avail_ct++;
  }
  if (__kmp_affinity_verbose) {
    KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
  }
  return true;
}
#if KMP_GROUP_AFFINITY
// If multiple Windows* OS processor groups exist, we can create a 2-level
// topology map with the groups at level 0 and the individual procs at level 1.
static bool __kmp_affinity_create_proc_group_map(kmp_i18n_id_t *const msg_id) {
  *msg_id = kmp_i18n_null;
  int depth = 3;
  kmp_hw_t types[] = {KMP_HW_PROC_GROUP, KMP_HW_CORE, KMP_HW_THREAD};
  const static size_t BITS_PER_GROUP = CHAR_BIT * sizeof(DWORD_PTR);

  if (__kmp_affinity_verbose) {
    KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
  }

  // If we aren't affinity capable, then use flat topology
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    nPackages = __kmp_num_proc_groups;
    __kmp_nThreadsPerCore = 1;
    __kmp_ncores = __kmp_xproc;
    nCoresPerPkg = nPackages / __kmp_ncores;
    return true;
  }

  // Construct the data structure to be returned.
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
  int avail_ct = 0;
  int i;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct++);
    hw_thread.clear();
    hw_thread.os_id = i;
    hw_thread.ids[0] = i / BITS_PER_GROUP;
    hw_thread.ids[1] = hw_thread.ids[2] = i % BITS_PER_GROUP;
  }
  return true;
}
#endif /* KMP_GROUP_AFFINITY */
#if KMP_ARCH_X86 || KMP_ARCH_X86_64

template <kmp_uint32 LSB, kmp_uint32 MSB>
static inline unsigned __kmp_extract_bits(kmp_uint32 v) {
  const kmp_uint32 SHIFT_LEFT = sizeof(kmp_uint32) * 8 - 1 - MSB;
  const kmp_uint32 SHIFT_RIGHT = LSB;
  kmp_uint32 retval = v;
  retval <<= SHIFT_LEFT;
  retval >>= (SHIFT_LEFT + SHIFT_RIGHT);
  return retval;
}

static int __kmp_cpuid_mask_width(int count) {
  int r = 0;
  while ((1 << r) < count)
    ++r;
  return r;
}
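
// Worked example: __kmp_extract_bits<8, 15>(0x0000AB00) shifts away the bits
// above 15 and below 8, returning 0xAB. __kmp_cpuid_mask_width(6) returns 3,
// since 2^3 = 8 is the smallest power of two >= 6.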
class apicThreadInfo {
public:
  unsigned osId; // param to __kmp_affinity_bind_thread
  unsigned apicId; // from cpuid after binding
  unsigned maxCoresPerPkg; //      ""
  unsigned maxThreadsPerPkg; //      ""
  unsigned pkgId; // inferred from above values
  unsigned coreId; //      ""
  unsigned threadId; //      ""
};

static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
                                                     const void *b) {
  const apicThreadInfo *aa = (const apicThreadInfo *)a;
  const apicThreadInfo *bb = (const apicThreadInfo *)b;
  if (aa->pkgId < bb->pkgId)
    return -1;
  if (aa->pkgId > bb->pkgId)
    return 1;
  if (aa->coreId < bb->coreId)
    return -1;
  if (aa->coreId > bb->coreId)
    return 1;
  if (aa->threadId < bb->threadId)
    return -1;
  if (aa->threadId > bb->threadId)
    return 1;
  return 0;
}
class kmp_cache_info_t {
public:
  struct info_t {
    unsigned level, mask;
  };
  kmp_cache_info_t() : depth(0) { get_leaf4_levels(); }
  size_t get_depth() const { return depth; }
  info_t &operator[](size_t index) { return table[index]; }
  const info_t &operator[](size_t index) const { return table[index]; }

  static kmp_hw_t get_topology_type(unsigned level) {
    KMP_DEBUG_ASSERT(level >= 1 && level <= MAX_CACHE_LEVEL);
    switch (level) {
    case 1:
      return KMP_HW_L1;
    case 2:
      return KMP_HW_L2;
    case 3:
      return KMP_HW_L3;
    }
    return KMP_HW_UNKNOWN;
  }

private:
  static const int MAX_CACHE_LEVEL = 3;

  size_t depth;
  info_t table[MAX_CACHE_LEVEL];

  void get_leaf4_levels() {
    unsigned level = 0;
    while (depth < MAX_CACHE_LEVEL) {
      unsigned cache_type, max_threads_sharing;
      unsigned cache_level, cache_mask_width;
      kmp_cpuid buf2;
      __kmp_x86_cpuid(4, level, &buf2);
      cache_type = __kmp_extract_bits<0, 4>(buf2.eax);
      if (!cache_type)
        break;
      // Skip instruction caches
      if (cache_type == 2) {
        level++;
        continue;
      }
      max_threads_sharing = __kmp_extract_bits<14, 25>(buf2.eax) + 1;
      cache_mask_width = __kmp_cpuid_mask_width(max_threads_sharing);
      cache_level = __kmp_extract_bits<5, 7>(buf2.eax);
      table[depth].level = cache_level;
      table[depth].mask = ((-1) << cache_mask_width);
      depth++;
      level++;
    }
  }
};
// On IA-32 architecture and Intel(R) 64 architecture, we attempt to use an
// algorithm which cycles through the available os threads, setting the
// current thread's affinity mask to that thread, and then retrieves the Apic
// Id for each thread context using the cpuid instruction.
static bool __kmp_affinity_create_apicid_map(kmp_i18n_id_t *const msg_id) {
  kmp_cpuid buf;
  *msg_id = kmp_i18n_null;

  if (__kmp_affinity_verbose) {
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
  }

  // Check if cpuid leaf 4 is supported.
  __kmp_x86_cpuid(0, 0, &buf);
  if (buf.eax < 4) {
    *msg_id = kmp_i18n_str_NoLeaf4Support;
    return false;
  }

  // If we are not capable of binding to each proc, use the defaults that can
  // be calculated from issuing cpuid without binding.
  if (!KMP_AFFINITY_CAPABLE()) {
    // Hack to try and infer the machine topology using only the data
    // available from cpuid on the current thread, and __kmp_xproc.
    KMP_ASSERT(__kmp_affinity_type == affinity_none);

    // Get an upper bound on the number of threads per package using cpuid(1).
    __kmp_x86_cpuid(1, 0, &buf);
    int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (maxThreadsPerPkg == 0) {
      maxThreadsPerPkg = 1;
    }

    // The num cores per pkg comes from cpuid(4); 1 must be added to the
    // encoded value. First check if cpuid(4) is supported.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      nCoresPerPkg = 1;
    }

    // If the machine is not affinity capable, assume that HT is off.
    __kmp_ncores = __kmp_xproc;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    __kmp_nThreadsPerCore = 1;
    return true;
  }

  // From here on, we can assume it is safe to call __kmp_get_system_affinity()
  // and __kmp_set_system_affinity(), even if __kmp_affinity_type =
  // affinity_none.

  // Save the affinity mask for the current thread.
  kmp_affinity_raii_t previous_affinity;

  // Run through each of the available contexts, binding the current thread to
  // it, and obtaining the pertinent information using the cpuid instruction.
  // The Apic Id (bits 24:31 of ebx after cpuid(1)) is of the form
  // pkg# : core# : thread#; the max threads/pkg and max cores/pkg fields
  // determine the widths of the core# and thread# fields.
  unsigned i;
  apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
      __kmp_avail_proc * sizeof(apicThreadInfo));
  unsigned nApics = 0;
  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);

    __kmp_affinity_dispatch->bind_thread(i);
    threadInfo[nApics].osId = i;

    // The apic id and max threads per pkg come from cpuid(1).
    __kmp_x86_cpuid(1, 0, &buf);
    if (((buf.edx >> 9) & 1) == 0) {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_ApicNotPresent;
      return false;
    }
    threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
    threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
    if (threadInfo[nApics].maxThreadsPerPkg == 0) {
      threadInfo[nApics].maxThreadsPerPkg = 1;
    }

    // Max cores per pkg comes from cpuid(4), if supported.
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
      threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
    } else {
      threadInfo[nApics].maxCoresPerPkg = 1;
    }

    // Infer the pkgId / coreId / threadId using only the info obtained locally.
    int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
    threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;

    int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
    int widthT = widthCT - widthC;
    if (widthT < 0) {
      // This would only happen if the cpuid info was inconsistent.
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      return false;
    }

    int maskC = (1 << widthC) - 1;
    threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;

    int maskT = (1 << widthT) - 1;
    threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;

    nApics++;
  }

  // We've collected all the info we need.
  // Restore the old affinity mask for this thread.
  previous_affinity.restore();

  // Sort the threadInfo table by physical Id.
  qsort(threadInfo, nApics, sizeof(*threadInfo),
        __kmp_affinity_cmp_apicThreadInfo_phys_id);

  // The table is now sorted by pkgId / coreId / threadId, but we don't know
  // the radix of any of the fields, so determine the counts now, and perform
  // a consistency check: cpuid for any thread bound to a package must return
  // the same maxThreadsPerPkg and maxCoresPerPkg.
  nPackages = 1;
  nCoresPerPkg = 1;
  __kmp_nThreadsPerCore = 1;
  unsigned nCores = 1;

  unsigned pkgCt = 1; // to determine radii
  unsigned lastPkgId = threadInfo[0].pkgId;
  unsigned coreCt = 1;
  unsigned lastCoreId = threadInfo[0].coreId;
  unsigned threadCt = 1;
  unsigned lastThreadId = threadInfo[0].threadId;

  // intra-pkg consist checks
  unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
  unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;

  for (i = 1; i < nApics; i++) {
    if (threadInfo[i].pkgId != lastPkgId) {
      nCores++;
      pkgCt++;
      lastPkgId = threadInfo[i].pkgId;
      if ((int)coreCt > nCoresPerPkg)
        nCoresPerPkg = coreCt;
      coreCt = 1;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;

      // This is a different package; reset the consistency check vars and
      // go on to the next iteration.
      prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
      prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
      continue;
    }

    if (threadInfo[i].coreId != lastCoreId) {
      nCores++;
      coreCt++;
      lastCoreId = threadInfo[i].coreId;
      if ((int)threadCt > __kmp_nThreadsPerCore)
        __kmp_nThreadsPerCore = threadCt;
      threadCt = 1;
      lastThreadId = threadInfo[i].threadId;
    } else if (threadInfo[i].threadId != lastThreadId) {
      threadCt++;
      lastThreadId = threadInfo[i].threadId;
    } else {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
      return false;
    }

    // Check that the maxCoresPerPkg and maxThreadsPerPkg fields agree between
    // all the threads bound to a given package.
    if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
        (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
      __kmp_free(threadInfo);
      *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
      return false;
    }
  }
  nPackages = pkgCt;
  if ((int)coreCt > nCoresPerPkg)
    nCoresPerPkg = coreCt;
  if ((int)threadCt > __kmp_nThreadsPerCore)
    __kmp_nThreadsPerCore = threadCt;
  __kmp_ncores = nCores;
  KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc);

  // Now that we've determined the number of packages, cores per package, and
  // threads per core, construct the data structure to be returned.
  int pkgLevel = 0;
  int coreLevel = 1;
  int threadLevel = 2;
  int depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
  kmp_hw_t types[3];
  int idx = 0;
  if (pkgLevel >= 0)
    types[idx++] = KMP_HW_SOCKET;
  if (coreLevel >= 0)
    types[idx++] = KMP_HW_CORE;
  if (threadLevel >= 0)
    types[idx++] = KMP_HW_THREAD;

  KMP_ASSERT(depth > 0);
  __kmp_topology = kmp_topology_t::allocate(nApics, depth, types);

  for (i = 0; i < nApics; ++i) {
    int idx = 0;
    unsigned os = threadInfo[i].osId;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.clear();

    if (pkgLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].pkgId;
    }
    if (coreLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].coreId;
    }
    if (threadLevel >= 0) {
      hw_thread.ids[idx++] = threadInfo[i].threadId;
    }
    hw_thread.os_id = os;
  }

  __kmp_free(threadInfo);
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
    return false;
  }
  return true;
}
// Hybrid cpu detection using CPUID.1A
// Thread should be pinned to processor already
static void __kmp_get_hybrid_info(kmp_hw_core_type_t *type, int *efficiency,
                                  unsigned *native_model_id) {
  kmp_cpuid buf;
  __kmp_x86_cpuid(0x1a, 0, &buf);
  *type = (kmp_hw_core_type_t)__kmp_extract_bits<24, 31>(buf.eax);
  switch (*type) {
  case KMP_HW_CORE_TYPE_ATOM:
    *efficiency = 0;
    break;
  case KMP_HW_CORE_TYPE_CORE:
    *efficiency = 1;
    break;
  default:
    *efficiency = 0;
  }
  *native_model_id = __kmp_extract_bits<0, 23>(buf.eax);
}
// Intel(R) microarchitecture code name Nehalem, Dunnington and later
// architectures support a newer interface for specifying the x2APIC Ids,
// based on CPUID.B or CPUID.1F
enum {
  INTEL_LEVEL_TYPE_INVALID = 0, // Package level
  INTEL_LEVEL_TYPE_SMT = 1,
  INTEL_LEVEL_TYPE_CORE = 2,
  INTEL_LEVEL_TYPE_TILE = 3,
  INTEL_LEVEL_TYPE_MODULE = 4,
  INTEL_LEVEL_TYPE_DIE = 5,
  INTEL_LEVEL_TYPE_LAST = 6,
};

struct cpuid_level_info_t {
  unsigned level_type, mask, mask_width, nitems, cache_mask;
};
static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) {
  switch (intel_type) {
  case INTEL_LEVEL_TYPE_INVALID:
    return KMP_HW_SOCKET;
  case INTEL_LEVEL_TYPE_SMT:
    return KMP_HW_THREAD;
  case INTEL_LEVEL_TYPE_CORE:
    return KMP_HW_CORE;
  case INTEL_LEVEL_TYPE_TILE:
    return KMP_HW_TILE;
  case INTEL_LEVEL_TYPE_MODULE:
    return KMP_HW_MODULE;
  case INTEL_LEVEL_TYPE_DIE:
    return KMP_HW_DIE;
  }
  return KMP_HW_UNKNOWN;
}
// This function takes the topology leaf, a levels array to store the levels
// detected, and a bitmap of the known levels.
// Returns the number of levels in the topology
static unsigned
__kmp_x2apicid_get_levels(int leaf,
                          cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST],
                          kmp_uint64 known_levels) {
  unsigned level, levels_index;
  unsigned level_type, mask_width, nitems;
  kmp_cpuid buf;

  // New algorithm has known topology layers act as highest unknown topology
  // layers when unknown topology layers exist.
  // e.g., Suppose layers were SMT <X> CORE <Y> <Z> PACKAGE, where <X> <Y> <Z>
  // are unknown topology layers. Then SMT will take the characteristics of
  // <X>. This eliminates unknown portions of the topology while still keeping
  // the correct structure.
  level = levels_index = 0;
  do {
    __kmp_x86_cpuid(leaf, level, &buf);
    level_type = __kmp_extract_bits<8, 15>(buf.ecx);
    mask_width = __kmp_extract_bits<0, 4>(buf.eax);
    nitems = __kmp_extract_bits<0, 15>(buf.ebx);
    if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0)
      return 0;

    if (known_levels & (1ull << level_type)) {
      // Add a new level to the topology
      KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST);
      levels[levels_index].level_type = level_type;
      levels[levels_index].mask_width = mask_width;
      levels[levels_index].nitems = nitems;
      levels_index++;
    } else {
      // If it is an unknown level, then logically move the previous layer up
      if (levels_index > 0) {
        levels[levels_index - 1].mask_width = mask_width;
        levels[levels_index - 1].nitems = nitems;
      }
    }
    level++;
  } while (level_type != INTEL_LEVEL_TYPE_INVALID);

  // Set the masks to & with apicid
  for (unsigned i = 0; i < levels_index; ++i) {
    if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) {
      levels[i].mask = ~((-1) << levels[i].mask_width);
      levels[i].cache_mask = (-1) << levels[i].mask_width;
      for (unsigned j = 0; j < i; ++j)
        levels[i].mask ^= levels[j].mask;
    } else {
      KMP_DEBUG_ASSERT(levels_index > 0);
      levels[i].mask = (-1) << levels[i - 1].mask_width;
      levels[i].cache_mask = 0;
    }
  }
  return levels_index;
}
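
// Worked example (hypothetical leaf 11 output): with SMT at level 0
// (mask_width 1) and CORE at level 1 (mask_width 5), an x2APIC id decodes as
//   thread = apic_id & 0x1
//   core   = (apic_id & 0x1e) >> 1
//   socket = apic_id >> 5
// which are exactly the mask/shift pairs computed above.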
2508static bool __kmp_affinity_create_x2apicid_map(kmp_i18n_id_t *
const msg_id) {
2510 cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST];
2511 kmp_hw_t types[INTEL_LEVEL_TYPE_LAST];
2512 unsigned levels_index;
2514 kmp_uint64 known_levels;
2515 int topology_leaf, highest_leaf, apic_id;
2517 static int leaves[] = {0, 0};
2519 kmp_i18n_id_t leaf_message_id;
2521 KMP_BUILD_ASSERT(
sizeof(known_levels) * CHAR_BIT > KMP_HW_LAST);
  *msg_id = kmp_i18n_null;
  if (__kmp_affinity_verbose) {
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
  }

  // Get the known levels for unique topology detection.
  known_levels = 0ull;
  for (int i = 0; i < INTEL_LEVEL_TYPE_LAST; ++i) {
    if (__kmp_intel_type_2_topology_type(i) != KMP_HW_UNKNOWN) {
      known_levels |= (1ull << i);
    }
  }

  // Get the highest cpuid leaf supported.
  __kmp_x86_cpuid(0, 0, &buf);
  highest_leaf = buf.eax;

  // If a specific topology method was requested, only allow that leaf;
  // otherwise try leaf 31 first, then fall back to leaf 11.
  if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
    num_leaves = 1;
    leaves[0] = 11;
    leaf_message_id = kmp_i18n_str_NoLeaf11Support;
  } else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    num_leaves = 1;
    leaves[0] = 31;
    leaf_message_id = kmp_i18n_str_NoLeaf31Support;
  } else {
    num_leaves = 2;
    leaves[0] = 31;
    leaves[1] = 11;
    leaf_message_id = kmp_i18n_str_NoLeaf11Support;
  }

  // Check to see if the cpuid leaf is supported.
  __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
  topology_leaf = -1;
  for (int i = 0; i < num_leaves; ++i) {
    int leaf = leaves[i];
    if (highest_leaf < leaf)
      continue;
    __kmp_x86_cpuid(leaf, 0, &buf);
    if (buf.ebx == 0)
      continue;
    topology_leaf = leaf;
    levels_index = __kmp_x2apicid_get_levels(leaf, levels, known_levels);
    if (levels_index == 0)
      continue;
    break;
  }
  if (topology_leaf == -1 || levels_index == 0) {
    *msg_id = leaf_message_id;
    return false;
  }
  KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST);

  // If we cannot set/get the system affinity, infer the machine topology
  // using only cpuid on the current thread and __kmp_xproc.
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    for (unsigned i = 0; i < levels_index; ++i) {
      if (levels[i].level_type == INTEL_LEVEL_TYPE_SMT) {
        __kmp_nThreadsPerCore = levels[i].nitems;
      } else if (levels[i].level_type == INTEL_LEVEL_TYPE_CORE) {
        nCoresPerPkg = levels[i].nitems;
      }
    }
    __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
    nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
    return true;
  }

  // Allocate the data structure to be returned.
  int depth = levels_index;
  for (int i = depth - 1, j = 0; i >= 0; --i, ++j)
    types[j] = __kmp_intel_type_2_topology_type(levels[i].level_type);
  __kmp_topology =
      kmp_topology_t::allocate(__kmp_avail_proc, levels_index, types);

  // Insert equivalent cache types if they exist.
  kmp_cache_info_t cache_info;
  for (size_t i = 0; i < cache_info.get_depth(); ++i) {
    const kmp_cache_info_t::info_t &info = cache_info[i];
    unsigned cache_mask = info.mask;
    unsigned cache_level = info.level;
    for (unsigned j = 0; j < levels_index; ++j) {
      unsigned hw_cache_mask = levels[j].cache_mask;
      kmp_hw_t cache_type = kmp_cache_info_t::get_topology_type(cache_level);
      if (hw_cache_mask == cache_mask && j < levels_index - 1) {
        kmp_hw_t type =
            __kmp_intel_type_2_topology_type(levels[j + 1].level_type);
        __kmp_topology->set_equivalent_type(cache_type, type);
      }
    }
  }

  // Save the affinity mask for the current thread; restored on scope exit.
  kmp_affinity_raii_t previous_affinity;

  // Run through each of the available contexts, binding the current thread
  // to it, and obtaining the pertinent information using the cpuid instr.
  unsigned int proc;
  int hw_thread_index = 0;
  KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
    cpuid_level_info_t my_levels[INTEL_LEVEL_TYPE_LAST];
    unsigned my_levels_index;

    // Skip this proc if it is not included in the machine model.
    if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
      continue;
    }
    KMP_DEBUG_ASSERT(hw_thread_index < __kmp_avail_proc);

    __kmp_affinity_dispatch->bind_thread(proc);

    // The x2APIC id of the bound context is returned in EDX.
    __kmp_x86_cpuid(topology_leaf, 0, &buf);
    unsigned apic_id = buf.edx;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
    my_levels_index =
        __kmp_x2apicid_get_levels(topology_leaf, my_levels, known_levels);
    if (my_levels_index == 0 || my_levels_index != levels_index) {
      *msg_id = kmp_i18n_str_InvalidCpuidInfo;
      return false;
    }
    hw_thread.clear();
    hw_thread.os_id = proc;
    // Put in topology information.
    for (unsigned j = 0, idx = depth - 1; j < my_levels_index; ++j, --idx) {
      hw_thread.ids[idx] = apic_id & my_levels[j].mask;
      if (j > 0) {
        hw_thread.ids[idx] >>= my_levels[j - 1].mask_width;
      }
    }
    // Hybrid cpu detection (core type and efficiency).
    if (__kmp_is_hybrid_cpu() && highest_leaf >= 0x1a) {
      kmp_hw_core_type_t type;
      unsigned native_model_id;
      int efficiency;
      __kmp_get_hybrid_info(&type, &efficiency, &native_model_id);
      hw_thread.attrs.set_core_type(type);
      hw_thread.attrs.set_core_eff(efficiency);
    }
    hw_thread_index++;
  }
  KMP_ASSERT(hw_thread_index > 0);
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
    return false;
  }
  return true;
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
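// Labels for the fields in each per-processor record parsed from
// /proc/cpuinfo. nodeIdIndex is the base index for the (possibly
// multi-level) NUMA node ids, so maxIndex can grow beyond pkgIdIndex
// while scanning.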
#define osIdIndex 0
#define threadIdIndex 1
#define coreIdIndex 2
#define pkgIdIndex 3
#define nodeIdIndex 4

typedef unsigned *ProcCpuInfo;
static unsigned maxIndex = pkgIdIndex;

static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
                                                  const void *b) {
  unsigned i;
  const unsigned *aa = *(unsigned *const *)a;
  const unsigned *bb = *(unsigned *const *)b;
  for (i = maxIndex;; i--) {
    if (aa[i] < bb[i])
      return -1;
    if (aa[i] > bb[i])
      return 1;
    if (i == osIdIndex)
      break;
  }
  return 0;
}
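// Hierarchical scheduling support: map software threads onto the cache/NUMA
// hierarchy discovered above. Only compiled when KMP_USE_HIER_SCHED is on.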
#if KMP_USE_HIER_SCHED
// Set the array sizes for the hierarchy layers.
static void __kmp_dispatch_set_hierarchy_values() {
  // Set the maximum number of L1's to the number of cores, the maximum
  // number of L2's to either cores/2 (Intel(R) Xeon Phi(TM)) or cores, and
  // the maximum number of NUMA nodes and L3's to the number of packages.
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] =
      nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores;
#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) &&  \
    KMP_MIC_SUPPORTED
  if (__kmp_mic_type >= mic3)
    __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2;
  else
#endif
    __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages;
  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1;
  // Set the number of hardware threads per unit at each layer.
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] =
      __kmp_nThreadsPerCore;
#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) &&  \
    KMP_MIC_SUPPORTED
  if (__kmp_mic_type >= mic3)
    __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
        2 * __kmp_nThreadsPerCore;
  else
#endif
    __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
        __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] =
      nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] =
      nCoresPerPkg * __kmp_nThreadsPerCore;
  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] =
      nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
}

// Return the index into the hierarchy for this tid and layer type (L1, L2,
// etc.), i.e. this thread's L1, or this thread's L2, etc.
int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) {
  int index = type + 1;
  int num_hw_threads =
      __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
  KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST);
  if (type == kmp_hier_layer_e::LAYER_THREAD)
    return tid;
  else if (type == kmp_hier_layer_e::LAYER_LOOP)
    return 0;
  KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0);
  if (tid >= num_hw_threads)
    tid = tid % num_hw_threads;
  return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index];
}

// Return the number of t1's per t2.
int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) {
  int i1 = t1 + 1;
  int i2 = t2 + 1;
  KMP_DEBUG_ASSERT(i1 <= i2);
  KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST);
  KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST);
  KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0);
  // (nthreads / t2) / (nthreads / t1) = t1 / t2
  return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1];
}
#endif // KMP_USE_HIER_SCHED
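// Names of the file and environment variable used for cpuinfo-based topology
// discovery; KMP_CPUINFO_FILE overrides the default /proc/cpuinfo.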
static inline const char *__kmp_cpuinfo_get_filename() {
  const char *filename;
  if (__kmp_cpuinfo_file != nullptr)
    filename = __kmp_cpuinfo_file;
  else
    filename = "/proc/cpuinfo";
  return filename;
}

static inline const char *__kmp_cpuinfo_get_envvar() {
  const char *envvar = nullptr;
  if (__kmp_cpuinfo_file != nullptr)
    envvar = "KMP_CPUINFO_FILE";
  return envvar;
}
static bool __kmp_affinity_create_cpuinfo_map(int *line,
                                              kmp_i18n_id_t *const msg_id) {
  const char *filename = __kmp_cpuinfo_get_filename();
  const char *envvar = __kmp_cpuinfo_get_envvar();
  *msg_id = kmp_i18n_null;

  if (__kmp_affinity_verbose) {
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
  }

  kmp_safe_raii_file_t f(filename, "r", envvar);

  // Scan of the file, and count the number of "processor" (osId) fields,
  // and find the highest value of <n> for a node_<n> field.
  char buf[256];
  unsigned num_records = 0;
  while (!feof(f)) {
    buf[sizeof(buf) - 1] = 1;
    if (!fgets(buf, sizeof(buf), f)) {
      // Read errors presumably because of EOF.
      break;
    }

    char s1[] = "processor";
    if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
      num_records++;
      continue;
    }

    // FIXME - this will match "node_<n> <garbage>"
    unsigned level;
    if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
      // Validate the input first:
      if (level > (unsigned)__kmp_xproc) { // level is too big
        level = __kmp_xproc;
      }
      if (nodeIdIndex + level >= maxIndex) {
        maxIndex = nodeIdIndex + level;
      }
      continue;
    }
  }

  // Check for empty file / no valid processor records, or too many. The
  // number of records can't exceed the number of valid bits in the affinity
  // mask.
  if (num_records == 0) {
    *msg_id = kmp_i18n_str_NoProcRecords;
    return false;
  }
  if (num_records > (unsigned)__kmp_xproc) {
    *msg_id = kmp_i18n_str_TooManyProcRecords;
    return false;
  }

  // Set the file pointer back to the beginning, so that we can scan the
  // file again, this time performing a full parse of the data.
  if (fseek(f, 0, SEEK_SET) != 0) {
    *msg_id = kmp_i18n_str_CantRewindCpuinfo;
    return false;
  }

  // Allocate the array of records to store the proc info in. The dummy
  // element at the end makes the logic in filling them out easier to code.
  unsigned **threadInfo =
      (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
  unsigned i;
  for (i = 0; i <= num_records; i++) {
    threadInfo[i] =
        (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  }

#define CLEANUP_THREAD_INFO                                                    \
  for (i = 0; i <= num_records; i++) {                                         \
    __kmp_free(threadInfo[i]);                                                 \
  }                                                                            \
  __kmp_free(threadInfo);

  // A value of UINT_MAX means that we didn't find the field.
  unsigned __index;

#define INIT_PROC_INFO(p)                                                      \
  for (__index = 0; __index <= maxIndex; __index++) {                          \
    (p)[__index] = UINT_MAX;                                                   \
  }

  for (i = 0; i <= num_records; i++) {
    INIT_PROC_INFO(threadInfo[i]);
  }
  unsigned num_avail = 0;
  *line = 0;
  while (!feof(f)) {
    // Create an inner scoping level, so that all the goto targets at the end
    // of the loop appear in an outer scoping level. This avoids warnings
    // about jumping past an initialization to a target in the same block.
    {
      buf[sizeof(buf) - 1] = 1;
      bool long_line = false;
      if (!fgets(buf, sizeof(buf), f)) {
        // Read errors presumably because of EOF. If there is valid data in
        // threadInfo[num_avail], then fake a blank line to ensure that the
        // last record gets parsed.
        bool valid = false;
        for (i = 0; i <= maxIndex; i++) {
          if (threadInfo[num_avail][i] != UINT_MAX) {
            valid = true;
          }
        }
        if (!valid) {
          break;
        }
        buf[0] = 0;
      } else if (!buf[sizeof(buf) - 1]) {
        // The line is longer than the buffer. Set a flag and don't emit an
        // error if we were going to ignore the line, anyway.
        long_line = true;

#define CHECK_LINE                                                             \
  if (long_line) {                                                             \
    CLEANUP_THREAD_INFO;                                                       \
    *msg_id = kmp_i18n_str_LongLineCpuinfo;                                    \
    return false;                                                              \
  }
      }
      (*line)++;

      char s1[] = "processor";
      if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s1) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
#if KMP_ARCH_AARCH64
          // Handle the old AArch64 /proc/cpuinfo layout differently; it
          // lists all 'processor' entries in a single 'Processor' section,
          // so the normal duplicate check would always fail.
          num_avail++;
#else
          goto dup_field;
#endif
        threadInfo[num_avail][osIdIndex] = val;
#if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
        char path[256];
        KMP_SNPRINTF(
            path, sizeof(path),
            "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
            threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);

        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/core_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
        continue;
#else
      }
      char s2[] = "physical id";
      if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s2) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][pkgIdIndex] = val;
        continue;
      }
      char s3[] = "core id";
      if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s3) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][coreIdIndex] = val;
        continue;
#endif // KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
      }
      char s4[] = "thread id";
      if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][threadIdIndex] = val;
        continue;
      }
      unsigned level;
      if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
          goto no_val;
        // Validate the input before using level:
        if (level > (unsigned)__kmp_xproc) { // level is too big
          level = __kmp_xproc;
        }
        if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
          goto dup_field;
        threadInfo[num_avail][nodeIdIndex + level] = val;
        continue;
      }

      // We didn't recognize the leading token on the line. There are lots of
      // leading tokens that we don't recognize - if the line isn't empty, go
      // on to the next line.
      if ((*buf != 0) && (*buf != '\n')) {
        // If the line is longer than the buffer, read characters
        // until we find a newline.
        if (long_line) {
          int ch;
          while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
            ;
        }
        continue;
      }

      // A newline has signalled the end of the processor record.
      // Check that there aren't too many procs specified.
      if ((int)num_avail == __kmp_xproc) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_TooManyEntries;
        return false;
      }

      // Check for missing fields. The osId field must be there, and the
      // physical id field is also currently required.
      if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_MissingProcField;
        return false;
      }
      if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_MissingPhysicalIDField;
        return false;
      }

      // Skip this proc if it is not included in the machine model.
      if (!KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
                         __kmp_affin_fullMask)) {
        INIT_PROC_INFO(threadInfo[num_avail]);
        continue;
      }

      // We have a successful parse of this proc's info.
      // Increment the counter, and prepare for the next proc.
      num_avail++;
      KMP_ASSERT(num_avail <= num_records);
      INIT_PROC_INFO(threadInfo[num_avail]);
    }
    continue;

  no_val:
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_MissingValCpuinfo;
    return false;

  dup_field:
    CLEANUP_THREAD_INFO;
    *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
    return false;
  }
  *line = 0;
#if KMP_MIC && REDUCE_TEAM_SIZE
  unsigned teamSize = 0;
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  KMP_ASSERT(num_avail > 0);
  KMP_ASSERT(num_avail <= num_records);

  // Sort the threadInfo table by physical Id.
  qsort(threadInfo, num_avail, sizeof(*threadInfo),
        __kmp_affinity_cmp_ProcCpuInfo_phys_id);

  // The table is now sorted by pkgId / coreId / threadId, but we really
  // don't know the radix of any of the fields: pkgId's may be sparsely
  // assigned, and we don't want to assume coreId's or threadId's are dense.
  // Determine the radix of each field now; we only have upper bounds so far.
  unsigned *counts =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *maxCt =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *totals =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
  unsigned *lastId =
      (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));

  bool assign_thread_ids = false;
  unsigned threadIdCt;
  unsigned index;

restart_radix_check:
  threadIdCt = 0;

  // Initialize the counter arrays with data from threadInfo[0].
  if (assign_thread_ids) {
    if (threadInfo[0][threadIdIndex] == UINT_MAX) {
      threadInfo[0][threadIdIndex] = threadIdCt++;
    } else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
      threadIdCt = threadInfo[0][threadIdIndex] + 1;
    }
  }
  for (index = 0; index <= maxIndex; index++) {
    counts[index] = 1;
    maxCt[index] = 1;
    totals[index] = 1;
    lastId[index] = threadInfo[0][index];
  }

  // Run through the rest of the records, incrementing the counters.
  for (i = 1; i < num_avail; i++) {
    // Find the most significant index whose id differs from the id for the
    // previous record.
    for (index = maxIndex; index >= threadIdIndex; index--) {
      if (assign_thread_ids && (index == threadIdIndex)) {
        // Auto-assign the thread id field if it wasn't specified.
        if (threadInfo[i][threadIdIndex] == UINT_MAX) {
          threadInfo[i][threadIdIndex] = threadIdCt++;
        }
        // Apparently the thread id field was specified for some records and
        // not others. Start the counter off at the next higher thread id.
        else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
          threadIdCt = threadInfo[i][threadIdIndex] + 1;
        }
      }
      if (threadInfo[i][index] != lastId[index]) {
        // Run through all indices which are less significant, and reset the
        // counts to 1. At all levels up to and including index, increment
        // the totals and record the last id.
        unsigned index2;
        for (index2 = threadIdIndex; index2 < index; index2++) {
          totals[index2]++;
          if (counts[index2] > maxCt[index2]) {
            maxCt[index2] = counts[index2];
          }
          counts[index2] = 1;
          lastId[index2] = threadInfo[i][index2];
        }
        counts[index]++;
        totals[index]++;
        lastId[index] = threadInfo[i][index];

        if (assign_thread_ids && (index > threadIdIndex)) {
#if KMP_MIC && REDUCE_TEAM_SIZE
          // The default team size is the total #threads in the machine
          // minus 1 thread for every core that has 3 or more threads.
          teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE

          // Restart the thread counter, as we are on a new core.
          threadIdCt = 0;

          // Auto-assign the thread id field if it wasn't specified.
          if (threadInfo[i][threadIdIndex] == UINT_MAX) {
            threadInfo[i][threadIdIndex] = threadIdCt++;
          }
          // Otherwise start the counter off at the next higher thread id.
          else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
            threadIdCt = threadInfo[i][threadIdIndex] + 1;
          }
        }
        break;
      }
    }
    if (index < threadIdIndex) {
      // If thread ids were specified, it is an error if they are not unique.
      // Also, check that we haven't already restarted the loop (to be safe -
      // shouldn't need to).
      if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
        __kmp_free(lastId);
        __kmp_free(totals);
        __kmp_free(maxCt);
        __kmp_free(counts);
        CLEANUP_THREAD_INFO;
        *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
        return false;
      }

      // If the thread ids were not specified and we see entries that are
      // duplicates, start the loop over and assign the thread ids manually.
      assign_thread_ids = true;
      goto restart_radix_check;
    }
  }

#if KMP_MIC && REDUCE_TEAM_SIZE
  // The default team size is the total #threads in the machine
  // minus 1 thread for every core that has 3 or more threads.
  teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE

  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (counts[index] > maxCt[index]) {
      maxCt[index] = counts[index];
    }
  }

  __kmp_nThreadsPerCore = maxCt[threadIdIndex];
  nCoresPerPkg = maxCt[coreIdIndex];
  nPackages = totals[pkgIdIndex];

  // When affinity is off, this routine will still be called to set
  // __kmp_ncores, __kmp_nThreadsPerCore, nCoresPerPkg, and nPackages.
  // Make sure all these vars are set correctly, and return now if affinity
  // is not enabled.
  __kmp_ncores = totals[coreIdIndex];
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(__kmp_affinity_type == affinity_none);
    return true;
  }

#if KMP_MIC && REDUCE_TEAM_SIZE
  // Set the default team size.
  if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
    __kmp_dflt_team_nth = teamSize;
    KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting "
                  "__kmp_dflt_team_nth = %d\n",
                  __kmp_dflt_team_nth));
  }
#endif // KMP_MIC && REDUCE_TEAM_SIZE
  KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc);

  // Count the number of levels which have more nodes at that level than at
  // the parent's level (with there being an implicit root node of the top
  // level). This is equivalent to saying that there is at least one node at
  // this level which has a sibling. These levels are in the map, and the
  // package level is always in the map.
  bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
  for (index = threadIdIndex; index < maxIndex; index++) {
    KMP_ASSERT(totals[index] >= totals[index + 1]);
    inMap[index] = (totals[index] > totals[index + 1]);
  }
  inMap[maxIndex] = (totals[maxIndex] > 1);
  inMap[pkgIdIndex] = true;
  inMap[coreIdIndex] = true;
  inMap[threadIdIndex] = true;

  int depth = 0;
  int idx = 0;
  kmp_hw_t types[KMP_HW_LAST];
  int pkgLevel = -1;
  int coreLevel = -1;
  int threadLevel = -1;
  for (index = threadIdIndex; index <= maxIndex; index++) {
    if (inMap[index]) {
      depth++;
    }
  }
  if (inMap[pkgIdIndex]) {
    pkgLevel = idx;
    types[idx++] = KMP_HW_SOCKET;
  }
  if (inMap[coreIdIndex]) {
    coreLevel = idx;
    types[idx++] = KMP_HW_CORE;
  }
  if (inMap[threadIdIndex]) {
    threadLevel = idx;
    types[idx++] = KMP_HW_THREAD;
  }
  KMP_ASSERT(depth > 0);

  // Construct the data structure that is to be returned.
  __kmp_topology = kmp_topology_t::allocate(num_avail, depth, types);

  for (i = 0; i < num_avail; ++i) {
    unsigned os = threadInfo[i][osIdIndex];
    int src_index;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    hw_thread.clear();
    hw_thread.os_id = os;

    for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
      if (!inMap[src_index]) {
        continue;
      }
      if (src_index == pkgIdIndex) {
        hw_thread.ids[pkgLevel] = threadInfo[i][src_index];
      } else if (src_index == coreIdIndex) {
        hw_thread.ids[coreLevel] = threadInfo[i][src_index];
      } else if (src_index == threadIdIndex) {
        hw_thread.ids[threadLevel] = threadInfo[i][src_index];
      }
    }
  }

  __kmp_free(inMap);
  __kmp_free(lastId);
  __kmp_free(totals);
  __kmp_free(maxCt);
  __kmp_free(counts);
  CLEANUP_THREAD_INFO;
  __kmp_topology->sort_ids();
  if (!__kmp_topology->check_ids()) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
    *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
    return false;
  }
  return true;
}
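// Create and return a table of affinity masks, indexed by OS thread ID.
// Also computes the number of distinct masks formed at the current
// granularity (*numUnique) and the largest OS thread id seen (*maxIndex).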
static kmp_affin_mask_t *__kmp_create_masks(unsigned *maxIndex,
                                            unsigned *numUnique) {
  // First form a table of affinity masks in order of OS thread id.
  int maxOsId;
  int i;
  int numAddrs = __kmp_topology->get_num_hw_threads();
  int depth = __kmp_topology->get_depth();
  KMP_ASSERT(numAddrs);
  KMP_ASSERT(depth);

  maxOsId = 0;
  for (i = numAddrs - 1;; --i) {
    int osId = __kmp_topology->at(i).os_id;
    if (osId > maxOsId) {
      maxOsId = osId;
    }
    if (i == 0)
      break;
  }
  kmp_affin_mask_t *osId2Mask;
  KMP_CPU_ALLOC_ARRAY(osId2Mask, (maxOsId + 1));
  KMP_ASSERT(__kmp_affinity_gran_levels >= 0);
  if (__kmp_affinity_verbose && (__kmp_affinity_gran_levels > 0)) {
    KMP_INFORM(ThreadsMigrate, "KMP_AFFINITY", __kmp_affinity_gran_levels);
  }
  if (__kmp_affinity_gran_levels >= (int)depth) {
    KMP_AFF_WARNING(AffThreadsMayMigrate);
  }

  // Run through the table, forming the masks for all threads on each core.
  // Threads on the same core are identical above the thread level and
  // appear consecutively after sort_ids().
  int unique = 0;
  int j = 0; // index of 1st thread on core
  int leader = 0;
  kmp_affin_mask_t *sum;
  KMP_CPU_ALLOC_ON_STACK(sum);
  KMP_CPU_ZERO(sum);
  KMP_CPU_SET(__kmp_topology->at(0).os_id, sum);
  for (i = 1; i < numAddrs; i++) {
    // If this thread is sufficiently close to the leader (within the
    // granularity setting), set its bit in the group's mask and move on.
    if (__kmp_topology->is_close(leader, i, __kmp_affinity_gran_levels)) {
      KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
      continue;
    }

    // For every thread in this group, copy the mask to the thread's entry
    // in the osId2Mask table. Mark the first address as a leader.
    for (; j < i; j++) {
      int osId = __kmp_topology->at(j).os_id;
      KMP_DEBUG_ASSERT(osId <= maxOsId);
      kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
      KMP_CPU_COPY(mask, sum);
      __kmp_topology->at(j).leader = (j == leader);
    }
    unique++;

    // Start a new mask.
    leader = i;
    KMP_CPU_ZERO(sum);
    KMP_CPU_SET(__kmp_topology->at(i).os_id, sum);
  }

  // For every thread in the last group, copy the mask to the thread's entry
  // in the osId2Mask table.
  for (; j < i; j++) {
    int osId = __kmp_topology->at(j).os_id;
    KMP_DEBUG_ASSERT(osId <= maxOsId);
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
    KMP_CPU_COPY(mask, sum);
    __kmp_topology->at(j).leader = (j == leader);
  }
  unique++;
  KMP_CPU_FREE_FROM_STACK(sum);

  *maxIndex = maxOsId;
  *numUnique = unique;
  return osId2Mask;
}
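// Temporary mask vector used by the KMP_AFFINITY proclist parser and the
// OMP_PLACES parser below. These are file-scope statics so the
// recursive-descent places parser need not pass them through every call.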
static kmp_affin_mask_t *newMasks;
static int numNewMasks;
static int nextNewMask;

#define ADD_MASK(_mask)                                                        \
  {                                                                            \
    if (nextNewMask >= numNewMasks) {                                          \
      int i;                                                                   \
      numNewMasks *= 2;                                                        \
      kmp_affin_mask_t *temp;                                                  \
      KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks);                         \
      for (i = 0; i < numNewMasks / 2; i++) {                                  \
        kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);                    \
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i);                       \
        KMP_CPU_COPY(dest, src);                                               \
      }                                                                        \
      KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2);                  \
      newMasks = temp;                                                         \
    }                                                                          \
    KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask));               \
    nextNewMask++;                                                             \
  }

#define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId)                             \
  {                                                                            \
    if (((_osId) > _maxOsId) ||                                                \
        (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) {     \
      KMP_AFF_WARNING(AffIgnoreInvalidProcID, _osId);                          \
    } else {                                                                   \
      ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId)));                            \
    }                                                                          \
  }

// Re-parse the proclist (for the explicit affinity type), and form the list
// of affinity newMasks indexed by gtid.
static void __kmp_affinity_process_proclist(kmp_affin_mask_t **out_masks,
                                            unsigned int *out_numMasks,
                                            const char *proclist,
                                            kmp_affin_mask_t *osId2Mask,
                                            int maxOsId) {
  int i;
  const char *scan = proclist;
  const char *next = proclist;

  numNewMasks = 2;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  nextNewMask = 0;
  kmp_affin_mask_t *sumMask;
  KMP_CPU_ALLOC(sumMask);
  int setSize = 0;

  for (;;) {
    int start, end, stride;

    SKIP_WS(scan);
    next = scan;
    if (*next == '\0') {
      break;
    }

    if (*next == '{') {
      int num;
      setSize = 0;
      next++; // skip '{'
      SKIP_WS(next);
      scan = next;

      // Read the first integer in the set.
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad proclist");
      SKIP_DIGITS(next);
      num = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(num >= 0, "bad explicit proc list");

      // Copy the mask for that osId to the sum (union) mask.
      if ((num > maxOsId) ||
          (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
        KMP_AFF_WARNING(AffIgnoreInvalidProcID, num);
        KMP_CPU_ZERO(sumMask);
      } else {
        KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
        setSize = 1;
      }

      for (;;) {
        // Check for end of set.
        SKIP_WS(next);
        if (*next == '}') {
          next++; // skip '}'
          break;
        }

        KMP_ASSERT2(*next == ',', "bad explicit proc list");
        next++;

        // Read the next integer in the set.
        SKIP_WS(next);
        scan = next;
        KMP_ASSERT2((*next >= '0') && (*next <= '9'),
                    "bad explicit proc list");
        SKIP_DIGITS(next);
        num = __kmp_str_to_int(scan, *next);
        KMP_ASSERT2(num >= 0, "bad explicit proc list");

        // Add the mask for that osId to the sum mask.
        if ((num > maxOsId) ||
            (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
          KMP_AFF_WARNING(AffIgnoreInvalidProcID, num);
        } else {
          KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
          setSize++;
        }
      }
      if (setSize > 0) {
        ADD_MASK(sumMask);
      }

      SKIP_WS(next);
      if (*next == ',') {
        next++;
      }
      scan = next;
      continue;
    }

    // Read the first integer.
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(start >= 0, "bad explicit proc list");
    SKIP_WS(next);

    // If this isn't a range, then add a mask to the list and go on.
    if (*next != '-') {
      ADD_MASK_OSID(start, osId2Mask, maxOsId);
      if (*next == ',') {
        next++;
      }
      scan = next;
      continue;
    }

    // This is a range. Skip over the '-' and read in the 2nd int.
    next++; // skip '-'
    SKIP_WS(next);
    scan = next;
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    end = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(end >= 0, "bad explicit proc list");

    // Check for a stride parameter.
    stride = 1;
    SKIP_WS(next);
    if (*next == ':') {
      int sign = +1;
      next++; // skip ':'
      SKIP_WS(next);
      scan = next;
      if (*next == '-') {
        sign = -1;
        next++;
        SKIP_WS(next);
        scan = next;
      }
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(stride >= 0, "bad explicit proc list");
      stride *= sign;
    }

    // Do some range checks.
    KMP_ASSERT2(stride != 0, "bad explicit proc list");
    if (stride > 0) {
      KMP_ASSERT2(start <= end, "bad explicit proc list");
    } else {
      KMP_ASSERT2(start >= end, "bad explicit proc list");
    }
    KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");

    // Add the mask for each OS proc # to the list.
    if (stride > 0) {
      do {
        ADD_MASK_OSID(start, osId2Mask, maxOsId);
        start += stride;
      } while (start <= end);
    } else {
      do {
        ADD_MASK_OSID(start, osId2Mask, maxOsId);
        start += stride;
      } while (start >= end);
    }

    // Skip optional comma.
    SKIP_WS(next);
    if (*next == ',') {
      next++;
    }
    scan = next;
  }

  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
  KMP_CPU_FREE(sumMask);
}
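// OMP_PLACES parsing, e.g. OMP_PLACES="{0},{1},{2},{3}" or "{0:4}:8:4".
// A subplace inside braces is <lower-bound>[:<length>[:<stride>]]; for
// example "{0:4:2}" selects OS procs 0, 2, 4 and 6. Out-of-range or invalid
// proc ids draw a warning and are otherwise ignored.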
static void __kmp_process_subplace_list(const char **scan,
                                        kmp_affin_mask_t *osId2Mask,
                                        int maxOsId,
                                        kmp_affin_mask_t *tempMask,
                                        int *setSize) {
  const char *next;

  for (;;) {
    int start, count, stride, i;

    // Read in the starting proc id.
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
                "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(start >= 0);
    *scan = next;

    // Valid follow sets are ',' ':' and '}'.
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      if ((start > maxOsId) ||
          (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
        KMP_AFF_WARNING(AffIgnoreInvalidProcID, start);
      } else {
        KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
        (*setSize)++;
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
    (*scan)++; // skip ':'

    // Read count parameter.
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
                "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(count >= 0);
    *scan = next;

    // Valid follow sets are ',' ':' and '}'.
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      for (i = 0; i < count; i++) {
        if ((start > maxOsId) ||
            (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
          KMP_AFF_WARNING(AffIgnoreInvalidProcID, start);
          break; // don't proliferate warnings for large count
        } else {
          KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
          start++;
          (*setSize)++;
        }
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
    (*scan)++; // skip ':'

    // Read stride parameter.
    int sign = +1;
    for (;;) {
      SKIP_WS(*scan);
      if (**scan == '+') {
        (*scan)++; // skip '+'
        continue;
      }
      if (**scan == '-') {
        sign *= -1;
        (*scan)++; // skip '-'
        continue;
      }
      break;
    }
    SKIP_WS(*scan);
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'),
                "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    stride = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(stride >= 0);
    *scan = next;
    stride *= sign;

    // Valid follow sets are ',' and '}'.
    SKIP_WS(*scan);
    if (**scan == '}' || **scan == ',') {
      for (i = 0; i < count; i++) {
        if ((start > maxOsId) ||
            (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
          KMP_AFF_WARNING(AffIgnoreInvalidProcID, start);
          break; // don't proliferate warnings for large count
        } else {
          KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
          start += stride;
          (*setSize)++;
        }
      }
      if (**scan == '}') {
        break;
      }
      (*scan)++; // skip ','
      continue;
    }

    KMP_ASSERT2(0, "bad explicit places list");
  }
}
static void __kmp_process_place(const char **scan,
                                kmp_affin_mask_t *osId2Mask, int maxOsId,
                                kmp_affin_mask_t *tempMask, int *setSize) {
  const char *next;

  // Valid follow sets are '{' '!' and num.
  SKIP_WS(*scan);
  if (**scan == '{') {
    (*scan)++; // skip '{'
    __kmp_process_subplace_list(scan, osId2Mask, maxOsId, tempMask, setSize);
    KMP_ASSERT2(**scan == '}', "bad explicit places list");
    (*scan)++; // skip '}'
  } else if (**scan == '!') {
    (*scan)++; // skip '!'
    __kmp_process_place(scan, osId2Mask, maxOsId, tempMask, setSize);
    KMP_CPU_COMPLEMENT(maxOsId, tempMask);
  } else if ((**scan >= '0') && (**scan <= '9')) {
    next = *scan;
    SKIP_DIGITS(next);
    int num = __kmp_str_to_int(*scan, *next);
    KMP_ASSERT(num >= 0);
    if ((num > maxOsId) ||
        (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
      KMP_AFF_WARNING(AffIgnoreInvalidProcID, num);
    } else {
      KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
      (*setSize)++;
    }
    *scan = next; // skip num
  } else {
    KMP_ASSERT2(0, "bad explicit places list");
  }
}
void __kmp_affinity_process_placelist(kmp_affin_mask_t **out_masks,
                                      unsigned int *out_numMasks,
                                      const char *placelist,
                                      kmp_affin_mask_t *osId2Mask,
                                      int maxOsId) {
  int i, j, count, stride, sign;
  const char *scan = placelist;
  const char *next = placelist;

  numNewMasks = 2;
  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
  nextNewMask = 0;

  // tempMask is modified based on the previous or initial place to form the
  // current place; previousMask contains the previous place.
  kmp_affin_mask_t *tempMask;
  kmp_affin_mask_t *previousMask;
  KMP_CPU_ALLOC(tempMask);
  KMP_CPU_ZERO(tempMask);
  KMP_CPU_ALLOC(previousMask);
  KMP_CPU_ZERO(previousMask);
  int setSize = 0;

  for (;;) {
    __kmp_process_place(&scan, osId2Mask, maxOsId, tempMask, &setSize);

    // Valid follow sets are ',' ':' and EOL.
    SKIP_WS(scan);
    if (*scan == '\0' || *scan == ',') {
      if (setSize > 0) {
        ADD_MASK(tempMask);
      }
      KMP_CPU_ZERO(tempMask);
      setSize = 0;
      if (*scan == '\0') {
        break;
      }
      scan++; // skip ','
      continue;
    }

    KMP_ASSERT2(*scan == ':', "bad explicit places list");
    scan++; // skip ':'

    // Read count parameter.
    SKIP_WS(scan);
    KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
    next = scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(scan, *next);
    KMP_ASSERT(count >= 0);
    scan = next;

    // Valid follow sets are ',' ':' and EOL.
    SKIP_WS(scan);
    if (*scan == '\0' || *scan == ',') {
      stride = +1;
    } else {
      KMP_ASSERT2(*scan == ':', "bad explicit places list");
      scan++; // skip ':'

      // Read stride parameter.
      sign = +1;
      for (;;) {
        SKIP_WS(scan);
        if (*scan == '+') {
          scan++; // skip '+'
          continue;
        }
        if (*scan == '-') {
          sign *= -1;
          scan++; // skip '-'
          continue;
        }
        break;
      }
      SKIP_WS(scan);
      KMP_ASSERT2((*scan >= '0') && (*scan <= '9'),
                  "bad explicit places list");
      next = scan;
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_DEBUG_ASSERT(stride >= 0);
      scan = next;
      stride *= sign;
    }

    // Add places determined by initial_place : count : stride.
    for (i = 0; i < count; i++) {
      if (setSize == 0) {
        break;
      }
      // Add the current place, then build the next place (tempMask) from it.
      KMP_CPU_COPY(previousMask, tempMask);
      ADD_MASK(previousMask);
      KMP_CPU_ZERO(tempMask);
      setSize = 0;
      KMP_CPU_SET_ITERATE(j, previousMask) {
        if (!KMP_CPU_ISSET(j, previousMask)) {
          continue;
        }
        if ((j + stride > maxOsId) || (j + stride < 0) ||
            (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
            (!KMP_CPU_ISSET(j + stride,
                            KMP_CPU_INDEX(osId2Mask, j + stride)))) {
          if (i < count - 1) {
            KMP_AFF_WARNING(AffIgnoreInvalidProcID, j + stride);
          }
          continue;
        }
        KMP_CPU_SET(j + stride, tempMask);
        setSize++;
      }
    }
    KMP_CPU_ZERO(tempMask);
    setSize = 0;

    // Valid follow sets are ',' and EOL.
    SKIP_WS(scan);
    if (*scan == '\0') {
      break;
    }
    if (*scan == ',') {
      scan++; // skip ','
      continue;
    }

    KMP_ASSERT2(0, "bad explicit places list");
  }

  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  KMP_CPU_FREE(tempMask);
  KMP_CPU_FREE(previousMask);
  for (i = 0; i < nextNewMask; i++) {
    kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
    kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
    KMP_CPU_COPY(dest, src);
  }
  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
}

#undef ADD_MASK
#undef ADD_MASK_OSID
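// Helpers for balanced affinity on non-uniform topologies: find the deepest
// level that still groups more than one processing unit per core (the "core
// level"), count the cores at that level, map a hardware thread to its core,
// and bound the number of processing units per core.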
static int __kmp_affinity_find_core_level(int nprocs, int bottom_level) {
  int core_level = 0;

  for (int i = 0; i < nprocs; i++) {
    const kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
    for (int j = bottom_level; j > 0; j--) {
      if (hw_thread.ids[j] > 0) {
        if (core_level < (j - 1)) {
          core_level = j - 1;
        }
      }
    }
  }
  return core_level;
}

static int __kmp_affinity_compute_ncores(int nprocs, int bottom_level,
                                         int core_level) {
  return __kmp_topology->get_count(core_level);
}

static int __kmp_affinity_find_core(int proc, int bottom_level,
                                    int core_level) {
  int core = 0;
  KMP_DEBUG_ASSERT(proc >= 0 && proc < __kmp_topology->get_num_hw_threads());
  for (int i = 0; i <= proc; ++i) {
    if (i + 1 <= proc) {
      for (int j = 0; j <= core_level; ++j) {
        if (__kmp_topology->at(i + 1).sub_ids[j] !=
            __kmp_topology->at(i).sub_ids[j]) {
          core++;
          break;
        }
      }
    }
  }
  return core;
}

static int __kmp_affinity_max_proc_per_core(int nprocs, int bottom_level,
                                            int core_level) {
  if (core_level >= bottom_level)
    return 1;
  int thread_level = __kmp_topology->get_level(KMP_HW_THREAD);
  return __kmp_topology->calculate_ratio(thread_level, core_level);
}

static int *procarr = NULL;
static int __kmp_aff_depth = 0;
// Create a one-element mask array (set of places) which only contains the
// initial process's affinity mask.
static void __kmp_create_affinity_none_places() {
  KMP_ASSERT(__kmp_affin_fullMask != NULL);
  KMP_ASSERT(__kmp_affinity_type == affinity_none);
  __kmp_affinity_num_masks = 1;
  KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
  kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, 0);
  KMP_CPU_COPY(dest, __kmp_affin_fullMask);
}
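// Main topology-discovery driver. The method is either forced via
// KMP_TOPOLOGY_METHOD, or, by default, each available method is tried in
// order (hwloc, cpuid leaf 31/11, apic id, /proc/cpuinfo, Windows processor
// groups, flat) until one succeeds; the resulting __kmp_topology is then
// canonicalized and the place list is built from it.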
static void __kmp_aux_affinity_initialize(void) {
  if (__kmp_affinity_masks != NULL) {
    KMP_ASSERT(__kmp_affin_fullMask != NULL);
    return;
  }

  // Create the "full" mask - this defines all of the processors that we
  // consider to be in the machine model. If respect is set, then it is the
  // initialization thread's affinity mask. Otherwise, it is all processors
  // that we know about on the machine.
  if (__kmp_affin_fullMask == NULL) {
    KMP_CPU_ALLOC(__kmp_affin_fullMask);
  }
  if (__kmp_affin_origMask == NULL) {
    KMP_CPU_ALLOC(__kmp_affin_origMask);
  }
  if (KMP_AFFINITY_CAPABLE()) {
    __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);
    // Make a copy before possibly expanding to the entire machine mask.
    __kmp_affin_origMask->copy(__kmp_affin_fullMask);
    if (__kmp_affinity_respect_mask) {
      // Count the number of available processors.
      unsigned i;
      __kmp_avail_proc = 0;
      KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
        if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
          continue;
        }
        __kmp_avail_proc++;
      }
      if (__kmp_avail_proc > __kmp_xproc) {
        KMP_AFF_WARNING(ErrorInitializeAffinity);
        __kmp_affinity_type = affinity_none;
        KMP_AFFINITY_DISABLE();
        return;
      }

      if (__kmp_affinity_verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
      }
    } else {
      if (__kmp_affinity_verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  __kmp_affin_fullMask);
        KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
      }
      __kmp_avail_proc =
          __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
#if KMP_OS_WINDOWS
      if (__kmp_num_proc_groups <= 1) {
        // Copy the expanded full mask if the topology has a single
        // processor group.
        __kmp_affin_origMask->copy(__kmp_affin_fullMask);
      }
      // Set the process affinity mask since threads' affinity masks must be
      // a subset of the process mask on Windows* OS.
      __kmp_affin_fullMask->set_process_affinity(true);
#endif
    }
  }

  kmp_i18n_id_t msg_id = kmp_i18n_null;

  // For backward compatibility, setting KMP_CPUINFO_FILE implies
  // KMP_TOPOLOGY_METHOD=cpuinfo.
  if ((__kmp_cpuinfo_file != NULL) &&
      (__kmp_affinity_top_method == affinity_top_method_all)) {
    __kmp_affinity_top_method = affinity_top_method_cpuinfo;
  }

  bool success = false;
  if (__kmp_affinity_top_method == affinity_top_method_all) {
// In the default code path, errors are not fatal - we just try using
// another method. We only emit a warning message if affinity is on, or the
// verbose flag is set, and the nowarnings flag was not set.
#if KMP_USE_HWLOC
    if (!success &&
        __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
      if (!__kmp_hwloc_error) {
        success = __kmp_affinity_create_hwloc_map(&msg_id);
        if (!success && __kmp_affinity_verbose) {
          KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
        }
      } else if (__kmp_affinity_verbose) {
        KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
      }
    }
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    if (!success) {
      success = __kmp_affinity_create_x2apicid_map(&msg_id);
      if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
      }
    }
    if (!success) {
      success = __kmp_affinity_create_apicid_map(&msg_id);
      if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#if KMP_OS_LINUX
    if (!success) {
      int line = 0;
      success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
      if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_OS_LINUX */

#if KMP_GROUP_AFFINITY
    if (!success && (__kmp_num_proc_groups > 1)) {
      success = __kmp_affinity_create_proc_group_map(&msg_id);
      if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
      }
    }
#endif /* KMP_GROUP_AFFINITY */

    if (!success) {
      success = __kmp_affinity_create_flat_map(&msg_id);
      if (!success && __kmp_affinity_verbose && msg_id != kmp_i18n_null) {
        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id));
      }
      KMP_ASSERT(success);
    }
  }
// If the user has specified a particular topology discovery method, abort if
// that method fails. The exception is group affinity, which may have been
// implicitly set.
#if KMP_USE_HWLOC
  else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
    KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC);
    success = __kmp_affinity_create_hwloc_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif // KMP_USE_HWLOC

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  else if (__kmp_affinity_top_method == affinity_top_method_x2apicid ||
           __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
    success = __kmp_affinity_create_x2apicid_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  } else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
    success = __kmp_affinity_create_apicid_map(&msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
    int line = 0;
    success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      const char *filename = __kmp_cpuinfo_get_filename();
      if (line > 0) {
        KMP_FATAL(FileLineMsgExiting, filename, line,
                  __kmp_i18n_catgets(msg_id));
      } else {
        KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
      }
    }
  }

#if KMP_GROUP_AFFINITY
  else if (__kmp_affinity_top_method == affinity_top_method_group) {
    success = __kmp_affinity_create_proc_group_map(&msg_id);
    KMP_ASSERT(success);
    if (!success) {
      KMP_ASSERT(msg_id != kmp_i18n_null);
      KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
    }
  }
#endif /* KMP_GROUP_AFFINITY */

  else if (__kmp_affinity_top_method == affinity_top_method_flat) {
    success = __kmp_affinity_create_flat_map(&msg_id);
    // Should not fail.
    KMP_ASSERT(success);
  }
  // Early exit if the topology could not be created.
  if (!__kmp_topology) {
    if (KMP_AFFINITY_CAPABLE()) {
      KMP_AFF_WARNING(ErrorInitializeAffinity);
    }
    if (nPackages > 0 && nCoresPerPkg > 0 && __kmp_nThreadsPerCore > 0 &&
        __kmp_ncores > 0) {
      __kmp_topology = kmp_topology_t::allocate(0, 0, NULL);
      __kmp_topology->canonicalize(nPackages, nCoresPerPkg,
                                   __kmp_nThreadsPerCore, __kmp_ncores);
      if (__kmp_affinity_verbose) {
        __kmp_topology->print("KMP_AFFINITY");
      }
    }
    __kmp_affinity_type = affinity_none;
    __kmp_create_affinity_none_places();
#if KMP_USE_HIER_SCHED
    __kmp_dispatch_set_hierarchy_values();
#endif
    KMP_AFFINITY_DISABLE();
    return;
  }

  // Canonicalize, print (if requested), apply KMP_HW_SUBSET, and initialize
  // other data structures which depend on the topology.
  __kmp_topology->canonicalize();
  if (__kmp_affinity_verbose)
    __kmp_topology->print("KMP_AFFINITY");
  bool filtered = __kmp_topology->filter_hw_subset();
  if (filtered) {
#if KMP_OS_WINDOWS
    // Copy the filtered full mask if the topology has a single processor
    // group.
    if (__kmp_num_proc_groups <= 1)
#endif
      __kmp_affin_origMask->copy(__kmp_affin_fullMask);
  }
  if (filtered && __kmp_affinity_verbose)
    __kmp_topology->print("KMP_HW_SUBSET");
  machine_hierarchy.init(__kmp_topology->get_num_hw_threads());
  KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads());
  // If KMP_AFFINITY=none, only create the single "none" place, which is the
  // process's initial affinity mask or the full set of hardware threads,
  // depending on respect/norespect.
  if (__kmp_affinity_type == affinity_none) {
    __kmp_create_affinity_none_places();
#if KMP_USE_HIER_SCHED
    __kmp_dispatch_set_hierarchy_values();
#endif
    return;
  }
  int depth = __kmp_topology->get_depth();

  // Create the table of masks, indexed by thread Id.
  unsigned maxIndex;
  unsigned numUnique;
  kmp_affin_mask_t *osId2Mask = __kmp_create_masks(&maxIndex, &numUnique);
  if (__kmp_affinity_gran_levels == 0) {
    KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc);
  }

  switch (__kmp_affinity_type) {

  case affinity_explicit:
    KMP_DEBUG_ASSERT(__kmp_affinity_proclist != NULL);
    if (__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
      __kmp_affinity_process_proclist(
          &__kmp_affinity_masks, &__kmp_affinity_num_masks,
          __kmp_affinity_proclist, osId2Mask, maxIndex);
    } else {
      __kmp_affinity_process_placelist(
          &__kmp_affinity_masks, &__kmp_affinity_num_masks,
          __kmp_affinity_proclist, osId2Mask, maxIndex);
    }
    if (__kmp_affinity_num_masks == 0) {
      KMP_AFF_WARNING(AffNoValidProcID);
      __kmp_affinity_type = affinity_none;
      __kmp_create_affinity_none_places();
      return;
    }
    break;

  // The other affinity types rely on sorting the hardware threads according
  // to some permutation of the machine topology tree. Set
  // __kmp_affinity_compact and __kmp_affinity_offset appropriately, then
  // jump to a common code fragment to do the sort and create the array of
  // affinity masks.
  case affinity_logical:
    __kmp_affinity_compact = 0;
    if (__kmp_affinity_offset) {
      __kmp_affinity_offset =
          __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
    }
    goto sortTopology;

  case affinity_physical:
    if (__kmp_nThreadsPerCore > 1) {
      __kmp_affinity_compact = 1;
      if (__kmp_affinity_compact >= depth) {
        __kmp_affinity_compact = 0;
      }
    } else {
      __kmp_affinity_compact = 0;
    }
    if (__kmp_affinity_offset) {
      __kmp_affinity_offset =
          __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
    }
    goto sortTopology;

  case affinity_scatter:
    if (__kmp_affinity_compact >= depth) {
      __kmp_affinity_compact = 0;
    } else {
      __kmp_affinity_compact = depth - 1 - __kmp_affinity_compact;
    }
    goto sortTopology;

  case affinity_compact:
    if (__kmp_affinity_compact >= depth) {
      __kmp_affinity_compact = depth - 1;
    }
    goto sortTopology;

  case affinity_balanced:
    if (depth <= 1) {
      KMP_AFF_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
      __kmp_affinity_type = affinity_none;
      __kmp_create_affinity_none_places();
      return;
    } else if (!__kmp_topology->is_uniform()) {
      // Save the depth for further usage.
      __kmp_aff_depth = depth;

      int core_level =
          __kmp_affinity_find_core_level(__kmp_avail_proc, depth - 1);
      int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc, depth - 1,
                                                 core_level);
      int maxprocpercore = __kmp_affinity_max_proc_per_core(
          __kmp_avail_proc, depth - 1, core_level);

      int nproc = ncores * maxprocpercore;
      if ((nproc < 2) || (nproc < __kmp_avail_proc)) {
        KMP_AFF_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
        __kmp_affinity_type = affinity_none;
        return;
      }

      procarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        procarr[i] = -1;
      }

      // Lay out procarr core by core; procarr[core * maxprocpercore + k] is
      // the k-th OS proc bound to that core (or -1).
      int lastcore = -1;
      int inlastcore = 0;
      for (int i = 0; i < __kmp_avail_proc; i++) {
        int proc = __kmp_topology->at(i).os_id;
        int core = __kmp_affinity_find_core(i, depth - 1, core_level);

        if (core == lastcore) {
          inlastcore++;
        } else {
          inlastcore = 0;
        }
        lastcore = core;

        procarr[core * maxprocpercore + inlastcore] = proc;
      }
    }
    if (__kmp_affinity_compact >= depth) {
      __kmp_affinity_compact = depth - 1;
    }

  sortTopology:
    // Allocate the gtid->affinity mask table.
    if (__kmp_affinity_dups) {
      __kmp_affinity_num_masks = __kmp_avail_proc;
    } else {
      __kmp_affinity_num_masks = numUnique;
    }

    if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) &&
        (__kmp_affinity_num_places > 0) &&
        ((unsigned)__kmp_affinity_num_places < __kmp_affinity_num_masks)) {
      __kmp_affinity_num_masks = __kmp_affinity_num_places;
    }

    KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);

    // Sort the topology table according to the current setting of
    // __kmp_affinity_compact, then fill out __kmp_affinity_masks.
    __kmp_topology->sort_compact();
    {
      int i;
      unsigned j;
      int num_hw_threads = __kmp_topology->get_num_hw_threads();
      for (i = 0, j = 0; i < num_hw_threads; i++) {
        if ((!__kmp_affinity_dups) && (!__kmp_topology->at(i).leader)) {
          continue;
        }
        int osId = __kmp_topology->at(i).os_id;

        kmp_affin_mask_t *src = KMP_CPU_INDEX(osId2Mask, osId);
        kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, j);
        KMP_ASSERT(KMP_CPU_ISSET(osId, src));
        KMP_CPU_COPY(dest, src);
        if (++j >= __kmp_affinity_num_masks) {
          break;
        }
      }
      KMP_DEBUG_ASSERT(j == __kmp_affinity_num_masks);
    }
    // Sort the topology back using ids.
    __kmp_topology->sort_ids();
    break;

  default:
    KMP_ASSERT2(0, "Unexpected affinity setting");
  }

  KMP_CPU_FREE_ARRAY(osId2Mask, maxIndex + 1);
}
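// Entry point. Much of this file was written assuming that if a machine is
// not affinity capable then __kmp_affinity_type == affinity_none. Rather
// than change every such check, a disabled affinity type is temporarily
// slammed to affinity_none around the real initialization and restored
// afterwards.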
void __kmp_affinity_initialize(void) {
  int disabled = (__kmp_affinity_type == affinity_disabled);
  if (!KMP_AFFINITY_CAPABLE()) {
    KMP_ASSERT(disabled);
  }
  if (disabled) {
    __kmp_affinity_type = affinity_none;
  }
  __kmp_aux_affinity_initialize();
  if (disabled) {
    __kmp_affinity_type = affinity_disabled;
  }
}
void __kmp_affinity_uninitialize(void) {
  if (__kmp_affinity_masks != NULL) {
    KMP_CPU_FREE_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
    __kmp_affinity_masks = NULL;
  }
  if (__kmp_affin_fullMask != NULL) {
    KMP_CPU_FREE(__kmp_affin_fullMask);
    __kmp_affin_fullMask = NULL;
  }
  if (__kmp_affin_origMask != NULL) {
    KMP_CPU_FREE(__kmp_affin_origMask);
    __kmp_affin_origMask = NULL;
  }
  __kmp_affinity_num_masks = 0;
  __kmp_affinity_type = affinity_default;
  __kmp_affinity_num_places = 0;
  if (__kmp_affinity_proclist != NULL) {
    __kmp_free(__kmp_affinity_proclist);
    __kmp_affinity_proclist = NULL;
  }
  if (procarr != NULL) {
    __kmp_free(procarr);
    procarr = NULL;
  }
#if KMP_USE_HWLOC
  if (__kmp_hwloc_topology != NULL) {
    hwloc_topology_destroy(__kmp_hwloc_topology);
    __kmp_hwloc_topology = NULL;
  }
#endif
  if (__kmp_hw_subset) {
    kmp_hw_subset_t::deallocate(__kmp_hw_subset);
    __kmp_hw_subset = nullptr;
  }
  if (__kmp_topology) {
    kmp_topology_t::deallocate(__kmp_topology);
    __kmp_topology = nullptr;
  }
  KMPAffinity::destroy_api();
}
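// Set the initial affinity mask for a freshly created thread: pick a place
// from __kmp_affinity_masks (offset by KMP_AFFINITY's offset setting) or the
// full mask, record the place bounds in the kmp_info_t, and apply the mask
// to the OS thread.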
void __kmp_affinity_set_init_mask(int gtid, int isa_root) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
  if (th->th.th_affin_mask == NULL) {
    KMP_CPU_ALLOC(th->th.th_affin_mask);
  } else {
    KMP_CPU_ZERO(th->th.th_affin_mask);
  }

  // Copy the thread mask to the kmp_info_t structure. If
  // __kmp_affinity_type == affinity_none, copy the "full" mask, i.e. one
  // that has all of the OS proc ids set, or if __kmp_affinity_respect_mask
  // is set, then it is the mask of the initialization thread.
  kmp_affin_mask_t *mask;
  int i;

  if (KMP_AFFINITY_NON_PROC_BIND) {
    if ((__kmp_affinity_type == affinity_none) ||
        (__kmp_affinity_type == affinity_balanced) ||
        KMP_HIDDEN_HELPER_THREAD(gtid)) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      int mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
      KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
      i = (mask_idx + __kmp_affinity_offset) % __kmp_affinity_num_masks;
      mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
    }
  } else {
    if ((!isa_root) || KMP_HIDDEN_HELPER_THREAD(gtid) ||
        (__kmp_nested_proc_bind.bind_types[0] == proc_bind_false)) {
#if KMP_GROUP_AFFINITY
      if (__kmp_num_proc_groups > 1) {
        return;
      }
#endif
      KMP_ASSERT(__kmp_affin_fullMask != NULL);
      i = KMP_PLACE_ALL;
      mask = __kmp_affin_fullMask;
    } else {
      int mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid);
      KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
      i = (mask_idx + __kmp_affinity_offset) % __kmp_affinity_num_masks;
      mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
    }
  }

  th->th.th_current_place = i;
  if (isa_root || KMP_HIDDEN_HELPER_THREAD(gtid)) {
    th->th.th_new_place = i;
    th->th.th_first_place = 0;
    th->th.th_last_place = __kmp_affinity_num_masks - 1;
  } else if (KMP_AFFINITY_NON_PROC_BIND) {
    // When using a non-OMP_PROC_BIND affinity method, set all threads'
    // place-partition-var to the entire place list.
    th->th.th_first_place = 0;
    th->th.th_last_place = __kmp_affinity_num_masks - 1;
  }

  if (i == KMP_PLACE_ALL) {
    KA_TRACE(100,
             ("__kmp_affinity_set_init_mask: binding T#%d to all places\n",
              gtid));
  } else {
    KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to place %d\n",
                   gtid, i));
  }

  KMP_CPU_COPY(th->th.th_affin_mask, mask);

  if (__kmp_affinity_verbose && !KMP_HIDDEN_HELPER_THREAD(gtid)
      /* to avoid duplicate printing (will be correctly printed on barrier) */
      && (__kmp_affinity_type == affinity_none ||
          (i != KMP_PLACE_ALL && __kmp_affinity_type != affinity_balanced))) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
               __kmp_gettid(), gtid, buf);
  }

  // If I am a hidden helper thread, still show the affinity mask used.
  if (__kmp_affinity_verbose && KMP_HIDDEN_HELPER_THREAD(gtid)) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY (hidden helper thread)",
               (kmp_int32)getpid(), __kmp_gettid(), gtid, buf);
  }

#if KMP_OS_WINDOWS
  // On Windows* OS, the process affinity mask might have changed. If the
  // user didn't request affinity and this call fails, just continue
  // silently.
  if (__kmp_affinity_type == affinity_none) {
    __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
  } else
#endif
    __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
void __kmp_affinity_set_place(int gtid) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return;
  }

  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

  KA_TRACE(100, ("__kmp_affinity_set_place: binding T#%d to place %d (current "
                 "place = %d)\n",
                 gtid, th->th.th_new_place, th->th.th_current_place));

  // Check that the new place is within this thread's partition.
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  KMP_ASSERT(th->th.th_new_place >= 0);
  KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity_num_masks);
  if (th->th.th_first_place <= th->th.th_last_place) {
    KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
               (th->th.th_new_place <= th->th.th_last_place));
  } else {
    KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
               (th->th.th_new_place >= th->th.th_last_place));
  }

  // Copy the thread mask to the kmp_info_t structure, and set this thread's
  // affinity.
  kmp_affin_mask_t *mask =
      KMP_CPU_INDEX(__kmp_affinity_masks, th->th.th_new_place);
  KMP_CPU_COPY(th->th.th_affin_mask, mask);
  th->th.th_current_place = th->th.th_new_place;

  if (__kmp_affinity_verbose) {
    char buf[KMP_AFFIN_MASK_PRINT_LEN];
    __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
               __kmp_gettid(), gtid, buf);
  }
  __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
int __kmp_aux_set_affinity(void **mask) {
  int gtid;
  kmp_info_t *th;
  int retval;

  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  gtid = __kmp_entry_gtid();
  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf(
            "kmp_set_affinity: setting affinity mask for thread %d = %s\n",
            gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
    } else {
      unsigned proc;
      int num_procs = 0;

      KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
        if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
          KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
        }
        if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
          continue;
        }
        num_procs++;
      }
      if (num_procs == 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }

#if KMP_GROUP_AFFINITY
      if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
      }
#endif /* KMP_GROUP_AFFINITY */
    }
  }

  th = __kmp_threads[gtid];
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
  retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  if (retval == 0) {
    KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
  }

  th->th.th_current_place = KMP_PLACE_UNDEFINED;
  th->th.th_new_place = KMP_PLACE_UNDEFINED;
  th->th.th_first_place = 0;
  th->th.th_last_place = __kmp_affinity_num_masks - 1;

  // Turn off 4.0 affinity for the current thread at this parallel level.
  th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;

  return retval;
}
int __kmp_aux_get_affinity(void **mask) {
  int gtid;
  int retval;
#if KMP_OS_WINDOWS || KMP_DEBUG
  kmp_info_t *th;
#endif
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }
  gtid = __kmp_entry_gtid();
#if KMP_OS_WINDOWS || KMP_DEBUG
  th = __kmp_threads[gtid];
#else
  (void)gtid; // unused variable
#endif
  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);

  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  th->th.th_affin_mask);
        __kmp_printf(
            "kmp_get_affinity: stored affinity mask for thread %d = %s\n",
            gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
    }
  }

#if !KMP_OS_WINDOWS
  retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
  KA_TRACE(
      1000, (""); {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_printf(
            "kmp_get_affinity: system affinity mask for thread %d = %s\n",
            gtid, buf);
      });
  return retval;
#else
  (void)retval;
  KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
  return 0;
#endif /* !KMP_OS_WINDOWS */
}
int __kmp_aux_get_affinity_max_proc() {
  if (!KMP_AFFINITY_CAPABLE()) {
    return 0;
  }
#if KMP_GROUP_AFFINITY
  if (__kmp_num_proc_groups > 1) {
    return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
  }
#endif
  return __kmp_xproc;
}
int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }

  KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}
int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d "
                           "in affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return -2;
  }

  KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
  return 0;
}
int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
  if (!KMP_AFFINITY_CAPABLE()) {
    return -1;
  }

  KA_TRACE(
      1000, (""); {
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
                                  (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
                           "affinity mask for thread %d = %s\n",
                           proc, gtid, buf);
      });

  if (__kmp_env_consistency_check) {
    if ((mask == NULL) || (*mask == NULL)) {
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
    }
  }

  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
    return -1;
  }
  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
    return 0;
  }

  return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
}
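// Dynamic affinity settings - affinity balanced. Spreads nthreads evenly
// across cores, then binds thread tid either to a single hardware thread
// (fine granularity) or to all hardware threads of its core. Uniform
// topologies are handled arithmetically; non-uniform ones walk procarr[],
// which was laid out core by core in __kmp_aux_affinity_initialize.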
void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
  KMP_DEBUG_ASSERT(th);
  bool fine_gran = true;
  int tid = th->th.th_info.ds.ds_tid;

  // Do not perform balanced affinity for the hidden helper threads.
  if (KMP_HIDDEN_HELPER_THREAD(__kmp_gtid_from_thread(th)))
    return;

  switch (__kmp_affinity_gran) {
  case KMP_HW_THREAD:
    break;
  case KMP_HW_CORE:
    if (__kmp_nThreadsPerCore > 1) {
      fine_gran = false;
    }
    break;
  case KMP_HW_SOCKET:
    if (nCoresPerPkg > 1) {
      fine_gran = false;
    }
    break;
  default:
    fine_gran = false;
  }

  if (__kmp_topology->is_uniform()) {
    int coreID;
    int threadID;
    // Number of hyper threads per core in HT machine.
    int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
    // Number of cores.
    int ncores = __kmp_ncores;
    if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
      __kmp_nth_per_core = __kmp_avail_proc / nPackages;
      ncores = nPackages;
    }
    // How many threads will be bound to each core.
    int chunk = nthreads / ncores;
    // How many cores get an additional thread bound to them - "big cores".
    int big_cores = nthreads % ncores;
    // Number of threads on the big cores.
    int big_nth = (chunk + 1) * big_cores;
    if (tid < big_nth) {
      coreID = tid / (chunk + 1);
      threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
    } else { // tid >= big_nth
      coreID = (tid - big_cores) / chunk;
      threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
    }
    KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
                      "Illegal set affinity operation when not capable");

    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    if (fine_gran) {
      int osID =
          __kmp_topology->at(coreID * __kmp_nth_per_core + threadID).os_id;
      KMP_CPU_SET(osID, mask);
    } else {
      for (int i = 0; i < __kmp_nth_per_core; i++) {
        int osID;
        osID = __kmp_topology->at(coreID * __kmp_nth_per_core + i).os_id;
        KMP_CPU_SET(osID, mask);
      }
    }
    if (__kmp_affinity_verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
                 __kmp_gettid(), tid, buf);
    }
    __kmp_set_system_affinity(mask, TRUE);
  } else { // Non-uniform topology
    kmp_affin_mask_t *mask = th->th.th_affin_mask;
    KMP_CPU_ZERO(mask);

    int core_level =
        __kmp_affinity_find_core_level(__kmp_avail_proc, __kmp_aff_depth - 1);
    int ncores = __kmp_affinity_compute_ncores(
        __kmp_avail_proc, __kmp_aff_depth - 1, core_level);
    int nth_per_core = __kmp_affinity_max_proc_per_core(
        __kmp_avail_proc, __kmp_aff_depth - 1, core_level);

    // For performance, consider the special case
    // nthreads == __kmp_avail_proc.
    if (nthreads == __kmp_avail_proc) {
      if (fine_gran) {
        int osID = __kmp_topology->at(tid).os_id;
        KMP_CPU_SET(osID, mask);
      } else {
        int core =
            __kmp_affinity_find_core(tid, __kmp_aff_depth - 1, core_level);
        for (int i = 0; i < __kmp_avail_proc; i++) {
          int osID = __kmp_topology->at(i).os_id;
          if (__kmp_affinity_find_core(i, __kmp_aff_depth - 1, core_level) ==
              core) {
            KMP_CPU_SET(osID, mask);
          }
        }
      }
    } else if (nthreads <= ncores) {
      int core = 0;
      for (int i = 0; i < ncores; i++) {
        // Check whether this core from procarr[] is in the mask.
        int in_mask = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            in_mask = 1;
            break;
          }
        }
        if (in_mask) {
          if (tid == core) {
            for (int j = 0; j < nth_per_core; j++) {
              int osID = procarr[i * nth_per_core + j];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
                // For fine granularity it is enough to set the first
                // available osID for this core.
                if (fine_gran) {
                  break;
                }
              }
            }
            break;
          } else {
            core++;
          }
        }
      }
    } else { // nthreads > ncores
      // Array to save the number of processors at each core.
      int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
      // Array to save the number of cores with "x" available processors.
      int *ncores_with_x_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
      // Array to save the number of cores with # procs from x to
      // nth_per_core.
      int *ncores_with_x_to_max_procs =
          (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));

      for (int i = 0; i <= nth_per_core; i++) {
        ncores_with_x_procs[i] = 0;
        ncores_with_x_to_max_procs[i] = 0;
      }

      for (int i = 0; i < ncores; i++) {
        int cnt = 0;
        for (int j = 0; j < nth_per_core; j++) {
          if (procarr[i * nth_per_core + j] != -1) {
            cnt++;
          }
        }
        nproc_at_core[i] = cnt;
        ncores_with_x_procs[cnt]++;
      }

      for (int i = 0; i <= nth_per_core; i++) {
        for (int j = i; j <= nth_per_core; j++) {
          ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
        }
      }

      // Max number of processors.
      int nproc = nth_per_core * ncores;
      // An array to keep the number of threads for each context.
      int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc);
      for (int i = 0; i < nproc; i++) {
        newarr[i] = 0;
      }

      int nth = nthreads;
      int flag = 0;
      while (nth > 0) {
        for (int j = 1; j <= nth_per_core; j++) {
          int cnt = ncores_with_x_to_max_procs[j];
          for (int i = 0; i < ncores; i++) {
            // Skip cores with 0 processors.
            if (nproc_at_core[i] == 0) {
              continue;
            }
            for (int k = 0; k < nth_per_core; k++) {
              if (procarr[i * nth_per_core + k] != -1) {
                if (newarr[i * nth_per_core + k] == 0) {
                  newarr[i * nth_per_core + k] = 1;
                  cnt--;
                  nth--;
                  break;
                } else {
                  if (flag != 0) {
                    newarr[i * nth_per_core + k]++;
                    cnt--;
                    nth--;
                    break;
                  }
                }
              }
            }
            if (cnt == 0 || nth == 0) {
              break;
            }
          }
          if (nth == 0) {
            break;
          }
        }
        flag = 1;
      }
      int sum = 0;
      for (int i = 0; i < nproc; i++) {
        sum += newarr[i];
        if (sum > tid) {
          if (fine_gran) {
            int osID = procarr[i];
            KMP_CPU_SET(osID, mask);
          } else {
            int coreID = i / nth_per_core;
            for (int ii = 0; ii < nth_per_core; ii++) {
              int osID = procarr[coreID * nth_per_core + ii];
              if (osID != -1) {
                KMP_CPU_SET(osID, mask);
              }
            }
          }
          break;
        }
      }
      __kmp_free(newarr);
    }

    if (__kmp_affinity_verbose) {
      char buf[KMP_AFFIN_MASK_PRINT_LEN];
      __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
      KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
                 __kmp_gettid(), tid, buf);
    }
    __kmp_set_system_affinity(mask, TRUE);
  }
}
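// Binding of non-OpenMP threads to the initial ("full") affinity mask.
// Intended usage: save the current mask, call this function, run the
// non-OpenMP parallel region, then restore the saved mask.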
#if KMP_OS_LINUX || KMP_OS_FREEBSD
// We don't need this entry on Windows because GetProcessAffinityMask() is
// available there.
#ifdef __cplusplus
extern "C"
#endif
    int
    kmp_set_thread_affinity_mask_initial()
// The function returns 0 on success, -1 if we cannot bind the thread, and
// >0 (errno) if an error happened during binding.
{
  int gtid = __kmp_get_gtid();
  if (gtid < 0) {
    // Do not touch non-OpenMP threads.
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "non-omp thread, returning\n"));
    return -1;
  }
  if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                  "affinity not initialized, returning\n"));
    return -1;
  }
  KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
                "set full mask for thread %d\n",
                gtid));
  KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
  return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
}
#endif