#ifndef KMP_AFFINITY_H
#define KMP_AFFINITY_H

#include "kmp.h"
#include "kmp_os.h"

#if KMP_AFFINITY_SUPPORTED

class KMPHwlocAffinity : public KMPAffinity {
public:
  class Mask : public KMPAffinity::Mask {
    hwloc_cpuset_t mask;

  public:
    Mask() { mask = hwloc_bitmap_alloc(); }
    ~Mask() { hwloc_bitmap_free(mask); }
    void set(int i) override { hwloc_bitmap_set(mask, i); }
    bool is_set(int i) const override { return hwloc_bitmap_isset(mask, i); }
    void clear(int i) override { hwloc_bitmap_clr(mask, i); }
    void zero() override { hwloc_bitmap_zero(mask); }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      hwloc_bitmap_copy(mask, convert->mask);
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_and(mask, mask, convert->mask);
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_or(mask, mask, convert->mask);
    }
    void bitwise_not() override { hwloc_bitmap_not(mask, mask); }
    int begin() const override { return hwloc_bitmap_first(mask); }
    int end() const override { return -1; }
    int next(int previous) const override {
      return hwloc_bitmap_next(mask, previous);
    }
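    // begin()/next()/end() form a simple iteration protocol over the set
    // bits. For the hwloc mask, end() is -1 because hwloc_bitmap_next()
    // returns -1 once no further bit is set. Illustrative loop (a sketch,
    // not code from the runtime):
    //
    //   for (int cpu = m->begin(); cpu != m->end(); cpu = m->next(cpu))
    //     printf("logical CPU %d is in the mask\n", cpu);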
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
      int retval =
          hwloc_get_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0)
        return 0;
      int error = errno;
      if (abort_on_error)
        __kmp_msg(kmp_ms_fatal, KMP_MSG(FatalSysError), KMP_ERR(error),
                  __kmp_msg_null);
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
      int retval =
          hwloc_set_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0)
        return 0;
      int error = errno;
      if (abort_on_error)
        __kmp_msg(kmp_ms_fatal, KMP_MSG(FatalSysError), KMP_ERR(error),
                  __kmp_msg_null);
      return error;
    }
    int get_proc_group() const override {
      int group = -1;
#if KMP_OS_WINDOWS
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        // Each Windows processor group covers two 32-bit ulongs of the bitmap.
        unsigned long first_32_bits = hwloc_bitmap_to_ith_ulong(mask, i * 2);
        unsigned long second_32_bits =
            hwloc_bitmap_to_ith_ulong(mask, i * 2 + 1);
        if (first_32_bits == 0 && second_32_bits == 0) {
          continue;
        }
        if (group >= 0) {
          return -1;
        }
        group = i;
      }
#endif /* KMP_OS_WINDOWS */
      return group;
    }
  };
  void determine_capable(const char *var) override {
    const hwloc_topology_support *topology_support;
    if (__kmp_hwloc_topology == NULL) {
      if (hwloc_topology_init(&__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_init()");
      }
      if (hwloc_topology_load(__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_load()");
      }
    }
    topology_support = hwloc_topology_get_support(__kmp_hwloc_topology);
    // Affinity is usable only if the system can set/get this thread's
    // binding, processing units are discoverable, and no hwloc call failed.
    if (topology_support && topology_support->cpubind->set_thisthread_cpubind &&
        topology_support->cpubind->get_thisthread_cpubind &&
        topology_support->discovery->pu && !__kmp_hwloc_error) {
      // Enables affinity according to the KMP_AFFINITY_CAPABLE() macro.
      KMP_AFFINITY_ENABLE(TRUE);
    } else {
      // Indicate that hwloc didn't work and disable affinity.
      __kmp_hwloc_error = TRUE;
      KMP_AFFINITY_DISABLE();
    }
  }
  void bind_thread(int which) override {
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                "Illegal set affinity operation when not capable");
    KMPAffinity::Mask *mask;
    KMP_CPU_ALLOC_ON_STACK(mask);
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(which, mask);
    __kmp_set_system_affinity(mask, TRUE);
    KMP_CPU_FREE_FROM_STACK(mask);
  }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    delete[] hwloc_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    return &(hwloc_array[index]);
  }
  api_type get_api_type() const override { return HWLOC; }
};
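// Illustrative usage sketch (not part of the runtime; the object name and
// call site below are assumed, not quoted): affinity backends are driven
// through the abstract KMPAffinity interface, e.g.
//
//   KMPAffinity *affinity = new KMPHwlocAffinity();
//   affinity->determine_capable("KMP_AFFINITY");
//   KMPAffinity::Mask *m = affinity->allocate_mask();
//   m->zero();
//   m->set(3); // restrict to logical CPU 3
//   m->set_system_affinity(/*abort_on_error=*/true);
//   affinity->deallocate_mask(m);
//   delete affinity;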
#if KMP_OS_LINUX
/* On some of the older OS's that we build on, these constants aren't present
   in <asm/unistd.h> #included from <sys/syscall.h>. They must be the same on
   all systems of the same arch where they are defined, and they cannot
   change, so hard-coded fallbacks are safe. */
#include <sys/syscall.h>
#if KMP_ARCH_X86 || KMP_ARCH_ARM
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 241
#elif __NR_sched_setaffinity != 241
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 242
#elif __NR_sched_getaffinity != 242
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_AARCH64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 122
#elif __NR_sched_setaffinity != 122
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 123
#elif __NR_sched_getaffinity != 123
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_X86_64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 203
#elif __NR_sched_setaffinity != 203
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 204
#elif __NR_sched_getaffinity != 204
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_PPC64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 222
#elif __NR_sched_setaffinity != 222
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 223
#elif __NR_sched_getaffinity != 223
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_MIPS
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 4239
#elif __NR_sched_setaffinity != 4239
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 4240
#elif __NR_sched_getaffinity != 4240
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_MIPS64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 5195
#elif __NR_sched_setaffinity != 5195
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 5196
#elif __NR_sched_getaffinity != 5196
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#else
#error Unknown or unsupported architecture
#endif /* KMP_ARCH_* */

class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef unsigned char mask_t; // underlying bit-mask storage type
    static const int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;

  public:
    mask_t *mask;
    Mask() { mask = (mask_t *)__kmp_allocate(__kmp_affin_mask_size); }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= ((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & ((mask_t)1 << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~((mask_t)1 << (i % BITS_PER_MASK_T));
    }
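    // The mask is a packed bit vector: bit i lives in element i / CHAR_BIT
    // at bit position i % CHAR_BIT. Worked example (added for illustration):
    // with mask_t == unsigned char, set(11) updates mask[1] by OR-ing in
    // ((mask_t)1 << 3).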
    void zero() override {
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] = 0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override { return __kmp_affin_mask_size * BITS_PER_MASK_T; }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
      int retval =
          syscall(__NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask);
      if (retval >= 0)
        return 0;
      int error = errno;
      if (abort_on_error) {
        __kmp_msg(kmp_ms_fatal, KMP_MSG(FatalSysError), KMP_ERR(error),
                  __kmp_msg_null);
      }
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
      int retval =
          syscall(__NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask);
      if (retval >= 0)
        return 0;
      int error = errno;
      if (abort_on_error) {
        __kmp_msg(kmp_ms_fatal, KMP_MSG(FatalSysError), KMP_ERR(error),
                  __kmp_msg_null);
      }
      return error;
    }
  };
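  // The masks above talk to the kernel through the raw
  // syscall(__NR_sched_getaffinity / __NR_sched_setaffinity, ...) interface
  // rather than the glibc wrappers, which is why the mask size in bytes
  // (__kmp_affin_mask_size) is passed explicitly. A rough standalone
  // equivalent (sketch with an assumed buffer size, not runtime code):
  //
  //   unsigned char buf[128] = {0};
  //   if (syscall(__NR_sched_getaffinity, 0 /*current thread*/,
  //               sizeof(buf), buf) < 0)
  //     perror("sched_getaffinity");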
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override {
    KMPNativeAffinity::Mask *retval = new Mask();
    return retval;
  }
  void deallocate_mask(KMPAffinity::Mask *m) override {
    KMPNativeAffinity::Mask *native_mask =
        static_cast<KMPNativeAffinity::Mask *>(m);
    delete native_mask;
  }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *linux_array = static_cast<Mask *>(array);
    delete[] linux_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *linux_array = static_cast<Mask *>(array);
    return &(linux_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_LINUX */
#if KMP_OS_WINDOWS
class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef ULONG_PTR mask_t; // underlying bit-mask storage type
    static const int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
    mask_t *mask;

  public:
    Mask() {
      mask = (mask_t *)__kmp_allocate(sizeof(mask_t) * __kmp_num_proc_groups);
    }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= ((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & ((mask_t)1 << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    void zero() override {
      for (size_t i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = 0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      for (size_t i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (size_t i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (size_t i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      for (size_t i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override { return __kmp_num_proc_groups * BITS_PER_MASK_T; }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int set_system_affinity(bool abort_on_error) const override {
      if (__kmp_num_proc_groups > 1) {
        // Check for a valid mask.
        GROUP_AFFINITY ga;
        int group = get_proc_group();
        if (group < 0) {
          if (abort_on_error) {
            KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
          }
          return -1;
        }
        // Transform the bit vector into a GROUP_AFFINITY struct and make the
        // system call to set affinity.
        ga.Group = group;
        ga.Mask = mask[group];
        ga.Reserved[0] = ga.Reserved[1] = ga.Reserved[2] = 0;

        KMP_DEBUG_ASSERT(__kmp_SetThreadGroupAffinity != NULL);
        if (__kmp_SetThreadGroupAffinity(GetCurrentThread(), &ga, NULL) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_msg(kmp_ms_fatal, KMP_MSG(CantSetThreadAffMask),
                      KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
      } else {
        if (!SetThreadAffinityMask(GetCurrentThread(), *mask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_msg(kmp_ms_fatal, KMP_MSG(CantSetThreadAffMask),
                      KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
      }
      return 0;
    }
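    // With more than one processor group, Windows affinity must be set per
    // group through SetThreadGroupAffinity() and a GROUP_AFFINITY{Group,
    // Mask} pair, which is why this Mask keeps one mask_t (a KAFFINITY word)
    // per group; single-group systems fall back to the legacy
    // SetThreadAffinityMask() path.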
    int get_system_affinity(bool abort_on_error) override {
      if (__kmp_num_proc_groups > 1) {
        this->zero();
        GROUP_AFFINITY ga;
        KMP_DEBUG_ASSERT(__kmp_GetThreadGroupAffinity != NULL);
        if (__kmp_GetThreadGroupAffinity(GetCurrentThread(), &ga) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_msg(kmp_ms_fatal,
                      KMP_MSG(FunctionError, "GetThreadGroupAffinity()"),
                      KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        if ((ga.Group < 0) || (ga.Group > __kmp_num_proc_groups) ||
            (ga.Mask == 0)) {
          return -1;
        }
        mask[ga.Group] = ga.Mask;
      } else {
        mask_t newMask, sysMask, retval;
        if (!GetProcessAffinityMask(GetCurrentProcess(), &newMask, &sysMask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_msg(kmp_ms_fatal,
                      KMP_MSG(FunctionError, "GetProcessAffinityMask()"),
                      KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        retval = SetThreadAffinityMask(GetCurrentThread(), newMask);
        if (!retval) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_msg(kmp_ms_fatal,
                      KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                      KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        newMask = SetThreadAffinityMask(GetCurrentThread(), retval);
        if (!newMask) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_msg(kmp_ms_fatal,
                      KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                      KMP_ERR(error), __kmp_msg_null);
          }
        }
        *mask = retval;
      }
      return 0;
    }
    int get_proc_group() const override {
      int group = -1;
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        if (mask[i] != 0) {
          if (group >= 0)
            return -1; // bits set in more than one group
          group = i;
        }
      }
      return group;
    }
  };
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *windows_array = static_cast<Mask *>(array);
    delete[] windows_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *windows_array = static_cast<Mask *>(array);
    return &(windows_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_WINDOWS */
#endif /* KMP_AFFINITY_SUPPORTED */
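// Only one of the two KMPNativeAffinity definitions above is compiled into a
// given build: the KMP_OS_LINUX and KMP_OS_WINDOWS guards are mutually
// exclusive, so the same class name can be reused per platform.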
class Address {
public:
  static const unsigned maxDepth = 32;
  unsigned labels[maxDepth];
  unsigned childNums[maxDepth];
  unsigned depth;
  unsigned leader;
  Address(unsigned _depth) : depth(_depth), leader(FALSE) {}
  Address &operator=(const Address &b) {
    depth = b.depth;
    for (unsigned i = 0; i < depth; i++) {
      labels[i] = b.labels[i];
      childNums[i] = b.childNums[i];
    }
    leader = FALSE;
    return *this;
  }
  bool operator==(const Address &b) const {
    if (depth != b.depth)
      return false;
    for (unsigned i = 0; i < depth; i++)
      if (labels[i] != b.labels[i])
        return false;
    return true;
  }
  bool isClose(const Address &b, int level) const {
    if (depth != b.depth)
      return false;
    if ((unsigned)level >= depth)
      return true;
    for (unsigned i = 0; i < (depth - level); i++)
      if (labels[i] != b.labels[i])
        return false;
    return true;
  }
  bool operator!=(const Address &b) const { return !operator==(b); }
  void print() const {
    unsigned i;
    printf("Depth: %u --- ", depth);
    for (i = 0; i < depth; i++) {
      printf("%u ", labels[i]);
    }
  }
};
class AddrUnsPair {
public:
  Address first;
  unsigned second;
  AddrUnsPair(Address _first, unsigned _second)
      : first(_first), second(_second) {}
  AddrUnsPair &operator=(const AddrUnsPair &b) {
    first = b.first;
    second = b.second;
    return *this;
  }
  void print() const {
    printf("first = ");
    first.print();
    printf(" --- second = %u", second);
  }
  bool operator==(const AddrUnsPair &b) const {
    if (first != b.first)
      return false;
    if (second != b.second)
      return false;
    return true;
  }
  bool operator!=(const AddrUnsPair &b) const { return !operator==(b); }
};
static int __kmp_affinity_cmp_Address_labels(const void *a, const void *b) {
  const Address *aa = &(((const AddrUnsPair *)a)->first);
  const Address *bb = &(((const AddrUnsPair *)b)->first);
  unsigned depth = aa->depth;
  unsigned i;
  KMP_DEBUG_ASSERT(depth == bb->depth);
  for (i = 0; i < depth; i++) {
    if (aa->labels[i] < bb->labels[i])
      return -1;
    if (aa->labels[i] > bb->labels[i])
      return 1;
  }
  return 0;
}
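// This is a qsort()-style comparator that orders AddrUnsPair records
// lexicographically by their label vectors; hierarchy_info::init() below
// uses it as
//
//   qsort(adr2os, num_addrs, sizeof(*adr2os),
//         __kmp_affinity_cmp_Address_labels);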
/* Machine-specific hierarchy info, computed once at init, mapping threads
   onto the machine topology (or a best guess at it) for efficient
   hierarchical barriers. */
class hierarchy_info {
public:
  // Default leaf count and branching factor when no affinity info is known.
  static const kmp_uint32 maxLeaves = 4;
  static const kmp_uint32 minBranch = 4;
  // Number of levels; oversubscription adds levels, doubling capacity each.
  kmp_uint32 maxLevels;
  // Depth of the machine hierarchy (longest root-to-leaf path).
  kmp_uint32 depth;
  kmp_uint32 base_num_threads;
  enum init_status { initialized = 0, not_initialized = 1, initializing = 2 };
  volatile kmp_int8 uninitialized; // one of the init_status values
  volatile kmp_int8 resizing; // 0 = not resizing, 1 = resizing
  // numPerLevel[i]: children per parent of a level-i node (level 0 = leaves);
  // skipPerLevel[i]: leaves spanned by one level-i node. Empty levels are 1.
  kmp_uint32 *numPerLevel;
  kmp_uint32 *skipPerLevel;
  void deriveLevels(AddrUnsPair *adr2os, int num_addrs) {
    int hier_depth = adr2os[0].first.depth;
    int level = 0;
    for (int i = hier_depth - 1; i >= 0; --i) {
      int max = -1;
      for (int j = 0; j < num_addrs; ++j) {
        int next = adr2os[j].first.childNums[i];
        if (next > max)
          max = next;
      }
      numPerLevel[level] = max + 1;
      ++level;
    }
  }
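  // deriveLevels() walks the address labels from the innermost hierarchy
  // level outward (i = hier_depth - 1 down to 0), so numPerLevel[0] ends up
  // as the fan-out at the leaf level (e.g. hardware threads per core) and
  // higher indices move toward the root.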
  hierarchy_info()
      : maxLevels(7), depth(1), uninitialized(not_initialized), resizing(0) {}

  void fini() {
    if (!uninitialized && numPerLevel) {
      __kmp_free(numPerLevel);
      numPerLevel = NULL;
      uninitialized = not_initialized;
    }
  }
  /** Preallocates the hierarchy for a given number of threads or max size. */
  void init(AddrUnsPair *adr2os, int num_addrs) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(
        &uninitialized, not_initialized, initializing);
    if (bool_result == 0) { // Wait for initialization
      while (TCR_1(uninitialized) != initialized)
        KMP_CPU_PAUSE();
      return;
    }
    KMP_DEBUG_ASSERT(bool_result == 1);

    // numPerLevel and skipPerLevel share a single allocation.
    numPerLevel =
        (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
    skipPerLevel = &(numPerLevel[maxLevels]);
    for (kmp_uint32 i = 0; i < maxLevels; ++i) { // init levels to 1 item each
      numPerLevel[i] = 1;
      skipPerLevel[i] = 1;
    }

    // Sort table by physical ID
    if (adr2os) {
      qsort(adr2os, num_addrs, sizeof(*adr2os),
            __kmp_affinity_cmp_Address_labels);
      deriveLevels(adr2os, num_addrs);
    } else {
      numPerLevel[0] = maxLeaves;
      numPerLevel[1] = num_addrs / maxLeaves;
      if (num_addrs % maxLeaves)
        numPerLevel[1]++;
    }

    base_num_threads = num_addrs;
    // Count the non-empty levels to get the depth.
    for (int i = maxLevels - 1; i >= 0; --i)
      if (numPerLevel[i] != 1 || depth > 1) // only count one top-level '1'
        depth++;

    kmp_uint32 branch = minBranch;
    if (numPerLevel[0] == 1)
      branch = num_addrs / maxLeaves;
    if (branch < minBranch)
      branch = minBranch;
    for (kmp_uint32 d = 0; d < depth - 1; ++d) { // optimize hierarchy width
      while (numPerLevel[d] > branch ||
             (d == 0 && numPerLevel[d] > maxLeaves)) { // max maxLeaves on level 0
        if (numPerLevel[d] & 1)
          numPerLevel[d]++;
        numPerLevel[d] = numPerLevel[d] >> 1;
        if (numPerLevel[d + 1] == 1) // shift size up a level
          depth++;
        numPerLevel[d + 1] = numPerLevel[d + 1] << 1;
      }
      if (numPerLevel[0] == 1) {
        branch = branch >> 1;
        if (branch < 4)
          branch = minBranch;
      }
    }

    // skipPerLevel[i] accumulates the number of leaves under one level-i node.
    for (kmp_uint32 i = 1; i < depth; ++i)
      skipPerLevel[i] = numPerLevel[i - 1] * skipPerLevel[i - 1];
    // Fill in the remaining levels for the oversubscription case.
    for (kmp_uint32 i = depth; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    uninitialized = initialized; // One writer
  }
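  // Worked example (assumed topology, for illustration only): with 4
  // packages, 4 cores per package and 2 threads per core, deriveLevels()
  // produces numPerLevel = {2, 4, 4, 1, 1, 1, 1}; the loops above then give
  // depth = 4 and skipPerLevel = {1, 2, 8, 32, 64, 128, 256}, i.e. a node at
  // level 2 (a package) spans 8 leaves, and levels past the real depth keep
  // doubling to absorb oversubscription.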
  // Resize the hierarchy if nproc changes to something larger than before
  void resize(kmp_uint32 nproc) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    while (bool_result == 0) { // someone else is trying to resize
      KMP_CPU_PAUSE();
      if (nproc <= base_num_threads) // happy with other thread's resize
        return;
      else // try to resize
        bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    }
    KMP_DEBUG_ASSERT(bool_result != 0);
    if (nproc <= base_num_threads)
      return; // happy with other thread's resize
    kmp_uint32 old_sz = skipPerLevel[depth - 1];
    kmp_uint32 incs = 0, old_maxLevels = maxLevels;
    // First see if the old maxLevels is enough to contain the new size.
    for (kmp_uint32 i = depth; i < maxLevels && nproc > old_sz; ++i) {
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];
      numPerLevel[i - 1] *= 2;
      old_sz *= 2;
      depth++;
    }
    if (nproc > old_sz) { // Not enough space; expand the hierarchy.
      while (nproc > old_sz) {
        old_sz *= 2;
        incs++;
        depth++;
      }
      maxLevels += incs;
      // Resize arrays
      kmp_uint32 *old_numPerLevel = numPerLevel;
      kmp_uint32 *old_skipPerLevel = skipPerLevel;
      numPerLevel = skipPerLevel = NULL;
      numPerLevel =
          (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
      skipPerLevel = &(numPerLevel[maxLevels]);
      // Copy old elements, then initialize the new ones to 1.
      for (kmp_uint32 i = 0; i < old_maxLevels; ++i) {
        numPerLevel[i] = old_numPerLevel[i];
        skipPerLevel[i] = old_skipPerLevel[i];
      }
      for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i) {
        numPerLevel[i] = 1;
        skipPerLevel[i] = 1;
      }
      __kmp_free(old_numPerLevel);
    }
    // Fill in oversubscription levels of hierarchy
    for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    base_num_threads = nproc;
    resizing = 0; // One writer
  }
};
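// resize() only ever grows the hierarchy: each extra level doubles the thread
// capacity (skipPerLevel[i] = 2 * skipPerLevel[i - 1]), so growing from a
// base capacity of 64 threads to 100 adds a single level with capacity 128.
// The numbers here are an illustrative example, not taken from the source.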
#endif // KMP_AFFINITY_H