#ifndef KMP_AFFINITY_H
#define KMP_AFFINITY_H

#include "kmp.h"
#include "kmp_os.h"

#if KMP_AFFINITY_SUPPORTED
#if KMP_USE_HWLOC
class KMPHwlocAffinity : public KMPAffinity {
public:
    class Mask : public KMPAffinity::Mask {
        hwloc_cpuset_t mask;
    public:
        Mask() { mask = hwloc_bitmap_alloc(); this->zero(); }
        ~Mask() { hwloc_bitmap_free(mask); }
        void set(int i) override { hwloc_bitmap_set(mask, i); }
        bool is_set(int i) const override { return hwloc_bitmap_isset(mask, i); }
        void clear(int i) override { hwloc_bitmap_clr(mask, i); }
        void zero() override { hwloc_bitmap_zero(mask); }
        void copy(const KMPAffinity::Mask* src) override {
            const Mask* convert = static_cast<const Mask*>(src);
            hwloc_bitmap_copy(mask, convert->mask);
        }
        void bitwise_and(const KMPAffinity::Mask* rhs) override {
            const Mask* convert = static_cast<const Mask*>(rhs);
            hwloc_bitmap_and(mask, mask, convert->mask);
        }
        void bitwise_or(const KMPAffinity::Mask* rhs) override {
            const Mask* convert = static_cast<const Mask*>(rhs);
            hwloc_bitmap_or(mask, mask, convert->mask);
        }
        void bitwise_not() override { hwloc_bitmap_not(mask, mask); }
        int begin() const override { return hwloc_bitmap_first(mask); }
        int end() const override { return -1; }
        int next(int previous) const override { return hwloc_bitmap_next(mask, previous); }
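        // get_system_affinity()/set_system_affinity() read or write the calling
        // thread's OS affinity through hwloc. On failure they either abort the
        // process (abort_on_error) or return the errno value to the caller.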
        int get_system_affinity(bool abort_on_error) override {
            KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                        "Illegal get affinity operation when not capable");
            int retval = hwloc_get_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
            if (retval >= 0) {
                return 0;
            }
            int error = errno;
            if (abort_on_error) {
                __kmp_msg(kmp_ms_fatal, KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
            }
            return error;
        }
        int set_system_affinity(bool abort_on_error) const override {
            KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                        "Illegal set affinity operation when not capable");
            int retval = hwloc_set_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
            if (retval >= 0) {
                return 0;
            }
            int error = errno;
            if (abort_on_error) {
                __kmp_msg(kmp_ms_fatal, KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
            }
            return error;
        }
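        // get_proc_group() reports which Windows processor group this mask is
        // confined to. On single-group systems it simply returns 1; otherwise it
        // returns the index of the one group with bits set, or -1 if the mask is
        // empty or spans several groups. hwloc exposes each group's 64-bit mask
        // as two 32-bit ulongs, so the loop below consumes bitmap words in pairs.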
        int get_proc_group() const override {
            int group = -1;
# if KMP_OS_WINDOWS
            if (__kmp_num_proc_groups == 1) {
                return 1;
            }
            for (int i = 0; i < __kmp_num_proc_groups; i++) {
                unsigned long first_32_bits = hwloc_bitmap_to_ith_ulong(mask, i*2);
                unsigned long second_32_bits = hwloc_bitmap_to_ith_ulong(mask, i*2+1);
                if (first_32_bits == 0 && second_32_bits == 0) {
                    continue;
                }
                if (group >= 0) {
                    return -1;
                }
                group = i;
            }
# endif /* KMP_OS_WINDOWS */
            return group;
        }
    };
    void determine_capable(const char* var) override {
        const hwloc_topology_support* topology_support;
        if (__kmp_hwloc_topology == NULL) {
            if (hwloc_topology_init(&__kmp_hwloc_topology) < 0) {
                __kmp_hwloc_error = TRUE;
                if (__kmp_affinity_verbose)
                    KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_init()");
            }
            if (hwloc_topology_load(__kmp_hwloc_topology) < 0) {
                __kmp_hwloc_error = TRUE;
                if (__kmp_affinity_verbose)
                    KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_load()");
            }
        }
        topology_support = hwloc_topology_get_support(__kmp_hwloc_topology);
        if (topology_support && topology_support->cpubind->set_thisthread_cpubind &&
            topology_support->cpubind->get_thisthread_cpubind &&
            topology_support->discovery->pu &&
            !__kmp_hwloc_error) {
            KMP_AFFINITY_ENABLE(TRUE);
        } else {
            __kmp_hwloc_error = TRUE;
            KMP_AFFINITY_DISABLE();
        }
    }
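    // bind_thread() pins the calling thread to the single logical CPU 'which'
    // by building a one-bit mask on the stack and handing it to
    // __kmp_set_system_affinity().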
    void bind_thread(int which) override {
        KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                    "Illegal set affinity operation when not capable");
        KMPAffinity::Mask *mask;
        KMP_CPU_ALLOC_ON_STACK(mask);
        KMP_CPU_ZERO(mask);
        KMP_CPU_SET(which, mask);
        __kmp_set_system_affinity(mask, TRUE);
        KMP_CPU_FREE_FROM_STACK(mask);
    }
    KMPAffinity::Mask* allocate_mask() override { return new Mask(); }
    void deallocate_mask(KMPAffinity::Mask* m) override { delete m; }
    KMPAffinity::Mask* allocate_mask_array(int num) override { return new Mask[num]; }
    void deallocate_mask_array(KMPAffinity::Mask* array) override {
        Mask* hwloc_array = static_cast<Mask*>(array);
        delete[] hwloc_array;
    }
    KMPAffinity::Mask* index_mask_array(KMPAffinity::Mask* array, int index) override {
        Mask* hwloc_array = static_cast<Mask*>(array);
        return &(hwloc_array[index]);
    }
    api_type get_api_type() const override { return HWLOC; }
};
#endif /* KMP_USE_HWLOC */
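// The native Linux* OS backend below issues sched_getaffinity()/sched_setaffinity()
// directly through syscall(). Older system headers may not define the __NR_* numbers,
// so per-architecture values are supplied here and cross-checked against any value
// the headers do provide.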
#if KMP_OS_LINUX
#include <sys/syscall.h>
# if KMP_ARCH_X86 || KMP_ARCH_ARM
#  ifndef __NR_sched_setaffinity
#   define __NR_sched_setaffinity  241
#  elif __NR_sched_setaffinity != 241
#   error Wrong code for setaffinity system call.
#  endif /* __NR_sched_setaffinity */
#  ifndef __NR_sched_getaffinity
#   define __NR_sched_getaffinity  242
#  elif __NR_sched_getaffinity != 242
#   error Wrong code for getaffinity system call.
#  endif /* __NR_sched_getaffinity */
# elif KMP_ARCH_AARCH64
#  ifndef __NR_sched_setaffinity
#   define __NR_sched_setaffinity  122
#  elif __NR_sched_setaffinity != 122
#   error Wrong code for setaffinity system call.
#  endif /* __NR_sched_setaffinity */
#  ifndef __NR_sched_getaffinity
#   define __NR_sched_getaffinity  123
#  elif __NR_sched_getaffinity != 123
#   error Wrong code for getaffinity system call.
#  endif /* __NR_sched_getaffinity */
# elif KMP_ARCH_X86_64
#  ifndef __NR_sched_setaffinity
#   define __NR_sched_setaffinity  203
#  elif __NR_sched_setaffinity != 203
#   error Wrong code for setaffinity system call.
#  endif /* __NR_sched_setaffinity */
#  ifndef __NR_sched_getaffinity
#   define __NR_sched_getaffinity  204
#  elif __NR_sched_getaffinity != 204
#   error Wrong code for getaffinity system call.
#  endif /* __NR_sched_getaffinity */
# elif KMP_ARCH_PPC64
#  ifndef __NR_sched_setaffinity
#   define __NR_sched_setaffinity  222
#  elif __NR_sched_setaffinity != 222
#   error Wrong code for setaffinity system call.
#  endif /* __NR_sched_setaffinity */
#  ifndef __NR_sched_getaffinity
#   define __NR_sched_getaffinity  223
#  elif __NR_sched_getaffinity != 223
#   error Wrong code for getaffinity system call.
#  endif /* __NR_sched_getaffinity */
# elif KMP_ARCH_MIPS
#  ifndef __NR_sched_setaffinity
#   define __NR_sched_setaffinity  4239
#  elif __NR_sched_setaffinity != 4239
#   error Wrong code for setaffinity system call.
#  endif /* __NR_sched_setaffinity */
#  ifndef __NR_sched_getaffinity
#   define __NR_sched_getaffinity  4240
#  elif __NR_sched_getaffinity != 4240
#   error Wrong code for getaffinity system call.
#  endif /* __NR_sched_getaffinity */
# elif KMP_ARCH_MIPS64
#  ifndef __NR_sched_setaffinity
#   define __NR_sched_setaffinity  5195
#  elif __NR_sched_setaffinity != 5195
#   error Wrong code for setaffinity system call.
#  endif /* __NR_sched_setaffinity */
#  ifndef __NR_sched_getaffinity
#   define __NR_sched_getaffinity  5196
#  elif __NR_sched_getaffinity != 5196
#   error Wrong code for getaffinity system call.
#  endif /* __NR_sched_getaffinity */
# else
#  error Unknown or unsupported architecture
# endif /* KMP_ARCH_* */

class KMPNativeAffinity : public KMPAffinity {
    class Mask : public KMPAffinity::Mask {
        typedef unsigned char mask_t;
        static const int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
    public:
        mask_t* mask;
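        // The mask is a byte array of __kmp_affin_mask_size bytes; bit i of the
        // logical CPU set lives at mask[i / BITS_PER_MASK_T], bit (i % BITS_PER_MASK_T).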
        Mask() { mask = (mask_t*)__kmp_allocate(__kmp_affin_mask_size); }
        ~Mask() { if (mask) __kmp_free(mask); }
        void set(int i) override { mask[i/BITS_PER_MASK_T] |= ((mask_t)1 << (i % BITS_PER_MASK_T)); }
        bool is_set(int i) const override { return (mask[i/BITS_PER_MASK_T] & ((mask_t)1 << (i % BITS_PER_MASK_T))); }
        void clear(int i) override { mask[i/BITS_PER_MASK_T] &= ~((mask_t)1 << (i % BITS_PER_MASK_T)); }
        void zero() override {
            for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
                mask[i] = 0;
        }
        void copy(const KMPAffinity::Mask* src) override {
            const Mask* convert = static_cast<const Mask*>(src);
            for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
                mask[i] = convert->mask[i];
        }
        void bitwise_and(const KMPAffinity::Mask* rhs) override {
            const Mask* convert = static_cast<const Mask*>(rhs);
            for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
                mask[i] &= convert->mask[i];
        }
        void bitwise_or(const KMPAffinity::Mask* rhs) override {
            const Mask* convert = static_cast<const Mask*>(rhs);
            for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
                mask[i] |= convert->mask[i];
        }
        void bitwise_not() override {
            for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
                mask[i] = ~(mask[i]);
        }
        int begin() const override {
            int retval = 0;
            while (retval < end() && !is_set(retval))
                ++retval;
            return retval;
        }
        int end() const override { return __kmp_affin_mask_size * BITS_PER_MASK_T; }
        int next(int previous) const override {
            int retval = previous + 1;
            while (retval < end() && !is_set(retval))
                ++retval;
            return retval;
        }
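        // The get/set pair below talks to the kernel directly via syscall(),
        // using the __NR_sched_getaffinity/__NR_sched_setaffinity numbers
        // established above, so no glibc cpu_set_t wrapper is needed.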
        int get_system_affinity(bool abort_on_error) override {
            KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                        "Illegal get affinity operation when not capable");
            int retval = syscall(__NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask);
            if (retval >= 0) {
                return 0;
            }
            int error = errno;
            if (abort_on_error) {
                __kmp_msg(kmp_ms_fatal, KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
            }
            return error;
        }
        int set_system_affinity(bool abort_on_error) const override {
            KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                        "Illegal set affinity operation when not capable");
            int retval = syscall(__NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask);
            if (retval >= 0) {
                return 0;
            }
            int error = errno;
            if (abort_on_error) {
                __kmp_msg(kmp_ms_fatal, KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
            }
            return error;
        }
    };
    void determine_capable(const char* env_var) override {
        __kmp_affinity_determine_capable(env_var);
    }
    void bind_thread(int which) override {
        __kmp_affinity_bind_thread(which);
    }
    KMPAffinity::Mask* allocate_mask() override {
        KMPNativeAffinity::Mask* retval = new Mask();
        return retval;
    }
    void deallocate_mask(KMPAffinity::Mask* m) override {
        KMPNativeAffinity::Mask* native_mask = static_cast<KMPNativeAffinity::Mask*>(m);
        delete native_mask;
    }
    KMPAffinity::Mask* allocate_mask_array(int num) override { return new Mask[num]; }
    void deallocate_mask_array(KMPAffinity::Mask* array) override {
        Mask* linux_array = static_cast<Mask*>(array);
        delete[] linux_array;
    }
    KMPAffinity::Mask* index_mask_array(KMPAffinity::Mask* array, int index) override {
        Mask* linux_array = static_cast<Mask*>(array);
        return &(linux_array[index]);
    }
    api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_LINUX */

#if KMP_OS_WINDOWS
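// Native Windows* OS backend: affinity is expressed per processor group, with one
// ULONG_PTR (mask_t) worth of bits for each group. A thread can only be bound
// within a single group at a time, via SetThreadGroupAffinity().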
class KMPNativeAffinity : public KMPAffinity {
    class Mask : public KMPAffinity::Mask {
        typedef ULONG_PTR mask_t;
        static const int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
    public:
        mask_t* mask;
        Mask() { mask = (mask_t*)__kmp_allocate(sizeof(mask_t) * __kmp_num_proc_groups); }
        ~Mask() { if (mask) __kmp_free(mask); }
        void set(int i) override { mask[i/BITS_PER_MASK_T] |= ((mask_t)1 << (i % BITS_PER_MASK_T)); }
        bool is_set(int i) const override { return (mask[i/BITS_PER_MASK_T] & ((mask_t)1 << (i % BITS_PER_MASK_T))); }
        void clear(int i) override { mask[i/BITS_PER_MASK_T] &= ~((mask_t)1 << (i % BITS_PER_MASK_T)); }
        void zero() override {
            for (size_t i = 0; i < __kmp_num_proc_groups; ++i)
                mask[i] = 0;
        }
        void copy(const KMPAffinity::Mask* src) override {
            const Mask* convert = static_cast<const Mask*>(src);
            for (size_t i = 0; i < __kmp_num_proc_groups; ++i)
                mask[i] = convert->mask[i];
        }
        void bitwise_and(const KMPAffinity::Mask* rhs) override {
            const Mask* convert = static_cast<const Mask*>(rhs);
            for (size_t i = 0; i < __kmp_num_proc_groups; ++i)
                mask[i] &= convert->mask[i];
        }
        void bitwise_or(const KMPAffinity::Mask* rhs) override {
            const Mask* convert = static_cast<const Mask*>(rhs);
            for (size_t i = 0; i < __kmp_num_proc_groups; ++i)
                mask[i] |= convert->mask[i];
        }
        void bitwise_not() override {
            for (size_t i = 0; i < __kmp_num_proc_groups; ++i)
                mask[i] = ~(mask[i]);
        }
        int begin() const override {
            int retval = 0;
            while (retval < end() && !is_set(retval))
                ++retval;
            return retval;
        }
        int end() const override { return __kmp_num_proc_groups * BITS_PER_MASK_T; }
        int next(int previous) const override {
            int retval = previous + 1;
            while (retval < end() && !is_set(retval))
                ++retval;
            return retval;
        }
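        // On systems with more than one processor group a thread can only be bound
        // within a single group, so set_system_affinity() rejects masks that span
        // groups and get_system_affinity() reads back one GROUP_AFFINITY entry.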
        int set_system_affinity(bool abort_on_error) const override {
            if (__kmp_num_proc_groups > 1) {
                GROUP_AFFINITY ga;
                int group = get_proc_group();
                if (group < 0) {
                    if (abort_on_error) {
                        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
                    }
                    return -1;
                }
                ga.Group = group;
                ga.Mask = mask[group];
                ga.Reserved[0] = ga.Reserved[1] = ga.Reserved[2] = 0;
                KMP_DEBUG_ASSERT(__kmp_SetThreadGroupAffinity != NULL);
                if (__kmp_SetThreadGroupAffinity(GetCurrentThread(), &ga, NULL) == 0) {
                    DWORD error = GetLastError();
                    if (abort_on_error) {
                        __kmp_msg(kmp_ms_fatal, KMP_MSG(CantSetThreadAffMask),
                                  KMP_ERR(error), __kmp_msg_null);
                    }
                    return error;
                }
            } else {
                if (!SetThreadAffinityMask(GetCurrentThread(), *mask)) {
                    DWORD error = GetLastError();
                    if (abort_on_error) {
                        __kmp_msg(kmp_ms_fatal, KMP_MSG(CantSetThreadAffMask),
                                  KMP_ERR(error), __kmp_msg_null);
                    }
                    return error;
                }
            }
            return 0;
        }
        int get_system_affinity(bool abort_on_error) override {
            if (__kmp_num_proc_groups > 1) {
                this->zero();
                GROUP_AFFINITY ga;
                KMP_DEBUG_ASSERT(__kmp_GetThreadGroupAffinity != NULL);
                if (__kmp_GetThreadGroupAffinity(GetCurrentThread(), &ga) == 0) {
                    DWORD error = GetLastError();
                    if (abort_on_error) {
                        __kmp_msg(kmp_ms_fatal, KMP_MSG(FunctionError, "GetThreadGroupAffinity()"),
                                  KMP_ERR(error), __kmp_msg_null);
                    }
                    return error;
                }
                if ((ga.Group < 0) || (ga.Group > __kmp_num_proc_groups) || (ga.Mask == 0)) {
                    return -1;
                }
                mask[ga.Group] = ga.Mask;
            } else {
                mask_t newMask, sysMask, retval;
                if (!GetProcessAffinityMask(GetCurrentProcess(), &newMask, &sysMask)) {
                    DWORD error = GetLastError();
                    if (abort_on_error) {
                        __kmp_msg(kmp_ms_fatal, KMP_MSG(FunctionError, "GetProcessAffinityMask()"),
                                  KMP_ERR(error), __kmp_msg_null);
                    }
                    return error;
                }
                retval = SetThreadAffinityMask(GetCurrentThread(), newMask);
                if (!retval) {
                    DWORD error = GetLastError();
                    if (abort_on_error) {
                        __kmp_msg(kmp_ms_fatal, KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                                  KMP_ERR(error), __kmp_msg_null);
                    }
                    return error;
                }
                newMask = SetThreadAffinityMask(GetCurrentThread(), retval);
                if (!newMask) {
                    DWORD error = GetLastError();
                    if (abort_on_error) {
                        __kmp_msg(kmp_ms_fatal, KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                                  KMP_ERR(error), __kmp_msg_null);
                    }
                    return error;
                }
                *mask = retval;
            }
            return 0;
        }
        int get_proc_group() const override {
            int group = -1;
            if (__kmp_num_proc_groups == 1) {
                return 1;
            }
            for (int i = 0; i < __kmp_num_proc_groups; i++) {
                if (mask[i] == 0)
                    continue;
                if (group >= 0)
                    return -1;
                group = i;
            }
            return group;
        }
    };
    void determine_capable(const char* env_var) override {
        __kmp_affinity_determine_capable(env_var);
    }
    void bind_thread(int which) override {
        __kmp_affinity_bind_thread(which);
    }
    KMPAffinity::Mask* allocate_mask() override { return new Mask(); }
    void deallocate_mask(KMPAffinity::Mask* m) override { delete m; }
    KMPAffinity::Mask* allocate_mask_array(int num) override { return new Mask[num]; }
    void deallocate_mask_array(KMPAffinity::Mask* array) override {
        Mask* windows_array = static_cast<Mask*>(array);
        delete[] windows_array;
    }
    KMPAffinity::Mask* index_mask_array(KMPAffinity::Mask* array, int index) override {
        Mask* windows_array = static_cast<Mask*>(array);
        return &(windows_array[index]);
    }
    api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_WINDOWS */
#endif /* KMP_AFFINITY_SUPPORTED */
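// An Address records the label path of one hardware thread through the detected
// topology (outermost level first, e.g. package / core / thread). Sorting an
// array of these labels places hardware threads that share a core or package
// next to each other.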
class Address {
public:
    static const unsigned maxDepth = 32;
    unsigned labels[maxDepth];
    unsigned childNums[maxDepth];
    unsigned depth;
    unsigned leader;
    Address(unsigned _depth)
      : depth(_depth), leader(FALSE) {
    }
    Address &operator=(const Address &b) {
        depth = b.depth;
        for (unsigned i = 0; i < depth; i++) {
            labels[i] = b.labels[i];
            childNums[i] = b.childNums[i];
        }
        return *this;
    }
    bool operator==(const Address &b) const {
        if (depth != b.depth)
            return false;
        for (unsigned i = 0; i < depth; i++)
            if (labels[i] != b.labels[i])
                return false;
        return true;
    }
    bool isClose(const Address &b, int level) const {
        if (depth != b.depth)
            return false;
        if ((unsigned)level >= depth)
            return true;
        for (unsigned i = 0; i < (depth - level); i++)
            if (labels[i] != b.labels[i])
                return false;
        return true;
    }
    bool operator!=(const Address &b) const {
        return !operator==(b);
    }
    void print() const {
        printf("Depth: %u --- ", depth);
        for (unsigned i = 0; i < depth; i++) {
            printf("%u ", labels[i]);
        }
    }
};
class AddrUnsPair {
public:
    Address first;
    unsigned second;
    AddrUnsPair(Address _first, unsigned _second)
      : first(_first), second(_second) {
    }
    AddrUnsPair &operator=(const AddrUnsPair &b) {
        first = b.first;
        second = b.second;
        return *this;
    }
    void print() const {
        printf("first = "); first.print();
        printf(" --- second = %u", second);
    }
    bool operator==(const AddrUnsPair &b) const {
        if (first != b.first) return false;
        if (second != b.second) return false;
        return true;
    }
    bool operator!=(const AddrUnsPair &b) const {
        return !operator==(b);
    }
};
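// qsort() comparator: orders AddrUnsPair entries lexicographically by their
// Address labels, outermost topology level first.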
static int
__kmp_affinity_cmp_Address_labels(const void *a, const void *b)
{
    const Address *aa = (const Address *)&(((AddrUnsPair *)a)->first);
    const Address *bb = (const Address *)&(((AddrUnsPair *)b)->first);
    unsigned depth = aa->depth;
    unsigned i;
    KMP_DEBUG_ASSERT(depth == bb->depth);
    for (i = 0; i < depth; i++) {
        if (aa->labels[i] < bb->labels[i]) return -1;
        if (aa->labels[i] > bb->labels[i]) return 1;
    }
    return 0;
}
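// hierarchy_info models the machine as a tree used by the runtime's hierarchical
// algorithms (e.g. the hierarchical barrier): numPerLevel[i] is the branching
// factor at level i (level 0 being the leaves), and skipPerLevel[i] is the number
// of leaves spanned by one node at level i, i.e. the product of the branching
// factors below it.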
class hierarchy_info {
public:
    static const kmp_uint32 maxLeaves = 4;
    static const kmp_uint32 minBranch = 4;
    kmp_uint32 maxLevels;
    kmp_uint32 depth;
    kmp_uint32 base_num_threads;
    enum init_status { initialized = 0, not_initialized = 1, initializing = 2 };
    volatile kmp_int8 uninitialized;
    volatile kmp_int8 resizing;
    kmp_uint32 *numPerLevel;
    kmp_uint32 *skipPerLevel;

    void deriveLevels(AddrUnsPair *adr2os, int num_addrs) {
        int hier_depth = adr2os[0].first.depth;
        int level = 0;
        for (int i = hier_depth - 1; i >= 0; --i) {
            int max = -1;
            for (int j = 0; j < num_addrs; ++j) {
                int next = adr2os[j].first.childNums[i];
                if (next > max) max = next;
            }
            numPerLevel[level] = max + 1;
            ++level;
        }
    }
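    // The hierarchy is built lazily by the first thread that needs it: init()
    // claims the structure with a compare-and-swap on 'uninitialized' and other
    // threads spin until the result is published as 'initialized'.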
    hierarchy_info() : maxLevels(7), depth(1), uninitialized(not_initialized), resizing(0) {}

    void fini() { if (!uninitialized && numPerLevel) __kmp_free(numPerLevel); }

    void init(AddrUnsPair *adr2os, int num_addrs)
    {
        kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(&uninitialized, not_initialized, initializing);
        if (bool_result == 0) {
            while (TCR_1(uninitialized) != initialized) KMP_CPU_PAUSE();
            return;
        }
        KMP_DEBUG_ASSERT(bool_result == 1);

        numPerLevel = (kmp_uint32 *)__kmp_allocate(maxLevels*2*sizeof(kmp_uint32));
        skipPerLevel = &(numPerLevel[maxLevels]);
        for (kmp_uint32 i=0; i<maxLevels; ++i) {
            numPerLevel[i] = 1;
            skipPerLevel[i] = 1;
        }

        if (adr2os) {
            qsort(adr2os, num_addrs, sizeof(*adr2os), __kmp_affinity_cmp_Address_labels);
            deriveLevels(adr2os, num_addrs);
        } else {
            numPerLevel[0] = maxLeaves;
            numPerLevel[1] = num_addrs/maxLeaves;
            if (num_addrs%maxLeaves) numPerLevel[1]++;
        }

        base_num_threads = num_addrs;
        for (int i=maxLevels-1; i>=0; --i)
            if (numPerLevel[i] != 1 || depth > 1)
                depth++;

        kmp_uint32 branch = minBranch;
        if (numPerLevel[0] == 1) branch = num_addrs/maxLeaves;
        if (branch < minBranch) branch = minBranch;
        for (kmp_uint32 d=0; d<depth-1; ++d) {
            while (numPerLevel[d] > branch || (d==0 && numPerLevel[d]>maxLeaves)) {
                if (numPerLevel[d] & 1) numPerLevel[d]++;
                numPerLevel[d] = numPerLevel[d] >> 1;
                if (numPerLevel[d+1] == 1) depth++;
                numPerLevel[d+1] = numPerLevel[d+1] << 1;
            }
            if (numPerLevel[0] == 1) {
                branch = branch >> 1;
                if (branch < 4) branch = minBranch;
            }
        }

        for (kmp_uint32 i=1; i<depth; ++i)
            skipPerLevel[i] = numPerLevel[i-1] * skipPerLevel[i-1];
        for (kmp_uint32 i=depth; i<maxLevels; ++i)
            skipPerLevel[i] = 2*skipPerLevel[i-1];

        uninitialized = initialized;
    }
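    // resize() only ever grows the hierarchy: when more threads than
    // base_num_threads appear, extra top levels are added, each doubling the
    // capacity of the tree, and numPerLevel/skipPerLevel are reallocated if the
    // current maxLevels bound is exceeded.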
    void resize(kmp_uint32 nproc)
    {
        kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
        while (bool_result == 0) {
            KMP_CPU_PAUSE();
            if (nproc <= base_num_threads)
                return;
            bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
        }
        KMP_DEBUG_ASSERT(bool_result != 0);
        if (nproc <= base_num_threads)
            return;

        kmp_uint32 old_sz = skipPerLevel[depth-1];
        kmp_uint32 incs = 0, old_maxLevels = maxLevels;
        for (kmp_uint32 i=depth; i<maxLevels && nproc>old_sz; ++i) {
            skipPerLevel[i] = 2*skipPerLevel[i-1];
            numPerLevel[i-1] *= 2;
            old_sz *= 2;
            depth++;
        }
        if (nproc > old_sz) {
            while (nproc > old_sz) {
                old_sz *= 2;
                incs++;
                depth++;
            }
            maxLevels += incs;

            kmp_uint32 *old_numPerLevel = numPerLevel;
            kmp_uint32 *old_skipPerLevel = skipPerLevel;
            numPerLevel = skipPerLevel = NULL;
            numPerLevel = (kmp_uint32 *)__kmp_allocate(maxLevels*2*sizeof(kmp_uint32));
            skipPerLevel = &(numPerLevel[maxLevels]);

            for (kmp_uint32 i=0; i<old_maxLevels; ++i) {
                numPerLevel[i] = old_numPerLevel[i];
                skipPerLevel[i] = old_skipPerLevel[i];
            }
            for (kmp_uint32 i=old_maxLevels; i<maxLevels; ++i) {
                numPerLevel[i] = 1;
                skipPerLevel[i] = 1;
            }
            __kmp_free(old_numPerLevel);
        }

        for (kmp_uint32 i=old_maxLevels; i<maxLevels; ++i)
            skipPerLevel[i] = 2*skipPerLevel[i-1];

        base_num_threads = nproc;
        resizing = 0;
    }
};

#endif // KMP_AFFINITY_H