/*
* kmp_affinity.cpp -- affinity management
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_str.h"
#include "kmp_wrapper_getpid.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif
// Store the real or imagined machine hierarchy here
static hierarchy_info machine_hierarchy;
void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }
void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
kmp_uint32 depth;
// The test below is true if affinity is available, but set to "none". We need
// to initialize on first use of the hierarchical barrier.
if (TCR_1(machine_hierarchy.uninitialized))
machine_hierarchy.init(NULL, nproc);
// Adjust the hierarchy in case the number of threads exceeds the original count
if (nproc > machine_hierarchy.base_num_threads)
machine_hierarchy.resize(nproc);
depth = machine_hierarchy.depth;
KMP_DEBUG_ASSERT(depth > 0);
thr_bar->depth = depth;
thr_bar->base_leaf_kids = (kmp_uint8)machine_hierarchy.numPerLevel[0] - 1;
thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}
#if KMP_AFFINITY_SUPPORTED
bool KMPAffinity::picked_api = false;
void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
void KMPAffinity::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::pick_api() {
KMPAffinity *affinity_dispatch;
if (picked_api)
return;
#if KMP_USE_HWLOC
// Only use Hwloc if affinity isn't explicitly disabled and
// user requests Hwloc topology method
if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
__kmp_affinity_type != affinity_disabled) {
affinity_dispatch = new KMPHwlocAffinity();
} else
#endif
{
affinity_dispatch = new KMPNativeAffinity();
}
__kmp_affinity_dispatch = affinity_dispatch;
picked_api = true;
}
void KMPAffinity::destroy_api() {
if (__kmp_affinity_dispatch != NULL) {
delete __kmp_affinity_dispatch;
__kmp_affinity_dispatch = NULL;
picked_api = false;
}
}
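// Advance 'scan' to the terminating null character so that subsequent
// KMP_SNPRINTF calls append to the buffer instead of overwriting it.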
#define KMP_ADVANCE_SCAN(scan) \
while (*scan != '\0') { \
scan++; \
}
// Print the affinity mask to the character array in a pretty format.
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
char *__kmp_affinity_print_mask(char *buf, int buf_len,
kmp_affin_mask_t *mask) {
int start = 0, finish = 0, previous = 0;
bool first_range;
KMP_ASSERT(buf);
KMP_ASSERT(buf_len >= 40);
KMP_ASSERT(mask);
char *scan = buf;
char *end = buf + buf_len - 1;
// Check for empty set.
if (mask->begin() == mask->end()) {
KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
KMP_ADVANCE_SCAN(scan);
KMP_ASSERT(scan <= end);
return buf;
}
first_range = true;
start = mask->begin();
while (1) {
// Find next range
// [start, previous] is inclusive range of contiguous bits in mask
for (finish = mask->next(start), previous = start;
finish == previous + 1 && finish != mask->end();
finish = mask->next(finish)) {
previous = finish;
}
// The first range does not need a comma printed before it, but the rest
// of the ranges do need a comma beforehand
if (!first_range) {
KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
KMP_ADVANCE_SCAN(scan);
} else {
first_range = false;
}
// Range with three or more contiguous bits in the affinity mask
if (previous - start > 1) {
KMP_SNPRINTF(scan, end - scan + 1, "%d-%d", static_cast<int>(start),
static_cast<int>(previous));
} else {
// Range with one or two contiguous bits in the affinity mask
KMP_SNPRINTF(scan, end - scan + 1, "%d", static_cast<int>(start));
KMP_ADVANCE_SCAN(scan);
if (previous - start > 0) {
KMP_SNPRINTF(scan, end - scan + 1, ",%d", static_cast<int>(previous));
}
}
KMP_ADVANCE_SCAN(scan);
// Start over with new start point
start = finish;
if (start == mask->end())
break;
// Check for overflow
if (end - scan < 2)
break;
}
// Check for overflow
KMP_ASSERT(scan <= end);
return buf;
}
#undef KMP_ADVANCE_SCAN
// Print the affinity mask to the string buffer object in a pretty format
// The format is a comma separated list of non-negative integers or integer
// ranges: e.g., 1,2,3-5,7,9-15
// The format can also be the string "{<empty>}" if no bits are set in mask
kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
kmp_affin_mask_t *mask) {
int start = 0, finish = 0, previous = 0;
bool first_range;
KMP_ASSERT(buf);
KMP_ASSERT(mask);
__kmp_str_buf_clear(buf);
// Check for empty set.
if (mask->begin() == mask->end()) {
__kmp_str_buf_print(buf, "%s", "{<empty>}");
return buf;
}
first_range = true;
start = mask->begin();
while (1) {
// Find next range
// [start, previous] is inclusive range of contiguous bits in mask
for (finish = mask->next(start), previous = start;
finish == previous + 1 && finish != mask->end();
finish = mask->next(finish)) {
previous = finish;
}
// The first range does not need a comma printed before it, but the rest
// of the ranges do need a comma beforehand
if (!first_range) {
__kmp_str_buf_print(buf, "%s", ",");
} else {
first_range = false;
}
// Range with three or more contiguous bits in the affinity mask
if (previous - start > 1) {
__kmp_str_buf_print(buf, "%d-%d", static_cast<int>(start),
static_cast<int>(previous));
} else {
// Range with one or two contiguous bits in the affinity mask
__kmp_str_buf_print(buf, "%d", static_cast<int>(start));
if (previous - start > 0) {
__kmp_str_buf_print(buf, ",%d", static_cast<int>(previous));
}
}
// Start over with new start point
start = finish;
if (start == mask->end())
break;
}
return buf;
}
void __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) {
KMP_CPU_ZERO(mask);
#if KMP_GROUP_AFFINITY
if (__kmp_num_proc_groups > 1) {
int group;
KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
for (group = 0; group < __kmp_num_proc_groups; group++) {
int i;
int num = __kmp_GetActiveProcessorCount(group);
for (i = 0; i < num; i++) {
KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
}
}
} else
#endif /* KMP_GROUP_AFFINITY */
{
int proc;
for (proc = 0; proc < __kmp_xproc; proc++) {
KMP_CPU_SET(proc, mask);
}
}
}
// When sorting by labels, __kmp_affinity_assign_child_nums() must first be
// called to renumber the labels from [0..n] and place them into the child_num
// vector of the address object. This is done in case the labels used for
// the children at one node of the hierarchy differ from those used for
// another node at the same level. Example: suppose the machine has 2 nodes
// with 2 packages each. The first node contains packages 601 and 602, and
// second node contains packages 603 and 604. If we try to sort the table
// for "scatter" affinity, the table will still be sorted 601, 602, 603, 604
// because we are paying attention to the labels themselves, not the ordinal
// child numbers. By using the child numbers in the sort, the result is
// {0,0}=601, {0,1}=603, {1,0}=602, {1,1}=604.
static void __kmp_affinity_assign_child_nums(AddrUnsPair *address2os,
int numAddrs) {
KMP_DEBUG_ASSERT(numAddrs > 0);
int depth = address2os->first.depth;
unsigned *counts = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
unsigned *lastLabel = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
int labCt;
for (labCt = 0; labCt < depth; labCt++) {
address2os[0].first.childNums[labCt] = counts[labCt] = 0;
lastLabel[labCt] = address2os[0].first.labels[labCt];
}
int i;
for (i = 1; i < numAddrs; i++) {
for (labCt = 0; labCt < depth; labCt++) {
if (address2os[i].first.labels[labCt] != lastLabel[labCt]) {
int labCt2;
for (labCt2 = labCt + 1; labCt2 < depth; labCt2++) {
counts[labCt2] = 0;
lastLabel[labCt2] = address2os[i].first.labels[labCt2];
}
counts[labCt]++;
lastLabel[labCt] = address2os[i].first.labels[labCt];
break;
}
}
for (labCt = 0; labCt < depth; labCt++) {
address2os[i].first.childNums[labCt] = counts[labCt];
}
for (; labCt < (int)Address::maxDepth; labCt++) {
address2os[i].first.childNums[labCt] = 0;
}
}
__kmp_free(lastLabel);
__kmp_free(counts);
}
// All of the __kmp_affinity_create_*_map() routines should set
// __kmp_affinity_masks to a vector of affinity mask objects of length
// __kmp_affinity_num_masks, if __kmp_affinity_type != affinity_none, and return
// the number of levels in the machine topology tree (zero if
// __kmp_affinity_type == affinity_none).
//
// All of the __kmp_affinity_create_*_map() routines should set
// *__kmp_affin_fullMask to the affinity mask for the initialization thread.
// They need to save and restore the mask, and it could be needed later, so
// saving it is just an optimization to avoid calling kmp_get_system_affinity()
// again.
kmp_affin_mask_t *__kmp_affin_fullMask = NULL;
static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif
static int *__kmp_pu_os_idx = NULL;
// __kmp_affinity_uniform_topology() doesn't work when called from
// places which support arbitrarily many levels in the machine topology
// map, i.e. the non-default cases in __kmp_affinity_create_cpuinfo_map()
// and __kmp_affinity_create_x2apicid_map().
inline static bool __kmp_affinity_uniform_topology() {
return __kmp_avail_proc == (__kmp_nThreadsPerCore * nCoresPerPkg * nPackages);
}
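// E.g. (illustrative): a machine modeled as 2 packages x 4 cores x 2 threads
// is uniform iff all 16 hardware threads are available to the process; if the
// initial affinity mask hides even one, __kmp_avail_proc != 16.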
// Print out the detailed machine topology map, i.e. the physical locations
// of each OS proc.
static void __kmp_affinity_print_topology(AddrUnsPair *address2os, int len,
int depth, int pkgLevel,
int coreLevel, int threadLevel) {
int proc;
KMP_INFORM(OSProcToPhysicalThreadMap, "KMP_AFFINITY");
for (proc = 0; proc < len; proc++) {
int level;
kmp_str_buf_t buf;
__kmp_str_buf_init(&buf);
for (level = 0; level < depth; level++) {
if (level == threadLevel) {
__kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Thread));
} else if (level == coreLevel) {
__kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Core));
} else if (level == pkgLevel) {
__kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Package));
} else if (level > pkgLevel) {
__kmp_str_buf_print(&buf, "%s_%d ", KMP_I18N_STR(Node),
level - pkgLevel - 1);
} else {
__kmp_str_buf_print(&buf, "L%d ", level);
}
__kmp_str_buf_print(&buf, "%d ", address2os[proc].first.labels[level]);
}
KMP_INFORM(OSProcMapToPack, "KMP_AFFINITY", address2os[proc].second,
buf.str);
__kmp_str_buf_free(&buf);
}
}
#if KMP_USE_HWLOC
static void __kmp_affinity_print_hwloc_tp(AddrUnsPair *addrP, int len,
int depth, int *levels) {
int proc;
kmp_str_buf_t buf;
__kmp_str_buf_init(&buf);
KMP_INFORM(OSProcToPhysicalThreadMap, "KMP_AFFINITY");
for (proc = 0; proc < len; proc++) {
__kmp_str_buf_print(&buf, "%s %d ", KMP_I18N_STR(Package),
addrP[proc].first.labels[0]);
if (depth > 1) {
int level = 1; // iterate over levels
int label = 1; // iterate over labels
if (__kmp_numa_detected)
// node level follows package
if (levels[level++] > 0)
__kmp_str_buf_print(&buf, "%s %d ", KMP_I18N_STR(Node),
addrP[proc].first.labels[label++]);
if (__kmp_tile_depth > 0)
// tile level follows node if any, or package
if (levels[level++] > 0)
__kmp_str_buf_print(&buf, "%s %d ", KMP_I18N_STR(Tile),
addrP[proc].first.labels[label++]);
if (levels[level++] > 0)
// core level follows
__kmp_str_buf_print(&buf, "%s %d ", KMP_I18N_STR(Core),
addrP[proc].first.labels[label++]);
if (levels[level++] > 0)
// thread level comes last
__kmp_str_buf_print(&buf, "%s %d ", KMP_I18N_STR(Thread),
addrP[proc].first.labels[label++]);
KMP_DEBUG_ASSERT(label == depth);
}
KMP_INFORM(OSProcMapToPack, "KMP_AFFINITY", addrP[proc].second, buf.str);
__kmp_str_buf_clear(&buf);
}
__kmp_str_buf_free(&buf);
}
static int nNodePerPkg, nTilePerPkg, nTilePerNode, nCorePerNode, nCorePerTile;
// This function removes the topology levels that are radix 1 and don't offer
// further information about the topology. The most common example is one
// thread context per core: we don't want the extra thread-context level if it
// offers no unique labels, so it is removed.
// return value: the new depth of address2os
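// E.g. (illustrative): a depth-3 map {package, core, thread} on a machine
// with one thread context per core collapses to the depth-2 map
// {package, core}.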
static int __kmp_affinity_remove_radix_one_levels(AddrUnsPair *addrP, int nTh,
int depth, int *levels) {
int level;
int i;
int radix1_detected;
int new_depth = depth;
for (level = depth - 1; level > 0; --level) {
// Detect if this level is radix 1
radix1_detected = 1;
for (i = 1; i < nTh; ++i) {
if (addrP[0].first.labels[level] != addrP[i].first.labels[level]) {
// There are differing label values for this level so it stays
radix1_detected = 0;
break;
}
}
if (!radix1_detected)
continue;
// Radix 1 was detected
--new_depth;
levels[level] = -1; // mark level as not present in address2os array
if (level == new_depth) {
// "turn off" deepest level, just decrement the depth that removes
// the level from address2os array
for (i = 0; i < nTh; ++i) {
addrP[i].first.depth--;
}
} else {
// For other levels, we move labels over and also reduce the depth
int j;
for (j = level; j < new_depth; ++j) {
for (i = 0; i < nTh; ++i) {
addrP[i].first.labels[j] = addrP[i].first.labels[j + 1];
addrP[i].first.depth--;
}
levels[j + 1] -= 1;
}
}
}
return new_depth;
}
// Returns the number of objects of type 'type' below 'obj' within the topology
// tree structure. e.g., if obj is a HWLOC_OBJ_PACKAGE object, and type is
// HWLOC_OBJ_PU, then this will return the number of PUs under the package
// object.
static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
hwloc_obj_type_t type) {
int retval = 0;
hwloc_obj_t first;
for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
obj->logical_index, type, 0);
first != NULL &&
hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology, obj->type, first) ==
obj;
first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,
first)) {
++retval;
}
return retval;
}
static int __kmp_hwloc_count_children_by_depth(hwloc_topology_t t,
hwloc_obj_t o, unsigned depth,
hwloc_obj_t *f) {
if (o->depth == depth) {
if (*f == NULL)
*f = o; // output first descendant found
return 1;
}
int sum = 0;
for (unsigned i = 0; i < o->arity; i++)
sum += __kmp_hwloc_count_children_by_depth(t, o->children[i], depth, f);
return sum; // will be 0 if none are found (since a PU's arity is 0)
}
static int __kmp_hwloc_count_children_by_type(hwloc_topology_t t, hwloc_obj_t o,
hwloc_obj_type_t type,
hwloc_obj_t *f) {
if (!hwloc_compare_types(o->type, type)) {
if (*f == NULL)
*f = o; // output first descendant found
return 1;
}
int sum = 0;
for (unsigned i = 0; i < o->arity; i++)
sum += __kmp_hwloc_count_children_by_type(t, o->children[i], type, f);
return sum; // will be 0 if none are found (since a PU's arity is 0)
}
static int __kmp_hwloc_process_obj_core_pu(AddrUnsPair *addrPair,
int &nActiveThreads,
int &num_active_cores,
hwloc_obj_t obj, int depth,
int *labels) {
hwloc_obj_t core = NULL;
hwloc_topology_t &tp = __kmp_hwloc_topology;
int NC = __kmp_hwloc_count_children_by_type(tp, obj, HWLOC_OBJ_CORE, &core);
for (int core_id = 0; core_id < NC; ++core_id, core = core->next_cousin) {
hwloc_obj_t pu = NULL;
KMP_DEBUG_ASSERT(core != NULL);
int num_active_threads = 0;
int NT = __kmp_hwloc_count_children_by_type(tp, core, HWLOC_OBJ_PU, &pu);
// int NT = core->arity; pu = core->first_child; // faster?
for (int pu_id = 0; pu_id < NT; ++pu_id, pu = pu->next_cousin) {
KMP_DEBUG_ASSERT(pu != NULL);
if (!KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask))
continue; // skip inactive (inaccessible) unit
Address addr(depth + 2);
KA_TRACE(20, ("Hwloc inserting %d (%d) %d (%d) %d (%d) into address2os\n",
obj->os_index, obj->logical_index, core->os_index,
core->logical_index, pu->os_index, pu->logical_index));
for (int i = 0; i < depth; ++i)
addr.labels[i] = labels[i]; // package, etc.
addr.labels[depth] = core_id; // core
addr.labels[depth + 1] = pu_id; // pu
addrPair[nActiveThreads] = AddrUnsPair(addr, pu->os_index);
__kmp_pu_os_idx[nActiveThreads] = pu->os_index;
nActiveThreads++;
++num_active_threads; // count active threads per core
}
if (num_active_threads) { // were there any active threads on the core?
++__kmp_ncores; // count total active cores
++num_active_cores; // count active cores per socket
if (num_active_threads > __kmp_nThreadsPerCore)
__kmp_nThreadsPerCore = num_active_threads; // calc maximum
}
}
return 0;
}
// Check if a NUMA node is detected below the package,
// and if a tile object is detected, record its depth
static int __kmp_hwloc_check_numa() {
hwloc_topology_t &tp = __kmp_hwloc_topology;
hwloc_obj_t hT, hC, hL, hN, hS; // hwloc objects (pointers to)
int depth;
// Get some PU
hT = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PU, 0);
if (hT == NULL) // something has gone wrong
return 1;
// check NUMA node below PACKAGE
hN = hwloc_get_ancestor_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hT);
hS = hwloc_get_ancestor_obj_by_type(tp, HWLOC_OBJ_PACKAGE, hT);
KMP_DEBUG_ASSERT(hS != NULL);
if (hN != NULL && hN->depth > hS->depth) {
__kmp_numa_detected = TRUE; // socket includes node(s)
if (__kmp_affinity_gran == affinity_gran_node) {
__kmp_affinity_gran = affinity_gran_numa;
}
}
// check tile, get object by depth because of multiple caches possible
depth = hwloc_get_cache_type_depth(tp, 2, HWLOC_OBJ_CACHE_UNIFIED);
hL = hwloc_get_ancestor_obj_by_depth(tp, depth, hT);
hC = NULL; // not used, but reset it here just in case
if (hL != NULL &&
__kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE, &hC) > 1)
__kmp_tile_depth = depth; // tile consists of multiple cores
return 0;
}
static int __kmp_affinity_create_hwloc_map(AddrUnsPair **address2os,
kmp_i18n_id_t *const msg_id) {
hwloc_topology_t &tp = __kmp_hwloc_topology; // shortcut of a long name
*address2os = NULL;
*msg_id = kmp_i18n_null;
// Save the affinity mask for the current thread.
kmp_affin_mask_t *oldMask;
KMP_CPU_ALLOC(oldMask);
__kmp_get_system_affinity(oldMask, TRUE);
__kmp_hwloc_check_numa();
if (!KMP_AFFINITY_CAPABLE()) {
// Hack to try and infer the machine topology using only the data
// available from cpuid on the current thread, and __kmp_xproc.
KMP_ASSERT(__kmp_affinity_type == affinity_none);
nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(
hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0), HWLOC_OBJ_CORE);
__kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(
hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0), HWLOC_OBJ_PU);
__kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
if (__kmp_affinity_verbose) {
KMP_INFORM(AffNotCapableUseLocCpuidL11, "KMP_AFFINITY");
KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
if (__kmp_affinity_uniform_topology()) {
KMP_INFORM(Uniform, "KMP_AFFINITY");
} else {
KMP_INFORM(NonUniform, "KMP_AFFINITY");
}
KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
__kmp_nThreadsPerCore, __kmp_ncores);
}
KMP_CPU_FREE(oldMask);
return 0;
}
int depth = 3;
int levels[5] = {0, 1, 2, 3, 4}; // package, [node,] [tile,] core, thread
int labels[3] = {0}; // package [,node] [,tile] - head of labels array
if (__kmp_numa_detected)
++depth;
if (__kmp_tile_depth)
++depth;
// Allocate the data structure to be returned.
AddrUnsPair *retval =
(AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * __kmp_avail_proc);
KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
__kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
// When affinity is off, this routine will still be called to set
// __kmp_ncores, as well as __kmp_nThreadsPerCore,
// nCoresPerPkg, & nPackages. Make sure all these vars are set
// correctly, and return if affinity is not enabled.
hwloc_obj_t socket, node, tile;
int nActiveThreads = 0;
int socket_id = 0;
// re-calculate globals to count only accessible resources
__kmp_ncores = nPackages = nCoresPerPkg = __kmp_nThreadsPerCore = 0;
nNodePerPkg = nTilePerPkg = nTilePerNode = nCorePerNode = nCorePerTile = 0;
for (socket = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0); socket != NULL;
socket = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PACKAGE, socket),
socket_id++) {
labels[0] = socket_id;
if (__kmp_numa_detected) {
int NN;
int n_active_nodes = 0;
node = NULL;
NN = __kmp_hwloc_count_children_by_type(tp, socket, HWLOC_OBJ_NUMANODE,
&node);
for (int node_id = 0; node_id < NN; ++node_id, node = node->next_cousin) {
labels[1] = node_id;
if (__kmp_tile_depth) {
// NUMA + tiles
int NT;
int n_active_tiles = 0;
tile = NULL;
NT = __kmp_hwloc_count_children_by_depth(tp, node, __kmp_tile_depth,
&tile);
for (int tl_id = 0; tl_id < NT; ++tl_id, tile = tile->next_cousin) {
labels[2] = tl_id;
int n_active_cores = 0;
__kmp_hwloc_process_obj_core_pu(retval, nActiveThreads,
n_active_cores, tile, 3, labels);
if (n_active_cores) { // were there any active cores on the tile?
++n_active_tiles; // count active tiles per node
if (n_active_cores > nCorePerTile)
nCorePerTile = n_active_cores; // calc maximum
}
}
if (n_active_tiles) { // were there any active tiles on the node?
++n_active_nodes; // count active nodes per package
if (n_active_tiles > nTilePerNode)
nTilePerNode = n_active_tiles; // calc maximum
}
} else {
// NUMA, no tiles
int n_active_cores = 0;
__kmp_hwloc_process_obj_core_pu(retval, nActiveThreads,
n_active_cores, node, 2, labels);
if (n_active_cores) { // were there any active cores on the node?
++n_active_nodes; // count active nodes per package
if (n_active_cores > nCorePerNode)
nCorePerNode = n_active_cores; // calc maximum
}
}
}
if (n_active_nodes) { // were there any active nodes on the socket?
++nPackages; // count total active packages
if (n_active_nodes > nNodePerPkg)
nNodePerPkg = n_active_nodes; // calc maximum
}
} else {
if (__kmp_tile_depth) {
// no NUMA, tiles
int NT;
int n_active_tiles = 0;
tile = NULL;
NT = __kmp_hwloc_count_children_by_depth(tp, socket, __kmp_tile_depth,
&tile);
for (int tl_id = 0; tl_id < NT; ++tl_id, tile = tile->next_cousin) {
labels[1] = tl_id;
int n_active_cores = 0;
__kmp_hwloc_process_obj_core_pu(retval, nActiveThreads,
n_active_cores, tile, 2, labels);
if (n_active_cores) { // were there any active cores on the tile?
++n_active_tiles; // count active tiles per package
if (n_active_cores > nCorePerTile)
nCorePerTile = n_active_cores; // calc maximum
}
}
if (n_active_tiles) { // were there any active tiles on the socket?
++nPackages; // count total active packages
if (n_active_tiles > nTilePerPkg)
nTilePerPkg = n_active_tiles; // calc maximum
}
} else {
// no NUMA, no tiles
int n_active_cores = 0;
__kmp_hwloc_process_obj_core_pu(retval, nActiveThreads, n_active_cores,
socket, 1, labels);
if (n_active_cores) { // were there any active cores on the socket?
++nPackages; // count total active packages
if (n_active_cores > nCoresPerPkg)
nCoresPerPkg = n_active_cores; // calc maximum
}
}
}
}
// If there's only one thread context to bind to, return now.
KMP_DEBUG_ASSERT(nActiveThreads == __kmp_avail_proc);
KMP_ASSERT(nActiveThreads > 0);
if (nActiveThreads == 1) {
__kmp_ncores = nPackages = 1;
__kmp_nThreadsPerCore = nCoresPerPkg = 1;
if (__kmp_affinity_verbose) {
char buf[KMP_AFFIN_MASK_PRINT_LEN];
__kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
if (__kmp_affinity_respect_mask) {
KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
} else {
KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
}
KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
KMP_INFORM(Uniform, "KMP_AFFINITY");
KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
__kmp_nThreadsPerCore, __kmp_ncores);
}
if (__kmp_affinity_type == affinity_none) {
__kmp_free(retval);
KMP_CPU_FREE(oldMask);
return 0;
}
// Form an Address object which only includes the package level.
Address addr(1);
addr.labels[0] = retval[0].first.labels[0];
retval[0].first = addr;
if (__kmp_affinity_gran_levels < 0) {
__kmp_affinity_gran_levels = 0;
}
if (__kmp_affinity_verbose) {
__kmp_affinity_print_topology(retval, 1, 1, 0, -1, -1);
}
*address2os = retval;
KMP_CPU_FREE(oldMask);
return 1;
}
// Sort the table by physical Id.
qsort(retval, nActiveThreads, sizeof(*retval),
__kmp_affinity_cmp_Address_labels);
// Check to see if the machine topology is uniform
int nPUs = nPackages * __kmp_nThreadsPerCore;
if (__kmp_numa_detected) {
if (__kmp_tile_depth) { // NUMA + tiles
nPUs *= (nNodePerPkg * nTilePerNode * nCorePerTile);
} else { // NUMA, no tiles
nPUs *= (nNodePerPkg * nCorePerNode);
}
} else {
if (__kmp_tile_depth) { // no NUMA, tiles
nPUs *= (nTilePerPkg * nCorePerTile);
} else { // no NUMA, no tiles
nPUs *= nCoresPerPkg;
}
}
unsigned uniform = (nPUs == nActiveThreads);
// Print the machine topology summary.
if (__kmp_affinity_verbose) {
char mask[KMP_AFFIN_MASK_PRINT_LEN];
__kmp_affinity_print_mask(mask, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
if (__kmp_affinity_respect_mask) {
KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", mask);
} else {
KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", mask);
}
KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
if (uniform) {
KMP_INFORM(Uniform, "KMP_AFFINITY");
} else {
KMP_INFORM(NonUniform, "KMP_AFFINITY");
}
if (__kmp_numa_detected) {
if (__kmp_tile_depth) { // NUMA + tiles
KMP_INFORM(TopologyExtraNoTi, "KMP_AFFINITY", nPackages, nNodePerPkg,
nTilePerNode, nCorePerTile, __kmp_nThreadsPerCore,
__kmp_ncores);
} else { // NUMA, no tiles
KMP_INFORM(TopologyExtraNode, "KMP_AFFINITY", nPackages, nNodePerPkg,
nCorePerNode, __kmp_nThreadsPerCore, __kmp_ncores);
}
} else {
if (__kmp_tile_depth) { // no NUMA, tiles
KMP_INFORM(TopologyExtraTile, "KMP_AFFINITY", nPackages, nTilePerPkg,
nCorePerTile, __kmp_nThreadsPerCore, __kmp_ncores);
} else { // no NUMA, no tiles
kmp_str_buf_t buf;
__kmp_str_buf_init(&buf);
__kmp_str_buf_print(&buf, "%d", nPackages);
KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, nCoresPerPkg,
__kmp_nThreadsPerCore, __kmp_ncores);
__kmp_str_buf_free(&buf);
}
}
}
if (__kmp_affinity_type == affinity_none) {
__kmp_free(retval);
KMP_CPU_FREE(oldMask);
return 0;
}
int depth_full = depth; // number of levels before compressing
// Find any levels with radix 1, and remove them from the map
// (except for the package level).
depth = __kmp_affinity_remove_radix_one_levels(retval, nActiveThreads, depth,
levels);
KMP_DEBUG_ASSERT(__kmp_affinity_gran != affinity_gran_default);
if (__kmp_affinity_gran_levels < 0) {
// Set the granularity level based on what levels are modeled
// in the machine topology map.
__kmp_affinity_gran_levels = 0; // lowest level (e.g. fine)
if (__kmp_affinity_gran > affinity_gran_thread) {
for (int i = 1; i <= depth_full; ++i) {
if (__kmp_affinity_gran <= i) // only count deeper levels
break;
if (levels[depth_full - i] > 0)
__kmp_affinity_gran_levels++;
}
}
if (__kmp_affinity_gran > affinity_gran_package)
__kmp_affinity_gran_levels++; // e.g. granularity = group
}
if (__kmp_affinity_verbose)
__kmp_affinity_print_hwloc_tp(retval, nActiveThreads, depth, levels);
KMP_CPU_FREE(oldMask);
*address2os = retval;
return depth;
}
#endif // KMP_USE_HWLOC
// If we don't know how to retrieve the machine's processor topology, or
// encounter an error in doing so, this routine is called to form a "flat"
// mapping of os thread id's <-> processor id's.
static int __kmp_affinity_create_flat_map(AddrUnsPair **address2os,
kmp_i18n_id_t *const msg_id) {
*address2os = NULL;
*msg_id = kmp_i18n_null;
// Even if __kmp_affinity_type == affinity_none, this routine might still
// be called to set __kmp_ncores, as well as
// __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
if (!KMP_AFFINITY_CAPABLE()) {
KMP_ASSERT(__kmp_affinity_type == affinity_none);
__kmp_ncores = nPackages = __kmp_xproc;
__kmp_nThreadsPerCore = nCoresPerPkg = 1;
if (__kmp_affinity_verbose) {
KMP_INFORM(AffFlatTopology, "KMP_AFFINITY");
KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
KMP_INFORM(Uniform, "KMP_AFFINITY");
KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
__kmp_nThreadsPerCore, __kmp_ncores);
}
return 0;
}
// When affinity is off, this routine will still be called to set
// __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
// Make sure all these vars are set correctly, and return now if affinity is
// not enabled.
__kmp_ncores = nPackages = __kmp_avail_proc;
__kmp_nThreadsPerCore = nCoresPerPkg = 1;
if (__kmp_affinity_verbose) {
char buf[KMP_AFFIN_MASK_PRINT_LEN];
__kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
__kmp_affin_fullMask);
KMP_INFORM(AffCapableUseFlat, "KMP_AFFINITY");
if (__kmp_affinity_respect_mask) {
KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
} else {
KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
}
KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
KMP_INFORM(Uniform, "KMP_AFFINITY");
KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
__kmp_nThreadsPerCore, __kmp_ncores);
}
KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
__kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
if (__kmp_affinity_type == affinity_none) {
int avail_ct = 0;
int i;
KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask))
continue;
__kmp_pu_os_idx[avail_ct++] = i; // suppose indices are flat
}
return 0;
}
// Construct the data structure to be returned.
*address2os =
(AddrUnsPair *)__kmp_allocate(sizeof(**address2os) * __kmp_avail_proc);
int avail_ct = 0;
int i;
KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
// Skip this proc if it is not included in the machine model.
if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
continue;
}
__kmp_pu_os_idx[avail_ct] = i; // suppose indices are flat
Address addr(1);
addr.labels[0] = i;
(*address2os)[avail_ct++] = AddrUnsPair(addr, i);
}
if (__kmp_affinity_verbose) {
KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
}
if (__kmp_affinity_gran_levels < 0) {
// Only the package level is modeled in the machine topology map,
// so the #levels of granularity is either 0 or 1.
if (__kmp_affinity_gran > affinity_gran_package) {
__kmp_affinity_gran_levels = 1;
} else {
__kmp_affinity_gran_levels = 0;
}
}
return 1;
}
#if KMP_GROUP_AFFINITY
// If multiple Windows* OS processor groups exist, we can create a 2-level
// topology map with the groups at level 0 and the individual procs at level 1.
// This facilitates letting the threads float among all procs in a group,
// if granularity=group (the default when there are multiple groups).
static int __kmp_affinity_create_proc_group_map(AddrUnsPair **address2os,
kmp_i18n_id_t *const msg_id) {
*address2os = NULL;
*msg_id = kmp_i18n_null;
// If we aren't affinity capable, then return now.
// The flat mapping will be used.
if (!KMP_AFFINITY_CAPABLE()) {
// FIXME set *msg_id
return -1;
}
// Construct the data structure to be returned.
*address2os =
(AddrUnsPair *)__kmp_allocate(sizeof(**address2os) * __kmp_avail_proc);
KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
__kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
int avail_ct = 0;
int i;
KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
// Skip this proc if it is not included in the machine model.
if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
continue;
}
__kmp_pu_os_idx[avail_ct] = i; // suppose indices are flat
Address addr(2);
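// Each Windows* OS processor group holds up to CHAR_BIT * sizeof(DWORD_PTR)
// procs (64 on a 64-bit OS), so e.g. (illustrative) proc 70 maps to
// group 1, bit 6.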
addr.labels[0] = i / (CHAR_BIT * sizeof(DWORD_PTR));
addr.labels[1] = i % (CHAR_BIT * sizeof(DWORD_PTR));
(*address2os)[avail_ct++] = AddrUnsPair(addr, i);
if (__kmp_affinity_verbose) {
KMP_INFORM(AffOSProcToGroup, "KMP_AFFINITY", i, addr.labels[0],
addr.labels[1]);
}
}
if (__kmp_affinity_gran_levels < 0) {
if (__kmp_affinity_gran == affinity_gran_group) {
__kmp_affinity_gran_levels = 1;
} else if ((__kmp_affinity_gran == affinity_gran_fine) ||
(__kmp_affinity_gran == affinity_gran_thread)) {
__kmp_affinity_gran_levels = 0;
} else {
const char *gran_str = NULL;
if (__kmp_affinity_gran == affinity_gran_core) {
gran_str = "core";
} else if (__kmp_affinity_gran == affinity_gran_package) {
gran_str = "package";
} else if (__kmp_affinity_gran == affinity_gran_node) {
gran_str = "node";
} else {
KMP_ASSERT(0);
}
// Warning: can't use affinity granularity \"gran\" with group topology
// method, using "thread"
__kmp_affinity_gran_levels = 0;
}
}
return 2;
}
#endif /* KMP_GROUP_AFFINITY */
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
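// Returns the number of bits needed to represent 'count' distinct values,
// e.g. (illustrative) __kmp_cpuid_mask_width(6) == 3, since 2^3 = 8 >= 6
// while 2^2 = 4 < 6.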
static int __kmp_cpuid_mask_width(int count) {
int r = 0;
while ((1 << r) < count)
++r;
return r;
}
class apicThreadInfo {
public:
unsigned osId; // param to __kmp_affinity_bind_thread
unsigned apicId; // from cpuid after binding
unsigned maxCoresPerPkg; // ""
unsigned maxThreadsPerPkg; // ""
unsigned pkgId; // inferred from above values
unsigned coreId; // ""
unsigned threadId; // ""
};
static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
const void *b) {
const apicThreadInfo *aa = (const apicThreadInfo *)a;
const apicThreadInfo *bb = (const apicThreadInfo *)b;
if (aa->pkgId < bb->pkgId)
return -1;
if (aa->pkgId > bb->pkgId)
return 1;
if (aa->coreId < bb->coreId)
return -1;
if (aa->coreId > bb->coreId)
return 1;
if (aa->threadId < bb->threadId)
return -1;
if (aa->threadId > bb->threadId)
return 1;
return 0;
}
// On IA-32 architecture and Intel(R) 64 architecture, we attempt to use
// an algorithm which cycles through the available os threads, setting
// the current thread's affinity mask to that thread, and then retrieves
// the Apic Id for each thread context using the cpuid instruction.
static int __kmp_affinity_create_apicid_map(AddrUnsPair **address2os,
kmp_i18n_id_t *const msg_id) {
kmp_cpuid buf;
*address2os = NULL;
*msg_id = kmp_i18n_null;
// Check if cpuid leaf 4 is supported.
__kmp_x86_cpuid(0, 0, &buf);
if (buf.eax < 4) {
*msg_id = kmp_i18n_str_NoLeaf4Support;
return -1;
}
// The algorithm used starts by setting the affinity to each available thread
// and retrieving info from the cpuid instruction, so if we are not capable of
// calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then we
// need to do something else - use the defaults that we calculated from
// issuing cpuid without binding to each proc.
if (!KMP_AFFINITY_CAPABLE()) {
// Hack to try and infer the machine topology using only the data
// available from cpuid on the current thread, and __kmp_xproc.
KMP_ASSERT(__kmp_affinity_type == affinity_none);
// Get an upper bound on the number of threads per package using cpuid(1).
// On some OS/chip combinations where HT is supported by the chip but is
// disabled, this value will be 2 on a single core chip. Usually, it will be
// 2 if HT is enabled and 1 if HT is disabled.
__kmp_x86_cpuid(1, 0, &buf);
int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
if (maxThreadsPerPkg == 0) {
maxThreadsPerPkg = 1;
}
// The num cores per pkg comes from cpuid(4). 1 must be added to the encoded
// value.
//
// The author of cpu_count.cpp treated this as only an upper bound on the
// number of cores, but I haven't seen any cases where it was greater than
// the actual number of cores, so we will treat it as exact in this block of
// code.
//
// First, we need to check if cpuid(4) is supported on this chip. To see if
// cpuid(n) is supported, issue cpuid(0) and check if eax has the value n or
// greater.
__kmp_x86_cpuid(0, 0, &buf);
if (buf.eax >= 4) {
__kmp_x86_cpuid(4, 0, &buf);
nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
} else {
nCoresPerPkg = 1;
}
// There is no way to reliably tell if HT is enabled without issuing the
// cpuid instruction from every thread and correlating the cpuid info, so
// if the machine is not affinity capable, we assume that HT is off. We have
// seen quite a few machines where maxThreadsPerPkg is 2, yet the machine
// does not support HT.
//
// - Older OSes are usually found on machines with older chips, which do not
// support HT.
// - The performance penalty for mistakenly identifying a machine as HT when
// it isn't (which results in blocktime being incorrectly set to 0) is
// greater than the penalty for mistakenly identifying a machine as
// being 1 thread/core when it is really HT enabled (which results in
// blocktime being incorrectly set to a positive value).
__kmp_ncores = __kmp_xproc;
nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
__kmp_nThreadsPerCore = 1;
if (__kmp_affinity_verbose) {
KMP_INFORM(AffNotCapableUseLocCpuid, "KMP_AFFINITY");
KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
if (__kmp_affinity_uniform_topology()) {
KMP_INFORM(Uniform, "KMP_AFFINITY");
} else {
KMP_INFORM(NonUniform, "KMP_AFFINITY");
}
KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
__kmp_nThreadsPerCore, __kmp_ncores);
}
return 0;
}
// From here on, we can assume that it is safe to call
// __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
// __kmp_affinity_type = affinity_none.
// Save the affinity mask for the current thread.
kmp_affin_mask_t *oldMask;
KMP_CPU_ALLOC(oldMask);
KMP_ASSERT(oldMask != NULL);
__kmp_get_system_affinity(oldMask, TRUE);
// Run through each of the available contexts, binding the current thread
// to it, and obtaining the pertinent information using the cpuid instr.
//
// The relevant information is:
// - Apic Id: Bits 24:31 of ebx after issuing cpuid(1) - each thread context
// has a unique Apic Id, which is of the form pkg# : core# : thread#.
// - Max Threads Per Pkg: Bits 16:23 of ebx after issuing cpuid(1). The value
// of this field determines the width of the core# + thread# fields in the
// Apic Id. It is also an upper bound on the number of threads per
// package, but it has been verified that situations happen where it is not
// exact. In particular, on certain OS/chip combinations where Intel(R)
// Hyper-Threading Technology is supported by the chip but has been
// disabled, the value of this field will be 2 (for a single core chip).
// On other OS/chip combinations supporting Intel(R) Hyper-Threading
// Technology, the value of this field will be 1 when Intel(R)
// Hyper-Threading Technology is disabled and 2 when it is enabled.
// - Max Cores Per Pkg: Bits 26:31 of eax after issuing cpuid(4). The value
// of this field (+1) determines the width of the core# field in the Apic
// Id. The comments in "cpucount.cpp" say that this value is an upper
// bound, but the IA-32 architecture manual says that it is exactly the
// number of cores per package, and I haven't seen any case where it
// wasn't.
//
// From this information, deduce the package Id, core Id, and thread Id,
// and set the corresponding fields in the apicThreadInfo struct.
unsigned i;
apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
__kmp_avail_proc * sizeof(apicThreadInfo));
unsigned nApics = 0;
KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
// Skip this proc if it is not included in the machine model.
if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
continue;
}
KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);
__kmp_affinity_dispatch->bind_thread(i);
threadInfo[nApics].osId = i;
// The apic id and max threads per pkg come from cpuid(1).
__kmp_x86_cpuid(1, 0, &buf);
if (((buf.edx >> 9) & 1) == 0) {
__kmp_set_system_affinity(oldMask, TRUE);
__kmp_free(threadInfo);
KMP_CPU_FREE(oldMask);
*msg_id = kmp_i18n_str_ApicNotPresent;
return -1;
}
threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
if (threadInfo[nApics].maxThreadsPerPkg == 0) {
threadInfo[nApics].maxThreadsPerPkg = 1;
}
// Max cores per pkg comes from cpuid(4). 1 must be added to the encoded
// value.
//
// First, we need to check if cpuid(4) is supported on this chip. To see if
// cpuid(n) is supported, issue cpuid(0) and check if eax has the value n
// or greater.
__kmp_x86_cpuid(0, 0, &buf);
if (buf.eax >= 4) {
__kmp_x86_cpuid(4, 0, &buf);
threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
} else {
threadInfo[nApics].maxCoresPerPkg = 1;
}
// Infer the pkgId / coreId / threadId using only the info obtained locally.
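// Worked example (illustrative, not from a particular chip): suppose
// apicId = 0x2d (binary 101101), maxThreadsPerPkg = 8, maxCoresPerPkg = 4.
// Then widthCT = 3, widthC = 2, widthT = 1, so
//   pkgId    = 0x2d >> 3       = 5
//   coreId   = (0x2d >> 1) & 3 = 2
//   threadId = 0x2d & 1        = 1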
int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;
int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
int widthT = widthCT - widthC;
if (widthT < 0) {
// I've never seen this one happen, but I suppose it could, if the cpuid
// instruction on a chip was really screwed up. Make sure to restore the
// affinity mask before the tail call.
__kmp_set_system_affinity(oldMask, TRUE);
__kmp_free(threadInfo);
KMP_CPU_FREE(oldMask);
*msg_id = kmp_i18n_str_InvalidCpuidInfo;
return -1;
}
int maskC = (1 << widthC) - 1;
threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;
int maskT = (1 << widthT) - 1;
threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;
nApics++;
}
// We've collected all the info we need.
// Restore the old affinity mask for this thread.
__kmp_set_system_affinity(oldMask, TRUE);
// If there's only one thread context to bind to, form an Address object
// with depth 1 and return immediately (or, if affinity is off, set
// address2os to NULL and return).
//
// If it is configured to omit the package level when there is only a single
// package, the logic at the end of this routine won't work if there is only
// a single thread - it would try to form an Address object with depth 0.
KMP_ASSERT(nApics > 0);
if (nApics == 1) {
__kmp_ncores = nPackages = 1;
__kmp_nThreadsPerCore = nCoresPerPkg = 1;
if (__kmp_affinity_verbose) {
char buf[KMP_AFFIN_MASK_PRINT_LEN];
__kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
KMP_INFORM(AffUseGlobCpuid, "KMP_AFFINITY");
if (__kmp_affinity_respect_mask) {
KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
} else {
KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
}
KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
KMP_INFORM(Uniform, "KMP_AFFINITY");
KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
__kmp_nThreadsPerCore, __kmp_ncores);
}
if (__kmp_affinity_type == affinity_none) {
__kmp_free(threadInfo);
KMP_CPU_FREE(oldMask);
return 0;
}
*address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair));
Address addr(1);
addr.labels[0] = threadInfo[0].pkgId;
(*address2os)[0] = AddrUnsPair(addr, threadInfo[0].osId);
if (__kmp_affinity_gran_levels < 0) {
__kmp_affinity_gran_levels = 0;
}
if (__kmp_affinity_verbose) {
__kmp_affinity_print_topology(*address2os, 1, 1, 0, -1, -1);
}
__kmp_free(threadInfo);
KMP_CPU_FREE(oldMask);
return 1;
}
// Sort the threadInfo table by physical Id.
qsort(threadInfo, nApics, sizeof(*threadInfo),
__kmp_affinity_cmp_apicThreadInfo_phys_id);
// The table is now sorted by pkgId / coreId / threadId, but we really don't
// know the radix of any of the fields. pkgId's may be sparsely assigned among
// the chips on a system. Although coreId's are usually assigned
// [0 .. coresPerPkg-1] and threadId's are usually assigned
// [0..threadsPerCore-1], we don't want to make any such assumptions.
//
// For that matter, we don't know what coresPerPkg and threadsPerCore (or the
// total # packages) are at this point - we want to determine that now. We
// only have an upper bound on the first two figures.
//
// We also perform a consistency check at this point: the values returned by
// the cpuid instruction for any thread bound to a given package had better
// return the same info for maxThreadsPerPkg and maxCoresPerPkg.
nPackages = 1;
nCoresPerPkg = 1;
__kmp_nThreadsPerCore = 1;
unsigned nCores = 1;
unsigned pkgCt = 1; // to determine radii
unsigned lastPkgId = threadInfo[0].pkgId;
unsigned coreCt = 1;
unsigned lastCoreId = threadInfo[0].coreId;
unsigned threadCt = 1;
unsigned lastThreadId = threadInfo[0].threadId;
// intra-pkg consistency checks
unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;
for (i = 1; i < nApics; i++) {
if (threadInfo[i].pkgId != lastPkgId) {
nCores++;
pkgCt++;
lastPkgId = threadInfo[i].pkgId;
if ((int)coreCt > nCoresPerPkg)
nCoresPerPkg = coreCt;
coreCt = 1;
lastCoreId = threadInfo[i].coreId;
if ((int)threadCt > __kmp_nThreadsPerCore)
__kmp_nThreadsPerCore = threadCt;
threadCt = 1;
lastThreadId = threadInfo[i].threadId;
// This is a different package, so go on to the next iteration without
// doing any consistency checks. Reset the consistency check vars, though.
prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
continue;
}
if (threadInfo[i].coreId != lastCoreId) {
nCores++;
coreCt++;
lastCoreId = threadInfo[i].coreId;
if ((int)threadCt > __kmp_nThreadsPerCore)
__kmp_nThreadsPerCore = threadCt;
threadCt = 1;
lastThreadId = threadInfo[i].threadId;
} else if (threadInfo[i].threadId != lastThreadId) {
threadCt++;
lastThreadId = threadInfo[i].threadId;
} else {
__kmp_free(threadInfo);
KMP_CPU_FREE(oldMask);
*msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
return -1;
}
// Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg
// fields agree between all the threads bound to a given package.
if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
(prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
__kmp_free(threadInfo);
KMP_CPU_FREE(oldMask);
*msg_id = kmp_i18n_str_InconsistentCpuidInfo;
return -1;
}
}
nPackages = pkgCt;
if ((int)coreCt > nCoresPerPkg)
nCoresPerPkg = coreCt;
if ((int)threadCt > __kmp_nThreadsPerCore)
__kmp_nThreadsPerCore = threadCt;
// When affinity is off, this routine will still be called to set
// __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
// Make sure all these vars are set correctly, and return now if affinity is
// not enabled.
__kmp_ncores = nCores;
if (__kmp_affinity_verbose) {
char buf[KMP_AFFIN_MASK_PRINT_LEN];
__kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
KMP_INFORM(AffUseGlobCpuid, "KMP_AFFINITY");
if (__kmp_affinity_respect_mask) {
KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
} else {
KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
}
KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
if (__kmp_affinity_uniform_topology()) {
KMP_INFORM(Uniform, "KMP_AFFINITY");
} else {
KMP_INFORM(NonUniform, "KMP_AFFINITY");
}
KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
__kmp_nThreadsPerCore, __kmp_ncores);
}
KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc);
__kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
for (i = 0; i < nApics; ++i) {
__kmp_pu_os_idx[i] = threadInfo[i].osId;
}
if (__kmp_affinity_type == affinity_none) {
__kmp_free(threadInfo);
KMP_CPU_FREE(oldMask);
return 0;
}
// Now that we've determined the number of packages, the number of cores per
// package, and the number of threads per core, we can construct the data
// structure that is to be returned.
int pkgLevel = 0;
int coreLevel = (nCoresPerPkg <= 1) ? -1 : 1;
int threadLevel =
(__kmp_nThreadsPerCore <= 1) ? -1 : ((coreLevel >= 0) ? 2 : 1);
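// E.g. (illustrative): a single-core package with HT yields levels
// {pkg = 0, thread = 1} and depth 2; multiple cores without HT yield
// {pkg = 0, core = 1} and depth 2.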
unsigned depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
KMP_ASSERT(depth > 0);
*address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * nApics);
for (i = 0; i < nApics; ++i) {
Address addr(depth);
unsigned os = threadInfo[i].osId;
int d = 0;
if (pkgLevel >= 0) {
addr.labels[d++] = threadInfo[i].pkgId;
}
if (coreLevel >= 0) {
addr.labels[d++] = threadInfo[i].coreId;
}
if (threadLevel >= 0) {
addr.labels[d++] = threadInfo[i].threadId;
}
(*address2os)[i] = AddrUnsPair(addr, os);
}
if (__kmp_affinity_gran_levels < 0) {
// Set the granularity level based on what levels are modeled in the machine
// topology map.
__kmp_affinity_gran_levels = 0;
if ((threadLevel >= 0) && (__kmp_affinity_gran > affinity_gran_thread)) {
__kmp_affinity_gran_levels++;
}
if ((coreLevel >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
__kmp_affinity_gran_levels++;
}
if ((pkgLevel >= 0) && (__kmp_affinity_gran > affinity_gran_package)) {
__kmp_affinity_gran_levels++;
}
}
if (__kmp_affinity_verbose) {
__kmp_affinity_print_topology(*address2os, nApics, depth, pkgLevel,
coreLevel, threadLevel);
}
__kmp_free(threadInfo);
KMP_CPU_FREE(oldMask);
return depth;
}
// Intel(R) microarchitectures code named Nehalem, Dunnington, and later
// support a newer interface for specifying the x2APIC Ids,
// based on cpuid leaf 11.
static int __kmp_affinity_create_x2apicid_map(AddrUnsPair **address2os,
kmp_i18n_id_t *const msg_id) {
kmp_cpuid buf;
*address2os = NULL;
*msg_id = kmp_i18n_null;
// Check to see if cpuid leaf 11 is supported.
__kmp_x86_cpuid(0, 0, &buf);
if (buf.eax < 11) {
*msg_id = kmp_i18n_str_NoLeaf11Support;
return -1;
}
__kmp_x86_cpuid(11, 0, &buf);
if (buf.ebx == 0) {
*msg_id = kmp_i18n_str_NoLeaf11Support;
return -1;
}
// Find the number of levels in the machine topology. While we're at it, get
// the default values for __kmp_nThreadsPerCore & nCoresPerPkg. We will try to
// get more accurate values later by explicitly counting them, but get
// reasonable defaults now, in case we return early.
int level;
int threadLevel = -1;
int coreLevel = -1;
int pkgLevel = -1;
__kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
for (level = 0;; level++) {
if (level > 31) {
// FIXME: Hack for DPD200163180
//
// If level is big then something went wrong -> exiting
//
// There could actually be 32 valid levels in the machine topology, but so
// far, the only machine we have seen which does not exit this loop before
// iteration 32 has fubar x2APIC settings.
//
// For now, just reject this case based upon loop trip count.
*msg_id = kmp_i18n_str_InvalidCpuidInfo;
return -1;
}
__kmp_x86_cpuid(11, level, &buf);
if (buf.ebx == 0) {
if (pkgLevel < 0) {
// Will infer nPackages from __kmp_xproc
pkgLevel = level;
level++;
}
break;
}
int kind = (buf.ecx >> 8) & 0xff;
if (kind == 1) {
// SMT level
threadLevel = level;
coreLevel = -1;
pkgLevel = -1;
__kmp_nThreadsPerCore = buf.ebx & 0xffff;
if (__kmp_nThreadsPerCore == 0) {
*msg_id = kmp_i18n_str_InvalidCpuidInfo;
return -1;
}
} else if (kind == 2) {
// core level
coreLevel = level;
pkgLevel = -1;
nCoresPerPkg = buf.ebx & 0xffff;
if (nCoresPerPkg == 0) {
*msg_id = kmp_i18n_str_InvalidCpuidInfo;
return -1;
}
} else {
if (level <= 0) {
*msg_id = kmp_i18n_str_InvalidCpuidInfo;
return -1;
}
if (pkgLevel >= 0) {
continue;
}
pkgLevel = level;
nPackages = buf.ebx & 0xffff;
if (nPackages == 0) {
*msg_id = kmp_i18n_str_InvalidCpuidInfo;
return -1;
}
}
}
int depth = level;
// In the above loop, "level" was counted from the finest level (usually
// thread) to the coarsest. The caller expects that we will place the labels
// in (*address2os)[].first.labels[] in the inverse order, so we need to
// invert the vars saying which level means what.
if (threadLevel >= 0) {
threadLevel = depth - threadLevel - 1;
}
if (coreLevel >= 0) {
coreLevel = depth - coreLevel - 1;
}
KMP_DEBUG_ASSERT(pkgLevel >= 0);
pkgLevel = depth - pkgLevel - 1;
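// E.g. (illustrative): with depth == 3 enumerated as SMT = 0, core = 1,
// pkg = 2, the inverted values are threadLevel = 2, coreLevel = 1,
// pkgLevel = 0, matching the label order.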
// The algorithm used starts by setting the affinity to each available thread
// and retrieving info from the cpuid instruction, so if we are not capable of
// calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then we
// need to do something else - use the defaults that we calculated from
// issuing cpuid without binding to each proc.
if (!KMP_AFFINITY_CAPABLE()) {
// Hack to try and infer the machine topology using only the data
// available from cpuid on the current thread, and __kmp_xproc.
KMP_ASSERT(__kmp_affinity_type == affinity_none);
__kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
if (__kmp_affinity_verbose) {
KMP_INFORM(AffNotCapableUseLocCpuidL11, "KMP_AFFINITY");
KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
if (__kmp_affinity_uniform_topology()) {
KMP_INFORM(Uniform, "KMP_AFFINITY");
} else {
KMP_INFORM(NonUniform, "KMP_AFFINITY");
}
KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
__kmp_nThreadsPerCore, __kmp_ncores);
}
return 0;
}
// From here on, we can assume that it is safe to call
// __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
// __kmp_affinity_type = affinity_none.
// Save the affinity mask for the current thread.
kmp_affin_mask_t *oldMask;
KMP_CPU_ALLOC(oldMask);
__kmp_get_system_affinity(oldMask, TRUE);
// Allocate the data structure to be returned.
AddrUnsPair *retval =
(AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * __kmp_avail_proc);
// Run through each of the available contexts, binding the current thread
// to it, and obtaining the pertinent information using the cpuid instr.
unsigned int proc;
int nApics = 0;
KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
// Skip this proc if it is not included in the machine model.
if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
continue;
}
KMP_DEBUG_ASSERT(nApics < __kmp_avail_proc);
__kmp_affinity_dispatch->bind_thread(proc);
// Extract labels for each level in the machine topology map from Apic ID.
Address addr(depth);
int prev_shift = 0;
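// Illustrative decode (hypothetical chip with 2 threads/core, 8 cores/pkg):
// level 0 reports shift = 1 and level 1 reports shift = 4, so an x2APIC id
// of 0x35 yields thread = 0x35 & 0x1 = 1, core = (0x35 & 0xf) >> 1 = 2,
// and package = 0x35 >> 4 = 3.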
for (level = 0; level < depth; level++) {
__kmp_x86_cpuid(11, level, &buf);
unsigned apicId = buf.edx;
if (buf.ebx == 0) {
if (level != depth - 1) {
KMP_CPU_FREE(oldMask);
*msg_id = kmp_i18n_str_InconsistentCpuidInfo;
return -1;
}
addr.labels[depth - level - 1] = apicId >> prev_shift;
level++;
break;
}
int shift = buf.eax & 0x1f;
int mask = (1 << shift) - 1;
addr.labels[depth - level - 1] = (apicId & mask) >> prev_shift;
prev_shift = shift;
}
if (level != depth) {
KMP_CPU_FREE(oldMask);
*msg_id = kmp_i18n_str_InconsistentCpuidInfo;
return -1;
}
retval[nApics] = AddrUnsPair(addr, proc);
nApics++;
}
// We've collected all the info we need.
// Restore the old affinity mask for this thread.
__kmp_set_system_affinity(oldMask, TRUE);
// If there's only one thread context to bind to, return now.
KMP_ASSERT(nApics > 0);
if (nApics == 1) {
__kmp_ncores = nPackages = 1;
__kmp_nThreadsPerCore = nCoresPerPkg = 1;
if (__kmp_affinity_verbose) {
char buf[KMP_AFFIN_MASK_PRINT_LEN];
__kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
KMP_INFORM(AffUseGlobCpuidL11, "KMP_AFFINITY");
if (__kmp_affinity_respect_mask) {
KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
} else {
KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
}
KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
KMP_INFORM(Uniform, "KMP_AFFINITY");
KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
__kmp_nThreadsPerCore, __kmp_ncores);
}
if (__kmp_affinity_type == affinity_none) {
__kmp_free(retval);
KMP_CPU_FREE(oldMask);
return 0;
}
// Form an Address object which only includes the package level.
Address addr(1);
addr.labels[0] = retval[0].first.labels[pkgLevel];
retval[0].first = addr;
if (__kmp_affinity_gran_levels < 0) {
__kmp_affinity_gran_levels = 0;
}
if (__kmp_affinity_verbose) {
__kmp_affinity_print_topology(retval, 1, 1, 0, -1, -1);
}
*address2os = retval;
KMP_CPU_FREE(oldMask);
return 1;
}
// Sort the table by physical Id.
qsort(retval, nApics, sizeof(*retval), __kmp_affinity_cmp_Address_labels);
// Find the radix at each of the levels.
unsigned *totals = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
unsigned *counts = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
unsigned *maxCt = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
unsigned *last = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
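// For each level: totals[] counts distinct objects seen so far, counts[]
// counts children within the current parent object, maxCt[] tracks the
// maximum number of children per parent, and last[] holds the most recently
// seen label.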
for (level = 0; level < depth; level++) {
totals[level] = 1;
maxCt[level] = 1;
counts[level] = 1;
last[level] = retval[0].first.labels[level];
}
// From here on, the iteration variable "level" runs from the finest level to
// the coarsest, i.e. we iterate forward through
// (*address2os)[].first.labels[] - in the previous loops, we iterated
// backwards.
for (proc = 1; (int)proc < nApics; proc++) {
int level;
for (level = 0; level < depth; level++) {
if (retval[proc].first.labels[level] != last[level]) {
int j;
for (j = level + 1; j < depth; j++) {
totals[j]++;
counts[j] = 1;
// The line below (left commented out) would cause incorrect topology
// information to be printed when the max value for some level
// (maxCt[level]) is encountered earlier than some smaller value while
// going through the array. For example, suppose pkg0 has 4 cores and
// pkg1 has 2 cores; then maxCt[1] would end up as 2
// whereas it must be 4.
// TODO!!! Check whether it can remain commented out safely
// maxCt[j] = 1;
last[j] = retval[proc].first.labels[j];
}
totals[level]++;
counts[level]++;
if (counts[level] > maxCt[level]) {
maxCt[level] = counts[level];
}
last[level] = retval[proc].first.labels[level];
break;
} else if (level == depth - 1) {
__kmp_free(last);
__kmp_free(maxCt);
__kmp_free(counts);
__kmp_free(totals);
__kmp_free(retval);
KMP_CPU_FREE(oldMask);
*msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
return -1;
}
}
}
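// Worked example (hypothetical, depth == 2 with pkgLevel == 0): for sorted
// labels (0,0) (0,1) (0,2) (0,3) (1,0) (1,1) - i.e. pkg0 with 4 cores and
// pkg1 with 2 - the loop above ends with totals == {2, 6}, counts == {2, 2}
// and maxCt == {2, 4}: 2 packages, 6 cores in all, at most 4 cores per
// package.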
// When affinity is off, this routine will still be called to set
// __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
// Make sure all these vars are set correctly, and return if affinity is not
// enabled.
if (threadLevel >= 0) {
__kmp_nThreadsPerCore = maxCt[threadLevel];
} else {
__kmp_nThreadsPerCore = 1;
}
nPackages = totals[pkgLevel];
if (coreLevel >= 0) {
__kmp_ncores = totals[coreLevel];
nCoresPerPkg = maxCt[coreLevel];
} else {
__kmp_ncores = nPackages;
nCoresPerPkg = 1;
}
// Check to see if the machine topology is uniform
unsigned prod = maxCt[0];
for (level = 1; level < depth; level++) {
prod *= maxCt[level];
}
bool uniform = (prod == totals[level - 1]);
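// Continuing the worked example above: prod == 2 * 4 == 8 but
// totals[depth - 1] == 6, so that topology would be reported as non-uniform.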
// Print the machine topology summary.
if (__kmp_affinity_verbose) {
char mask[KMP_AFFIN_MASK_PRINT_LEN];
__kmp_affinity_print_mask(mask, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
KMP_INFORM(AffUseGlobCpuidL11, "KMP_AFFINITY");
if (__kmp_affinity_respect_mask) {
KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", mask);
} else {
KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", mask);
}
KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
if (uniform) {
KMP_INFORM(Uniform, "KMP_AFFINITY");
} else {
KMP_INFORM(NonUniform, "KMP_AFFINITY");
}
kmp_str_buf_t buf;
__kmp_str_buf_init(&buf);
__kmp_str_buf_print(&buf, "%d", totals[0]);
for (level = 1; level <= pkgLevel; level++) {
__kmp_str_buf_print(&buf, " x %d", maxCt[level]);
}
KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, nCoresPerPkg,
__kmp_nThreadsPerCore, __kmp_ncores);
__kmp_str_buf_free(&buf);
}
KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
KMP_DEBUG_ASSERT(nApics == __kmp_avail_proc);
__kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
for (proc = 0; (int)proc < nApics; ++proc) {
__kmp_pu_os_idx[proc] = retval[proc].second;
}
if (__kmp_affinity_type == affinity_none) {
__kmp_free(last);
__kmp_free(maxCt);
__kmp_free(counts);
__kmp_free(totals);
__kmp_free(retval);
KMP_CPU_FREE(oldMask);
return 0;
}
// Find any levels with radix 1, and remove them from the map
// (except for the package level).
int new_depth = 0;
for (level = 0; level < depth; level++) {
if ((maxCt[level] == 1) && (level != pkgLevel)) {
continue;
}
new_depth++;
}
// If we are removing any levels, allocate a new vector to return,
// and copy the relevant information to it.
if (new_depth != depth) {
AddrUnsPair *new_retval =
(AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * nApics);
for (proc = 0; (int)proc < nApics; proc++) {
Address addr(new_depth);
new_retval[proc] = AddrUnsPair(addr, retval[proc].second);
}
int new_level = 0;
int newPkgLevel = -1;
int newCoreLevel = -1;
int newThreadLevel = -1;
for (level = 0; level < depth; level++) {
if ((maxCt[level] == 1) && (level != pkgLevel)) {
// Remove this level. Never remove the package level
continue;
}
if (level == pkgLevel) {
newPkgLevel = new_level;
}
if (level == coreLevel) {
newCoreLevel = new_level;
}
if (level == threadLevel) {
newThreadLevel = new_level;
}
for (proc = 0; (int)proc < nApics; proc++) {
new_retval[proc].first.labels[new_level] =
retval[proc].first.labels[level];
}
new_level++;
}
__kmp_free(retval);
retval = new_retval;
depth = new_depth;
pkgLevel = newPkgLevel;
coreLevel = newCoreLevel;
threadLevel = newThreadLevel;
}
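// For example (hypothetical), on a machine with one hardware thread per core
// (maxCt[threadLevel] == 1), the thread level is removed here: depth shrinks
// from 3 to 2, newThreadLevel stays -1, and pkgLevel / coreLevel are remapped
// to 0 and 1.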
if (__kmp_affinity_gran_levels < 0) {
// Set the granularity level based on what levels are modeled
// in the machine topology map.
__kmp_affinity_gran_levels = 0;
if ((threadLevel >= 0) && (__kmp_affinity_gran > affinity_gran_thread)) {
__kmp_affinity_gran_levels++;
}
if ((coreLevel >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
__kmp_affinity_gran_levels++;
}
if (__kmp_affinity_gran > affinity_gran_package) {
__kmp_affinity_gran_levels++;
}
}
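// E.g., with thread, core and package levels all modeled, specifying
// KMP_AFFINITY=granularity=core yields __kmp_affinity_gran_levels == 1:
// only the thread level is collapsed when forming affinity masks.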
if (__kmp_affinity_verbose) {
__kmp_affinity_print_topology(retval, nApics, depth, pkgLevel, coreLevel,
threadLevel);
}
__kmp_free(last);
__kmp_free(maxCt);
__kmp_free(counts);
__kmp_free(totals);
KMP_CPU_FREE(oldMask);
*address2os = retval;
return depth;
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
#define osIdIndex 0
#define threadIdIndex 1
#define coreIdIndex 2
#define pkgIdIndex 3
#define nodeIdIndex 4
typedef unsigned *ProcCpuInfo;
static unsigned maxIndex = pkgIdIndex;
static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
const void *b) {
unsigned i;
const unsigned *aa = *(unsigned *const *)a;
const unsigned *bb = *(unsigned *const *)b;
for (i = maxIndex;; i--) {
if (aa[i] < bb[i])
return -1;
if (aa[i] > bb[i])
return 1;
if (i == osIdIndex)
break;
}
return 0;
}
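// With the default maxIndex == pkgIdIndex (raised if node_<n> fields are
// present), the comparator above orders records by pkgId, then coreId, then
// threadId, breaking any remaining ties by osId.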
#if KMP_USE_HIER_SCHED
// Set the array sizes for the hierarchy layers
static void __kmp_dispatch_set_hierarchy_values() {
// Set the maximum number of L1's to number of cores
// Set the maximum number of L2's to either number of cores / 2 for
// Intel(R) Xeon Phi(TM) coprocessors formerly codenamed Knights Landing,
// or the number of cores for Intel(R) Xeon(R) processors
// Set the maximum number of NUMA nodes and L3's to number of packages
__kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] =
nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
__kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores;
#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS)
if (__kmp_mic_type >= mic3)
__kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2;
else
#endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS)
__kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores;
__kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages;
__kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages;
__kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1;
// Set the number of threads per unit
// Number of hardware threads per L1/L2/L3/NUMA/LOOP
__kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1;
__kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] =
__kmp_nThreadsPerCore;
#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS)
if (__kmp_mic_type >= mic3)
__kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
2 * __kmp_nThreadsPerCore;
else
#endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS)
__kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
__kmp_nThreadsPerCore;
__kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] =
nCoresPerPkg * __kmp_nThreadsPerCore;
__kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] =
nCoresPerPkg * __kmp_nThreadsPerCore;
__kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] =
nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
}
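// For illustration (hypothetical values): on a 64-core Knights Landing part
// (__kmp_mic_type >= mic3) with 4 threads per core, the code above yields 32
// L2 units with 8 hardware threads per L2; on a Xeon(R) processor the L2
// layer instead mirrors the cores: 64 units with 4 threads each.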
// Return the index into the hierarchy for this tid and layer type (L1, L2, etc)
// i.e., this thread's L1 or this thread's L2, etc.
int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) {
int index = type + 1;
int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST);
if (type == kmp_hier_layer_e::LAYER_THREAD)
return tid;
else if (type == kmp_hier_layer_e::LAYER_LOOP)
return 0;
KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0);
if (tid >= num_hw_threads)
tid = tid % num_hw_threads;
return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index];
}
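// Usage sketch (hypothetical uniform machine: 8 cores, 2 threads per core,
// so __kmp_hier_threads_per[LAYER_L1 + 1] == 2 and
// __kmp_hier_max_units[LAYER_L1 + 1] == 8):
//   __kmp_dispatch_get_index(5, kmp_hier_layer_e::LAYER_L1)
// returns (5 / 2) % 8 == 2, i.e. tid 5 runs on the machine's third core.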
// Return the number of t1's per t2
int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) {
int i1 = t1 + 1;
int i2 = t2 + 1;
KMP_DEBUG_ASSERT(i1 <= i2);
KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST);
KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST);
KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0);
  // (threads per t2 unit) / (threads per t1 unit) == number of t1's per t2
return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1];
}
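// E.g., on the same hypothetical machine as above,
// __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e::LAYER_THREAD,
// kmp_hier_layer_e::LAYER_L1) returns 2 / 1 == 2: two hardware threads share
// each L1 (core).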
#endif // KMP_USE_HIER_SCHED
// Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the
// affinity map.
static int __kmp_affinity_create_cpuinfo_map(AddrUnsPair **address2os,
int *line,
kmp_i18n_id_t *const msg_id,
FILE *f) {
*address2os = NULL;
*msg_id = kmp_i18n_null;
// Scan the file once, counting the number of "processor" (osId) fields and
// finding the highest value of <n> for any node_<n> field.
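// The parser below recognizes records of the form (one record per OS proc,
// records separated by blank lines; unrecognized fields are ignored):
//   processor   : 0
//   physical id : 0
//   core id     : 0
//   thread id   : 0
//   node_0 id   : 0
// Only "processor" and "physical id" are required when the full parse runs;
// "node_<n> id" fields, when present, add NUMA levels above the package.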
char buf[256];
unsigned num_records = 0;
while (!feof(f)) {
buf[sizeof(buf) - 1] = 1;
if (!fgets(buf, sizeof(buf), f)) {
// A read error presumably means we hit EOF
break;
}
char s1[] = "processor";
if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
num_records++;
continue;
}
// FIXME - this will match "node_<n> <garbage>"
unsigned level;
if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
if (nodeIdIndex + level >= maxIndex) {
maxIndex = nodeIdIndex + level;
}
continue;
}
}
// Check for empty file / no valid processor records, or too many. The number
// of records can't exceed the number of valid bits in the affinity mask.
if (num_records == 0) {
*line = 0;
*msg_id = kmp_i18n_str_NoProcRecords;
return -1;
}
if (num_records > (unsigned)__kmp_xproc) {
*line = 0;
*msg_id = kmp_i18n_str_TooManyProcRecords;
return -1;
}
// Set the file pointer back to the beginning, so that we can scan the file
// again, this time performing a full parse of the data. Allocate a vector of
// ProcCpuInfo objects, where we will place the data. Adding an extra element
// at the end allows us to remove a lot of extra checks for termination
// conditions.
if (fseek(f, 0, SEEK_SET) != 0) {
*line = 0;
*msg_id = kmp_i18n_str_CantRewindCpuinfo;
return -1;
}
// Allocate the array of records to store the proc info in. The dummy
// element at the end makes the logic in filling them out easier to code.
unsigned **threadInfo =
(unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
unsigned i;
for (i = 0; i <= num_records; i++) {
threadInfo[i] =
(unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
}
#define CLEANUP_THREAD_INFO \
for (i = 0; i <= num_records; i++) { \
__kmp_free(threadInfo[i]); \
} \
__kmp_free(threadInfo);
// A value of UINT_MAX means that we didn't find the field
unsigned __index;
#define INIT_PROC_INFO(p) \
for (__index = 0; __index <= maxIndex; __index++) { \
(p)[__index] = UINT_MAX; \
}
for (i = 0; i <= num_records; i++) {
INIT_PROC_INFO(threadInfo[i]);
}
unsigned num_avail = 0;
*line = 0;
while (!feof(f)) {
// Create an inner scoping level, so that all the goto targets at the end of
// the loop appear in an outer scoping level. This avoids warnings about
// jumping past an initialization to a target in the same block.
{
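// Plant a sentinel in the last byte of the buffer: fgets() always
// NUL-terminates, so if the sentinel has been overwritten afterwards,
// the line filled the entire buffer and is longer than it.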
buf[sizeof(buf) - 1] = 1;
bool long_line = false;
if (!fgets(buf, sizeof(buf), f)) {
// A read error presumably means we hit EOF
// If there is valid data in threadInfo[num_avail], then fake
// a blank line to ensure that the last address gets parsed.
bool valid = false;
for (i = 0; i <= maxIndex; i++) {
if (threadInfo[num_avail][i] != UINT_MAX) {
valid = true;
}
}
if (!valid) {
break;
}
buf[0] = 0;
} else if (!buf[sizeof(buf) - 1]) {
// The line is longer than the buffer. Set a flag, and only emit an
// error later if the line turns out to be one we actually parse.
long_line = true;
#define CHECK_LINE \
if (long_line) { \
CLEANUP_THREAD_INFO; \
*msg_id = kmp_i18n_str_LongLineCpuinfo; \
return -1; \
}
}
(*line)++;
char s1[] = "processor";
if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
CHECK_LINE;
char *p = strchr(buf + sizeof(s1) - 1, ':');
unsigned val;
if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
goto no_val;
if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
#if KMP_ARCH_AARCH64
// Handle the old AArch64 /proc/cpuinfo layout differently:
// it lists all of the 'processor' entries in a single
// 'Processor' section, so the normal duplicate-field check
// on this field would always trigger there.
num_avail++;
#else
goto dup_field;
#endif
threadInfo[num_avail][osIdIndex] = val;
#if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
char path[256];
KMP_SNPRINTF(
path, sizeof(path),
"/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
threadInfo[num_avail][osIdIndex]);
__kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);
KMP_SNPRINTF(path, sizeof(path),
"/sys/devices/system/cpu/cpu%u/topology/core_id",
threadInfo[num_avail][osIdIndex]);
__kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
continue;
#else
}
char s2[] = "physical id";
if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
CHECK_LINE;
char *p = strchr(buf + sizeof(s2) - 1, ':');
unsigned val;
if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
goto no_val;
if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
goto dup_field;
threadInfo[num_avail][pkgIdIndex] = val;
continue;
}
char s3[] = "core id";
if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
CHECK_LINE;
char *p = strchr(buf + sizeof(s3) - 1, ':');
unsigned val;
if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
goto no_val;
if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
goto dup_field;
threadInfo[num_avail][coreIdIndex] = val;
continue;
#endif // KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
}
char s4[] = "thread id";
if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
CHECK_LINE;
char *p = strchr(buf + sizeof(s4) - 1, ':');
unsigned val;
if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
goto no_val;
if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
goto dup_field;
threadInfo[num_avail][threadIdIndex] = val;
continue;
}
unsigned level;
if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
CHECK_LINE;
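// Note: reusing sizeof(s4) ("thread id") as the search offset is safe
// here only because the ':' in a "node_<n> id" line cannot appear
// before that offset.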
char *p = strchr(buf + sizeof(s4) - 1, ':');
unsigned val;
if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
goto no_val;
KMP_ASSERT(nodeIdIndex + level <= maxIndex);
if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
goto dup_field;
threadInfo[num_avail][nodeIdIndex + level] = val;
continue;
}
// We didn't recognize the leading token on the line. There are lots of
// leading tokens that we don't recognize - if the line isn't empty, go on
// to the next line.
if ((*buf != 0) && (*buf != '\n')) {
// If the line is longer than the buffer, read characters
// until we find a newline.
if (long_line) {
int ch;
while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
;
}
continue;
}
// A newline has signaled the end of the processor record.
// Check that there aren't too many procs specified.
if ((int)num_avail == __kmp_xproc) {
CLEANUP_THREAD_INFO;
*msg_id = kmp_i18n_str_TooManyEntries;
return -1;
}
// Check for missing fields. The osId field must be there, and we
// currently require that the physical id field is specified, also.
if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
CLEANUP_THREAD_INFO;
*msg_id = kmp_i18n_str_MissingProcField;
return -1;
}
if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
CLEANUP_THREAD_INFO;
*msg_id = kmp_i18n_str_MissingPhysicalIDField;
return -1;
}
// Skip this proc if it is not included in the machine model.
if (!KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
__kmp_affin_fullMask)) {
INIT_PROC_INFO(threadInfo[num_avail]);
continue;
}
// We have a successful parse of this proc's info.
// Increment the counter, and prepare for the next proc.
num_avail++;
KMP_ASSERT(num_avail <= num_records);
INIT_PROC_INFO(threadInfo[num_avail]);
}
continue;
no_val:
CLEANUP_THREAD_INFO;
*msg_id = kmp_i18n_str_MissingValCpuinfo;
return -1;
dup_field:
CLEANUP_THREAD_INFO;
*msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
return -1;
}
*line = 0;
#if KMP_MIC && REDUCE_TEAM_SIZE
unsigned teamSize = 0;
#endif // KMP_MIC && REDUCE_TEAM_SIZE
// check for num_records == __kmp_xproc ???
// If there's only one thread context to bind to, form an Address object with
// depth 1 and return immediately (or, if affinity is off, set address2os to
// NULL and return).
//
// If it is configured to omit the package level when there is only a single
// package, the logic at the end of this routine won't work if there is only a
// single thread - it would try to form an Address object with depth 0.
KMP_ASSERT(num_avail > 0);
KMP_ASSERT(num_avail <= num_records);
if (num_avail == 1) {
__kmp_ncores = 1;
__kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
if (__kmp_affinity_verbose) {
if (!KMP_AFFINITY_CAPABLE()) {
KMP_INFORM(AffNotCapableUseCpuinfo, "KMP_AFFINITY");
KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
KMP_INFORM(Uniform, "KMP_AFFINITY");
} else {
char buf[KMP_AFFIN_MASK_PRINT_LEN];
__kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
__kmp_affin_fullMask);
KMP_INFORM(AffCapableUseCpuinfo, "KMP_AFFINITY");
if (__kmp_affinity_respect_mask) {
KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
} else {
KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
}
KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
KMP_INFORM(Uniform, "KMP_AFFINITY");
}
int index;
kmp_str_buf_t buf;
__kmp_str_buf_init(&buf);
__kmp_str_buf_print(&buf, "1");
for (index = maxIndex - 1; index > pkgIdIndex; index--) {
__kmp_str_buf_print(&buf, " x 1");
}
KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, 1, 1, 1);
__kmp_str_buf_free(&buf);
}
if (__kmp_affinity_type == affinity_none) {
CLEANUP_THREAD_INFO;
return 0;
}
*address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair));
Address addr(1);
addr.labels[0] = threadInfo[0][pkgIdIndex];
(*address2os)[0] = AddrUnsPair(addr, threadInfo[0][osIdIndex]);
if (__kmp_affinity_gran_levels < 0) {
__kmp_affinity_gran_levels = 0;
}
if (__kmp_affinity_verbose) {
__kmp_affinity_print_topology(*address2os, 1, 1, 0, -1, -1);
}
CLEANUP_THREAD_INFO;
return 1;
}
// Sort the threadInfo table by physical Id.
qsort(threadInfo, num_avail, sizeof(*threadInfo),
__kmp_affinity_cmp_ProcCpuInfo_phys_id);
// The table is now sorted by pkgId / coreId / threadId, but we really don't
// know the radix of any of the fields. pkgId's may be sparsely assigned among
// the chips on a system. Although coreId's are usually assigned
// [0 .. coresPerPkg-1] and threadId's are usually assigned
// [0..threadsPerCore-1], we don't want to make any such assumptions.
//
// For that matter, we don't know what coresPerPkg and threadsPerCore (or the
// total # packages) are at this point - we want to determine that now. We
// only have an upper bound on the first two figures.
unsigned *counts =
(unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
unsigned *maxCt =
(unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
unsigned *totals =
(unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
unsigned *lastId =
(unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
bool assign_thread_ids = false;
unsigned threadIdCt;
unsigned index;
restart_radix_check:
threadIdCt = 0;
// Initialize the counter arrays with data from threadInfo[0].
if (assign_thread_ids) {
if (threadInfo[0][threadIdIndex] == UINT_MAX) {
threadInfo[0][threadIdIndex] = threadIdCt++;
} else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
threadIdCt = threadInfo[0][threadIdIndex] + 1;
}
}
for (index = 0; index <= maxIndex; index++) {
counts[index] = 1;
maxCt[index] = 1;
totals[index] = 1;
lastId[index] = threadInfo[0][index];
}
// Run through the rest of the OS procs.
for (i = 1; i < num_avail; i++) {
// Find the most significant index whose id differs from the id for the
// previous OS proc.
for (index = maxIndex; index >= threadIdIndex; index--) {
if (assign_thread_ids && (index == threadIdIndex)) {
// Auto-assign the thread id field if it wasn't specified.
if (threadInfo[i][threadIdIndex] == UINT_MAX) {
threadInfo[i][threadIdIndex] = threadIdCt++;
}
// Apparently the thread id field was specified for some entries and not
// others. Start the thread id counter off at the next higher thread id.
else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
threadIdCt = threadInfo[i][threadIdIndex] + 1;
}
}
if (threadInfo[i][index] != lastId[index]) {
// Run through all indices which are less significant, and reset the
// counts to 1. At all levels up to and including index, we need to
// increment the totals and record the last id.
unsigned index2;
for (index2 = threadIdIndex; index2 < index; index2++) {
totals[index2]++;
if (counts[index2] > maxCt[index2]) {
maxCt[index2] = counts[index2];
}
counts[index2] = 1;
lastId[index2] = threadInfo[i][index2];
}
counts[index]++;
totals[index]++;
lastId[index] = threadInfo[i][index];
if (assign_thread_ids && (index > threadIdIndex)) {
#if KMP_MIC && REDUCE_TEAM_SIZE
// The default team size is the total #threads in the machine
// minus 1 thread for every core that has 3 or more threads.
teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE
// Restart the thread counter, as we are on a new core.
threadIdCt = 0;
// Auto-assign the thread id field if it wasn't specified.
if (threadInfo[i][threadIdIndex] == UINT_MAX) {
threadInfo[i][threadIdIndex] = threadIdCt++;
}
// Apparently the thread id field was specified for some entries and
// not others. Start the thread id counter off at the next higher
// thread id.
else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
threadIdCt = threadInfo[i][threadIdIndex] + 1;
}
}
break;
}
}
if (index < threadIdIndex) {
// If thread ids were specified, it is an error if they are not unique.
// Also, check that we haven't already restarted the loop (to be safe -
// shouldn't need to).
if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
__kmp_free(lastId);
__kmp_free(totals);
__kmp_free(maxCt);
__kmp_free(counts);
CLEANUP_THREAD_INFO;
*msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
return -1;
}
// If the thread ids were not specified and we see entries that are
// duplicates, start the loop over and assign the thread ids manually.
assign_thread_ids = true;
goto restart_radix_check;
}
}
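// To make the restart logic above concrete (hypothetical input): two records
// with identical pkg/core ids and no "thread id" field match at every index
// down to and including threadIdIndex, so the first pass sets
// assign_thread_ids and jumps back to restart_radix_check, which renumbers
// thread ids 0, 1, ... within each core.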
#if KMP_MIC && REDUCE_TEAM_SIZE
// The default team size is the total #threads in the machine
// minus 1 thread for every core that has 3 or more threads.
teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
#endif // KMP_MIC && REDUCE_TEAM_SIZE
for (index = threadIdIndex; index <= maxIndex; index++) {
if (counts[index] > maxCt[index]) {
maxCt[index] = counts[index];
}
}
__kmp_nThreadsPerCore = maxCt[threadIdIndex];
nCoresPerPkg = maxCt[coreIdIndex];
nPackages = totals[pkgIdIndex];
// Check to see if the machine topology is uniform
unsigned prod = totals[maxIndex];
for (index = threadIdIndex; index < maxIndex; index++) {
prod *= maxCt[index];
}
bool uniform = (prod == totals[threadIdIndex]);
// When affinity is off, this routine will still be called to set
// __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
// Make sure all these vars are set correctly, and return now if affinity is
// not enabled.
__kmp_ncores = totals[coreIdIndex];
if (__kmp_affinity_verbose) {
if (!KMP_AFFINITY_CAPABLE()) {
KMP_INFORM(AffNotCapableUseCpuinfo, "KMP_AFFINITY");
KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
if (uniform) {
KMP_INFORM(Uniform, "KMP_AFFINITY");
} else {
KMP_INFORM(NonUniform, "KMP_AFFINITY");
}
} else {
char buf[KMP_AFFIN_MASK_PRINT_LEN];
__kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
__kmp_affin_fullMask);
KMP_INFORM(AffCapableUseCpuinfo, "KMP_AFFINITY");
if (__kmp_affinity_respect_mask) {
KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
} else {
KMP_INFORM(InitOSProcSetNotR