Hardware Locality (hwloc)  v2.0-20191027.0400.gite37e7d8
helper.h
/*
 * Copyright © 2009 CNRS
 * Copyright © 2009-2019 Inria.  All rights reserved.
 * Copyright © 2009-2012 Université Bordeaux
 * Copyright © 2009-2010 Cisco Systems, Inc.  All rights reserved.
 * See COPYING in top-level directory.
 */

#ifndef HWLOC_HELPER_H
#define HWLOC_HELPER_H

#ifndef HWLOC_H
#error Please include the main hwloc.h instead
#endif

#include <stdlib.h>
#include <errno.h>
#include <assert.h> /* assert() is used by hwloc_distrib() and the nodeset converters below */


#ifdef __cplusplus
extern "C" {
#endif


static __hwloc_inline hwloc_obj_t
hwloc_get_first_largest_obj_inside_cpuset(hwloc_topology_t topology, hwloc_const_cpuset_t set)
{
  hwloc_obj_t obj = hwloc_get_root_obj(topology);
  if (!hwloc_bitmap_intersects(obj->cpuset, set))
    return NULL;
  while (!hwloc_bitmap_isincluded(obj->cpuset, set)) {
    /* while the object intersects without being included, look at its children */
    hwloc_obj_t child = obj->first_child;
    while (child) {
      if (hwloc_bitmap_intersects(child->cpuset, set))
        break;
      child = child->next_sibling;
    }
    if (!child)
      /* no child intersects, return their parent */
      return obj;
    /* found one intersecting child, look at its children */
    obj = child;
  }
  /* obj is included, return it */
  return obj;
}
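
/*
 * Usage sketch (illustration only, not part of hwloc): cover an arbitrary
 * cpuset `set` with the largest objects included in it, assuming `topology`
 * is already loaded and <stdio.h> is available.  Each iteration takes the
 * first largest object and removes its cpuset from the remaining bits.
 *
 *   hwloc_bitmap_t remaining = hwloc_bitmap_dup(set);
 *   hwloc_obj_t obj;
 *   while (!hwloc_bitmap_iszero(remaining)
 *          && (obj = hwloc_get_first_largest_obj_inside_cpuset(topology, remaining)) != NULL) {
 *     printf("got %s at depth %d\n", hwloc_obj_type_string(obj->type), obj->depth);
 *     hwloc_bitmap_andnot(remaining, remaining, obj->cpuset);
 *   }
 *   hwloc_bitmap_free(remaining);
 */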

HWLOC_DECLSPEC int hwloc_get_largest_objs_inside_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                                 hwloc_obj_t * __hwloc_restrict objs, int max);

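/*
 * Usage sketch (illustration only, not part of hwloc): fill a fixed-size
 * array with the largest objects exactly covering a cpuset `set`, assuming
 * `topology` is already loaded.  Check the return value for errors, e.g.
 * when 16 objects are not enough to cover the set.
 *
 *   hwloc_obj_t objs[16];
 *   int i, nr = hwloc_get_largest_objs_inside_cpuset(topology, set, objs, 16);
 *   for (i = 0; i < nr; i++)
 *     printf("#%d: %s L#%u\n", i, hwloc_obj_type_string(objs[i]->type), objs[i]->logical_index);
 */
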
static __hwloc_inline hwloc_obj_t
hwloc_get_next_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                           int depth, hwloc_obj_t prev)
{
  hwloc_obj_t next = hwloc_get_next_obj_by_depth(topology, depth, prev);
  if (!next)
    return NULL;
  while (next && (hwloc_bitmap_iszero(next->cpuset) || !hwloc_bitmap_isincluded(next->cpuset, set)))
    next = next->next_cousin;
  return next;
}

static __hwloc_inline hwloc_obj_t
hwloc_get_next_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                          hwloc_obj_type_t type, hwloc_obj_t prev)
{
  int depth = hwloc_get_type_depth(topology, type);
  if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE)
    return NULL;
  return hwloc_get_next_obj_inside_cpuset_by_depth(topology, set, depth, prev);
}

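/*
 * Usage sketch (illustration only, not part of hwloc): iterate over all
 * cores whose cpuset is included in `set`, assuming `topology` is already
 * loaded.  Passing NULL as the previous object starts the iteration.
 *
 *   hwloc_obj_t core = NULL;
 *   while ((core = hwloc_get_next_obj_inside_cpuset_by_type(topology, set,
 *                                                           HWLOC_OBJ_CORE, core)) != NULL)
 *     printf("core L#%u is inside the set\n", core->logical_index);
 */
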
static __hwloc_inline hwloc_obj_t
hwloc_get_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                      int depth, unsigned idx) __hwloc_attribute_pure;
static __hwloc_inline hwloc_obj_t
hwloc_get_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                      int depth, unsigned idx)
{
  hwloc_obj_t obj = hwloc_get_obj_by_depth (topology, depth, 0);
  unsigned count = 0;
  if (!obj)
    return NULL;
  while (obj) {
    if (!hwloc_bitmap_iszero(obj->cpuset) && hwloc_bitmap_isincluded(obj->cpuset, set)) {
      if (count == idx)
        return obj;
      count++;
    }
    obj = obj->next_cousin;
  }
  return NULL;
}

static __hwloc_inline hwloc_obj_t
hwloc_get_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                     hwloc_obj_type_t type, unsigned idx) __hwloc_attribute_pure;
static __hwloc_inline hwloc_obj_t
hwloc_get_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                     hwloc_obj_type_t type, unsigned idx)
{
  int depth = hwloc_get_type_depth(topology, type);
  if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE)
    return NULL;
  return hwloc_get_obj_inside_cpuset_by_depth(topology, set, depth, idx);
}

static __hwloc_inline unsigned
hwloc_get_nbobjs_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                         int depth) __hwloc_attribute_pure;
static __hwloc_inline unsigned
hwloc_get_nbobjs_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                         int depth)
{
  hwloc_obj_t obj = hwloc_get_obj_by_depth (topology, depth, 0);
  unsigned count = 0;
  if (!obj)
    return 0;
  while (obj) {
    if (!hwloc_bitmap_iszero(obj->cpuset) && hwloc_bitmap_isincluded(obj->cpuset, set))
      count++;
    obj = obj->next_cousin;
  }
  return count;
}

static __hwloc_inline int
hwloc_get_nbobjs_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                        hwloc_obj_type_t type) __hwloc_attribute_pure;
static __hwloc_inline int
hwloc_get_nbobjs_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                        hwloc_obj_type_t type)
{
  int depth = hwloc_get_type_depth(topology, type);
  if (depth == HWLOC_TYPE_DEPTH_UNKNOWN)
    return 0;
  if (depth == HWLOC_TYPE_DEPTH_MULTIPLE)
    return -1; /* FIXME: aggregate nbobjs from different levels? */
  return (int) hwloc_get_nbobjs_inside_cpuset_by_depth(topology, set, depth);
}

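/*
 * Usage sketch (illustration only, not part of hwloc): count and enumerate
 * the PUs contained in `set`, assuming `topology` is already loaded.
 *
 *   int nb = hwloc_get_nbobjs_inside_cpuset_by_type(topology, set, HWLOC_OBJ_PU);
 *   int i;
 *   for (i = 0; i < nb; i++) {
 *     hwloc_obj_t pu = hwloc_get_obj_inside_cpuset_by_type(topology, set, HWLOC_OBJ_PU, i);
 *     printf("PU #%d inside the set has OS index %u\n", i, pu->os_index);
 *   }
 */
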
static __hwloc_inline int
hwloc_get_obj_index_inside_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set,
                                   hwloc_obj_t obj) __hwloc_attribute_pure;
static __hwloc_inline int
hwloc_get_obj_index_inside_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set,
                                   hwloc_obj_t obj)
{
  int idx = 0;
  if (!hwloc_bitmap_isincluded(obj->cpuset, set))
    return -1;
  /* count how many objects are inside the cpuset on the way from us to the beginning of the level */
  while ((obj = obj->prev_cousin) != NULL)
    if (!hwloc_bitmap_iszero(obj->cpuset) && hwloc_bitmap_isincluded(obj->cpuset, set))
      idx++;
  return idx;
}

static __hwloc_inline hwloc_obj_t
hwloc_get_child_covering_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set,
                                hwloc_obj_t parent) __hwloc_attribute_pure;
static __hwloc_inline hwloc_obj_t
hwloc_get_child_covering_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set,
                                hwloc_obj_t parent)
{
  hwloc_obj_t child;
  if (hwloc_bitmap_iszero(set))
    return NULL;
  child = parent->first_child;
  while (child) {
    if (child->cpuset && hwloc_bitmap_isincluded(set, child->cpuset))
      return child;
    child = child->next_sibling;
  }
  return NULL;
}

static __hwloc_inline hwloc_obj_t
hwloc_get_obj_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set) __hwloc_attribute_pure;
static __hwloc_inline hwloc_obj_t
hwloc_get_obj_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set)
{
  struct hwloc_obj *current = hwloc_get_root_obj(topology);
  if (hwloc_bitmap_iszero(set) || !hwloc_bitmap_isincluded(set, current->cpuset))
    return NULL;
  while (1) {
    hwloc_obj_t child = hwloc_get_child_covering_cpuset(topology, set, current);
    if (!child)
      return current;
    current = child;
  }
}

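/*
 * Usage sketch (illustration only, not part of hwloc): find the smallest
 * object whose cpuset entirely contains the cpuset the current thread is
 * bound to, assuming `topology` is already loaded and CPU binding is
 * supported on this platform.
 *
 *   hwloc_cpuset_t bound = hwloc_bitmap_alloc();
 *   if (hwloc_get_cpubind(topology, bound, HWLOC_CPUBIND_THREAD) == 0) {
 *     hwloc_obj_t obj = hwloc_get_obj_covering_cpuset(topology, bound);
 *     if (obj)
 *       printf("bound inside %s L#%u\n", hwloc_obj_type_string(obj->type), obj->logical_index);
 *   }
 *   hwloc_bitmap_free(bound);
 */
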
static __hwloc_inline hwloc_obj_t
hwloc_get_next_obj_covering_cpuset_by_depth(hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                            int depth, hwloc_obj_t prev)
{
  hwloc_obj_t next = hwloc_get_next_obj_by_depth(topology, depth, prev);
  if (!next)
    return NULL;
  while (next && !hwloc_bitmap_intersects(set, next->cpuset))
    next = next->next_cousin;
  return next;
}

static __hwloc_inline hwloc_obj_t
hwloc_get_next_obj_covering_cpuset_by_type(hwloc_topology_t topology, hwloc_const_cpuset_t set,
                                           hwloc_obj_type_t type, hwloc_obj_t prev)
{
  int depth = hwloc_get_type_depth(topology, type);
  if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE)
    return NULL;
  return hwloc_get_next_obj_covering_cpuset_by_depth(topology, set, depth, prev);
}

static __hwloc_inline hwloc_obj_t
hwloc_get_ancestor_obj_by_depth (hwloc_topology_t topology __hwloc_attribute_unused, int depth, hwloc_obj_t obj) __hwloc_attribute_pure;
static __hwloc_inline hwloc_obj_t
hwloc_get_ancestor_obj_by_depth (hwloc_topology_t topology __hwloc_attribute_unused, int depth, hwloc_obj_t obj)
{
  hwloc_obj_t ancestor = obj;
  if (obj->depth < depth)
    return NULL;
  while (ancestor && ancestor->depth > depth)
    ancestor = ancestor->parent;
  return ancestor;
}

static __hwloc_inline hwloc_obj_t
hwloc_get_ancestor_obj_by_type (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_type_t type, hwloc_obj_t obj) __hwloc_attribute_pure;
static __hwloc_inline hwloc_obj_t
hwloc_get_ancestor_obj_by_type (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_type_t type, hwloc_obj_t obj)
{
  hwloc_obj_t ancestor = obj->parent;
  while (ancestor && ancestor->type != type)
    ancestor = ancestor->parent;
  return ancestor;
}

static __hwloc_inline hwloc_obj_t
hwloc_get_common_ancestor_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj1, hwloc_obj_t obj2) __hwloc_attribute_pure;
static __hwloc_inline hwloc_obj_t
hwloc_get_common_ancestor_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj1, hwloc_obj_t obj2)
{
  /* The loop isn't so easy since intermediate ancestors may have
   * different depths, causing us to alternate between using obj1->parent
   * and obj2->parent. Also, even if at some point we find ancestors
   * of the same depth, their ancestors may have different depths again.
   */
  while (obj1 != obj2) {
    while (obj1->depth > obj2->depth)
      obj1 = obj1->parent;
    while (obj2->depth > obj1->depth)
      obj2 = obj2->parent;
    if (obj1 != obj2 && obj1->depth == obj2->depth) {
      obj1 = obj1->parent;
      obj2 = obj2->parent;
    }
  }
  return obj1;
}

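/*
 * Usage sketch (illustration only, not part of hwloc): find the deepest
 * object shared by two PUs, for instance to check whether they share a
 * package or a cache, assuming `topology` is already loaded.
 *
 *   hwloc_obj_t pu0 = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 0);
 *   hwloc_obj_t pu1 = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 1);
 *   if (pu0 && pu1) {
 *     hwloc_obj_t anc = hwloc_get_common_ancestor_obj(topology, pu0, pu1);
 *     printf("PU#0 and PU#1 share a %s\n", hwloc_obj_type_string(anc->type));
 *   }
 */
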
static __hwloc_inline int
hwloc_obj_is_in_subtree (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj, hwloc_obj_t subtree_root) __hwloc_attribute_pure;
static __hwloc_inline int
hwloc_obj_is_in_subtree (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj, hwloc_obj_t subtree_root)
{
  return obj->cpuset && subtree_root->cpuset && hwloc_bitmap_isincluded(obj->cpuset, subtree_root->cpuset);
}

static __hwloc_inline hwloc_obj_t
hwloc_get_next_child (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t parent, hwloc_obj_t prev)
{
  hwloc_obj_t obj;
  int state = 0;
  if (prev) {
    if (prev->type == HWLOC_OBJ_MISC)
      state = 3;
    else if (prev->type == HWLOC_OBJ_BRIDGE || prev->type == HWLOC_OBJ_PCI_DEVICE || prev->type == HWLOC_OBJ_OS_DEVICE)
      state = 2;
    else if (prev->type == HWLOC_OBJ_NUMANODE)
      state = 1;
    obj = prev->next_sibling;
  } else {
    obj = parent->first_child;
  }
  if (!obj && state == 0) {
    obj = parent->memory_first_child;
    state = 1;
  }
  if (!obj && state == 1) {
    obj = parent->io_first_child;
    state = 2;
  }
  if (!obj && state == 2) {
    obj = parent->misc_first_child;
    state = 3;
  }
  return obj;
}

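/*
 * Usage sketch (illustration only, not part of hwloc): walk all children of
 * an object, including the Memory, I/O and Misc children that are not in
 * the normal `first_child` list, assuming `topology` is already loaded.
 *
 *   hwloc_obj_t root = hwloc_get_root_obj(topology);
 *   hwloc_obj_t child = NULL;
 *   while ((child = hwloc_get_next_child(topology, root, child)) != NULL)
 *     printf("child of type %s\n", hwloc_obj_type_string(child->type));
 */
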
HWLOC_DECLSPEC int
hwloc_obj_type_is_normal(hwloc_obj_type_t type);

HWLOC_DECLSPEC int
hwloc_obj_type_is_io(hwloc_obj_type_t type);

HWLOC_DECLSPEC int
hwloc_obj_type_is_memory(hwloc_obj_type_t type);

HWLOC_DECLSPEC int
hwloc_obj_type_is_cache(hwloc_obj_type_t type);

HWLOC_DECLSPEC int
hwloc_obj_type_is_dcache(hwloc_obj_type_t type);

HWLOC_DECLSPEC int
hwloc_obj_type_is_icache(hwloc_obj_type_t type);

static __hwloc_inline int
hwloc_get_cache_type_depth (hwloc_topology_t topology,
                            unsigned cachelevel, hwloc_obj_cache_type_t cachetype)
{
  int depth;
  int found = HWLOC_TYPE_DEPTH_UNKNOWN;
  for (depth=0; ; depth++) {
    hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, depth, 0);
    if (!obj)
      break;
    if (!hwloc_obj_type_is_dcache(obj->type) || obj->attr->cache.depth != cachelevel)
      /* doesn't match, try next depth */
      continue;
    if (cachetype == (hwloc_obj_cache_type_t) -1) {
      if (found != HWLOC_TYPE_DEPTH_UNKNOWN) {
        /* second match, return MULTIPLE */
        return HWLOC_TYPE_DEPTH_MULTIPLE;
      }
      /* first match, mark it as found */
      found = depth;
      continue;
    }
    if (obj->attr->cache.type == cachetype || obj->attr->cache.type == HWLOC_OBJ_CACHE_UNIFIED)
      /* exact match (either unified is alone, or we match instruction or data), return immediately */
      return depth;
  }
  /* went to the bottom, return what we found */
  return found;
}

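/*
 * Usage sketch (illustration only, not part of hwloc): find the depth of
 * level-2 data (or unified) caches and count how many there are, assuming
 * `topology` is already loaded.
 *
 *   int depth = hwloc_get_cache_type_depth(topology, 2, HWLOC_OBJ_CACHE_DATA);
 *   if (depth == HWLOC_TYPE_DEPTH_UNKNOWN)
 *     printf("no L2 data cache in this topology\n");
 *   else
 *     printf("%u L2 caches found\n", hwloc_get_nbobjs_by_depth(topology, depth));
 */
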
static __hwloc_inline hwloc_obj_t
hwloc_get_cache_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set) __hwloc_attribute_pure;
static __hwloc_inline hwloc_obj_t
hwloc_get_cache_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set)
{
  hwloc_obj_t current = hwloc_get_obj_covering_cpuset(topology, set);
  while (current) {
    if (hwloc_obj_type_is_dcache(current->type))
      return current;
    current = current->parent;
  }
  return NULL;
}

static __hwloc_inline hwloc_obj_t
hwloc_get_shared_cache_covering_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj) __hwloc_attribute_pure;
static __hwloc_inline hwloc_obj_t
hwloc_get_shared_cache_covering_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj)
{
  hwloc_obj_t current = obj->parent;
  if (!obj->cpuset)
    return NULL;
  while (current) {
    if (!hwloc_bitmap_isequal(current->cpuset, obj->cpuset)
        && hwloc_obj_type_is_dcache(current->type))
      return current;
    current = current->parent;
  }
  return NULL;
}

static __hwloc_inline hwloc_obj_t
hwloc_get_pu_obj_by_os_index(hwloc_topology_t topology, unsigned os_index) __hwloc_attribute_pure;
static __hwloc_inline hwloc_obj_t
hwloc_get_pu_obj_by_os_index(hwloc_topology_t topology, unsigned os_index)
{
  hwloc_obj_t obj = NULL;
  while ((obj = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_PU, obj)) != NULL)
    if (obj->os_index == os_index)
      return obj;
  return NULL;
}

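/*
 * Usage sketch (illustration only, not part of hwloc): translate an OS CPU
 * number (e.g. from sched_getcpu() on Linux) into a hwloc PU object and
 * bind the current thread to it, assuming `topology` is already loaded and
 * the hypothetical variable `oscpu` holds the OS index.
 *
 *   hwloc_obj_t pu = hwloc_get_pu_obj_by_os_index(topology, oscpu);
 *   if (pu)
 *     hwloc_set_cpubind(topology, pu->cpuset, HWLOC_CPUBIND_THREAD);
 */
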
static __hwloc_inline hwloc_obj_t
hwloc_get_numanode_obj_by_os_index(hwloc_topology_t topology, unsigned os_index) __hwloc_attribute_pure;
static __hwloc_inline hwloc_obj_t
hwloc_get_numanode_obj_by_os_index(hwloc_topology_t topology, unsigned os_index)
{
  hwloc_obj_t obj = NULL;
  while ((obj = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_NUMANODE, obj)) != NULL)
    if (obj->os_index == os_index)
      return obj;
  return NULL;
}

/* TODO: rather provide an iterator? Provide a way to know how much should be allocated? By returning the total number of objects instead? */
HWLOC_DECLSPEC unsigned hwloc_get_closest_objs (hwloc_topology_t topology, hwloc_obj_t src, hwloc_obj_t * __hwloc_restrict objs, unsigned max);

static __hwloc_inline hwloc_obj_t
hwloc_get_obj_below_by_type (hwloc_topology_t topology,
                             hwloc_obj_type_t type1, unsigned idx1,
                             hwloc_obj_type_t type2, unsigned idx2) __hwloc_attribute_pure;
static __hwloc_inline hwloc_obj_t
hwloc_get_obj_below_by_type (hwloc_topology_t topology,
                             hwloc_obj_type_t type1, unsigned idx1,
                             hwloc_obj_type_t type2, unsigned idx2)
{
  hwloc_obj_t obj;
  obj = hwloc_get_obj_by_type (topology, type1, idx1);
  if (!obj)
    return NULL;
  return hwloc_get_obj_inside_cpuset_by_type(topology, obj->cpuset, type2, idx2);
}

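/*
 * Usage sketch (illustration only, not part of hwloc): get the second core
 * of the first package, i.e. the core at index 1 below Package L#0,
 * assuming `topology` is already loaded.
 *
 *   hwloc_obj_t core = hwloc_get_obj_below_by_type(topology,
 *                                                  HWLOC_OBJ_PACKAGE, 0,
 *                                                  HWLOC_OBJ_CORE, 1);
 *   if (core)
 *     printf("found core L#%u in package L#0\n", core->logical_index);
 */
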
static __hwloc_inline hwloc_obj_t
hwloc_get_obj_below_array_by_type (hwloc_topology_t topology, int nr, hwloc_obj_type_t *typev, unsigned *idxv) __hwloc_attribute_pure;
static __hwloc_inline hwloc_obj_t
hwloc_get_obj_below_array_by_type (hwloc_topology_t topology, int nr, hwloc_obj_type_t *typev, unsigned *idxv)
{
  hwloc_obj_t obj = hwloc_get_root_obj(topology);
  int i;
  for(i=0; i<nr; i++) {
    if (!obj)
      return NULL;
    obj = hwloc_get_obj_inside_cpuset_by_type(topology, obj->cpuset, typev[i], idxv[i]);
  }
  return obj;
}

enum hwloc_distrib_flags_e {
  HWLOC_DISTRIB_FLAG_REVERSE = (1UL<<0)
};

static __hwloc_inline int
hwloc_distrib(hwloc_topology_t topology,
              hwloc_obj_t *roots, unsigned n_roots,
              hwloc_cpuset_t *set,
              unsigned n,
              int until, unsigned long flags)
{
  unsigned i;
  unsigned tot_weight;
  unsigned given, givenweight;
  hwloc_cpuset_t *cpusetp = set;

  if (flags & ~HWLOC_DISTRIB_FLAG_REVERSE) {
    errno = EINVAL;
    return -1;
  }

  tot_weight = 0;
  for (i = 0; i < n_roots; i++)
    tot_weight += (unsigned) hwloc_bitmap_weight(roots[i]->cpuset);

  for (i = 0, given = 0, givenweight = 0; i < n_roots; i++) {
    unsigned chunk, weight;
    hwloc_obj_t root = roots[flags & HWLOC_DISTRIB_FLAG_REVERSE ? n_roots-1-i : i];
    hwloc_cpuset_t cpuset = root->cpuset;
    if (root->type == HWLOC_OBJ_NUMANODE)
      /* NUMA nodes have the same cpuset as their parent, but we need normal objects below */
      root = root->parent;
    weight = (unsigned) hwloc_bitmap_weight(cpuset);
    if (!weight)
      continue;
    /* Give to root a chunk proportional to its weight.
     * If previous chunks got rounded-up, we may get a bit less. */
    chunk = (( (givenweight+weight) * n  + tot_weight-1) / tot_weight)
          - ((  givenweight         * n  + tot_weight-1) / tot_weight);
    if (!root->arity || chunk <= 1 || root->depth >= until) {
      /* We can't split any more, put everything there.  */
      if (chunk) {
        /* Fill cpusets with ours */
        unsigned j;
        for (j=0; j < chunk; j++)
          cpusetp[j] = hwloc_bitmap_dup(cpuset);
      } else {
        /* We got no chunk, just merge our cpuset to a previous one
         * (the first chunk cannot be empty)
         * so that this root doesn't get ignored.
         */
        assert(given);
        hwloc_bitmap_or(cpusetp[-1], cpusetp[-1], cpuset);
      }
    } else {
      /* Still more to distribute, recurse into children */
      hwloc_distrib(topology, root->children, root->arity, cpusetp, chunk, until, flags);
    }
    cpusetp += chunk;
    given += chunk;
    givenweight += weight;
  }

  return 0;
}

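/*
 * Usage sketch (illustration only, not part of hwloc): split the whole
 * machine into 4 roughly balanced parts, one per worker, assuming
 * `topology` is already loaded and <limits.h> provides INT_MAX (which
 * disables the depth limit).  The returned cpusets are newly allocated
 * (hwloc_bitmap_dup above) and must be freed; each worker could then be
 * bound with hwloc_set_cpubind() before the free.
 *
 *   hwloc_obj_t root = hwloc_get_root_obj(topology);
 *   hwloc_cpuset_t sets[4];
 *   unsigned i;
 *   if (hwloc_distrib(topology, &root, 1, sets, 4, INT_MAX, 0) == 0) {
 *     for (i = 0; i < 4; i++) {
 *       char *s;
 *       hwloc_bitmap_asprintf(&s, sets[i]);
 *       printf("worker %u gets cpuset %s\n", i, s);
 *       free(s);
 *       hwloc_bitmap_free(sets[i]);
 *     }
 *   }
 */
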
HWLOC_DECLSPEC hwloc_const_cpuset_t
hwloc_topology_get_complete_cpuset(hwloc_topology_t topology) __hwloc_attribute_pure;

HWLOC_DECLSPEC hwloc_const_cpuset_t
hwloc_topology_get_topology_cpuset(hwloc_topology_t topology) __hwloc_attribute_pure;

HWLOC_DECLSPEC hwloc_const_cpuset_t
hwloc_topology_get_allowed_cpuset(hwloc_topology_t topology) __hwloc_attribute_pure;

HWLOC_DECLSPEC hwloc_const_nodeset_t
hwloc_topology_get_complete_nodeset(hwloc_topology_t topology) __hwloc_attribute_pure;

HWLOC_DECLSPEC hwloc_const_nodeset_t
hwloc_topology_get_topology_nodeset(hwloc_topology_t topology) __hwloc_attribute_pure;

HWLOC_DECLSPEC hwloc_const_nodeset_t
hwloc_topology_get_allowed_nodeset(hwloc_topology_t topology) __hwloc_attribute_pure;

static __hwloc_inline int
hwloc_cpuset_to_nodeset(hwloc_topology_t topology, hwloc_const_cpuset_t _cpuset, hwloc_nodeset_t nodeset)
{
        int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE);
        hwloc_obj_t obj = NULL;
        assert(depth != HWLOC_TYPE_DEPTH_UNKNOWN);
        hwloc_bitmap_zero(nodeset);
        while ((obj = hwloc_get_next_obj_covering_cpuset_by_depth(topology, _cpuset, depth, obj)) != NULL)
                if (hwloc_bitmap_set(nodeset, obj->os_index) < 0)
                        return -1;
        return 0;
}

static __hwloc_inline int
hwloc_cpuset_from_nodeset(hwloc_topology_t topology, hwloc_cpuset_t _cpuset, hwloc_const_nodeset_t nodeset)
{
        int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE);
        hwloc_obj_t obj = NULL;
        assert(depth != HWLOC_TYPE_DEPTH_UNKNOWN);
        hwloc_bitmap_zero(_cpuset);
        while ((obj = hwloc_get_next_obj_by_depth(topology, depth, obj)) != NULL) {
                if (hwloc_bitmap_isset(nodeset, obj->os_index))
                        /* no need to check obj->cpuset because objects in levels always have a cpuset */
                        if (hwloc_bitmap_or(_cpuset, _cpuset, obj->cpuset) < 0)
                                return -1;
        }
        return 0;
}

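/*
 * Usage sketch (illustration only, not part of hwloc): compute the set of
 * NUMA nodes whose memory is local to the PUs in `cpuset`, assuming
 * `topology` is already loaded and `cpuset` describes those PUs.
 *
 *   hwloc_nodeset_t nodeset = hwloc_bitmap_alloc();
 *   char *s;
 *   hwloc_cpuset_to_nodeset(topology, cpuset, nodeset);
 *   hwloc_bitmap_asprintf(&s, nodeset);
 *   printf("local NUMA nodes: %s\n", s);
 *   free(s);
 *   hwloc_bitmap_free(nodeset);
 */
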
static __hwloc_inline hwloc_obj_t
hwloc_get_non_io_ancestor_obj(hwloc_topology_t topology __hwloc_attribute_unused,
                              hwloc_obj_t ioobj)
{
  hwloc_obj_t obj = ioobj;
  while (obj && !obj->cpuset) {
    obj = obj->parent;
  }
  return obj;
}

static __hwloc_inline hwloc_obj_t
hwloc_get_next_pcidev(hwloc_topology_t topology, hwloc_obj_t prev)
{
  return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_PCI_DEVICE, prev);
}

static __hwloc_inline hwloc_obj_t
hwloc_get_pcidev_by_busid(hwloc_topology_t topology,
                          unsigned domain, unsigned bus, unsigned dev, unsigned func)
{
  hwloc_obj_t obj = NULL;
  while ((obj = hwloc_get_next_pcidev(topology, obj)) != NULL) {
    if (obj->attr->pcidev.domain == domain
        && obj->attr->pcidev.bus == bus
        && obj->attr->pcidev.dev == dev
        && obj->attr->pcidev.func == func)
      return obj;
  }
  return NULL;
}

static __hwloc_inline hwloc_obj_t
hwloc_get_pcidev_by_busidstring(hwloc_topology_t topology, const char *busid)
{
  unsigned domain = 0; /* default */
  unsigned bus, dev, func;

  if (sscanf(busid, "%x:%x.%x", &bus, &dev, &func) != 3
      && sscanf(busid, "%x:%x:%x.%x", &domain, &bus, &dev, &func) != 4) {
    errno = EINVAL;
    return NULL;
  }

  return hwloc_get_pcidev_by_busid(topology, domain, bus, dev, func);
}

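/*
 * Usage sketch (illustration only, not part of hwloc): locate a PCI device
 * by its bus ID string and find the closest ancestor with a cpuset, e.g. to
 * bind near a NIC or GPU.  The bus ID below is an arbitrary example, and the
 * topology must have been loaded with I/O discovery enabled for PCI objects
 * to exist.
 *
 *   hwloc_obj_t pcidev = hwloc_get_pcidev_by_busidstring(topology, "0000:02:00.0");
 *   if (pcidev) {
 *     hwloc_obj_t near = hwloc_get_non_io_ancestor_obj(topology, pcidev);
 *     if (near)
 *       hwloc_set_cpubind(topology, near->cpuset, HWLOC_CPUBIND_THREAD);
 *   }
 */
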
static __hwloc_inline hwloc_obj_t
hwloc_get_next_osdev(hwloc_topology_t topology, hwloc_obj_t prev)
{
  return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_OS_DEVICE, prev);
}

static __hwloc_inline hwloc_obj_t
hwloc_get_next_bridge(hwloc_topology_t topology, hwloc_obj_t prev)
{
  return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_BRIDGE, prev);
}

/** \brief Checks whether a given bridge covers a given PCI bus.
 */
static __hwloc_inline int
hwloc_bridge_covers_pcibus(hwloc_obj_t bridge,
                           unsigned domain, unsigned bus)
{
  return bridge->type == HWLOC_OBJ_BRIDGE
    && bridge->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI
    && bridge->attr->bridge.downstream.pci.domain == domain
    && bridge->attr->bridge.downstream.pci.secondary_bus <= bus
    && bridge->attr->bridge.downstream.pci.subordinate_bus >= bus;
}

#ifdef __cplusplus
} /* extern "C" */
#endif


#endif /* HWLOC_HELPER_H */