Hardware Locality (hwloc) v2.1-20200123.0330.git0a8b367
helper.h
00001 /*
00002  * Copyright © 2009 CNRS
00003  * Copyright © 2009-2019 Inria.  All rights reserved.
00004  * Copyright © 2009-2012 Université Bordeaux
00005  * Copyright © 2009-2010 Cisco Systems, Inc.  All rights reserved.
00006  * See COPYING in top-level directory.
00007  */
00008 
00013 #ifndef HWLOC_HELPER_H
00014 #define HWLOC_HELPER_H
00015 
00016 #ifndef HWLOC_H
00017 #error Please include the main hwloc.h instead
00018 #endif
00019 
00020 #include <stdlib.h>
00021 #include <errno.h>
00022 #include <assert.h> /* assert() is used by hwloc_distrib() below */
00023 
00024 #ifdef __cplusplus
00025 extern "C" {
00026 #endif
00027 
00028 
00041 static __hwloc_inline hwloc_obj_t
00042 hwloc_get_first_largest_obj_inside_cpuset(hwloc_topology_t topology, hwloc_const_cpuset_t set)
00043 {
00044   hwloc_obj_t obj = hwloc_get_root_obj(topology);
00045   if (!hwloc_bitmap_intersects(obj->cpuset, set))
00046     return NULL;
00047   while (!hwloc_bitmap_isincluded(obj->cpuset, set)) {
00048     /* while the object intersects without being included, look at its children */
00049     hwloc_obj_t child = obj->first_child;
00050     while (child) {
00051       if (hwloc_bitmap_intersects(child->cpuset, set))
00052         break;
00053       child = child->next_sibling;
00054     }
00055     if (!child)
00056       /* no child intersects, return their parent */
00057       return obj;
00058     /* found one intersecting child, look at its children */
00059     obj = child;
00060   }
00061   /* obj is included, return it */
00062   return obj;
00063 }
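
/* Usage sketch (illustrative, not part of hwloc): decompose a cpuset into the
 * largest objects that exactly cover it, assuming `topology` is an already-loaded
 * topology, `set` is a valid hwloc_cpuset_t, and <stdio.h> is available for printf:
 *
 *   hwloc_bitmap_t remaining = hwloc_bitmap_dup(set);
 *   hwloc_obj_t obj;
 *   while (!hwloc_bitmap_iszero(remaining)
 *          && (obj = hwloc_get_first_largest_obj_inside_cpuset(topology, remaining)) != NULL) {
 *     printf("%s L#%u\n", hwloc_obj_type_string(obj->type), obj->logical_index);
 *     hwloc_bitmap_andnot(remaining, remaining, obj->cpuset);
 *   }
 *   hwloc_bitmap_free(remaining);
 */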
00064 
00069 HWLOC_DECLSPEC int hwloc_get_largest_objs_inside_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set,
00070                                                  hwloc_obj_t * __hwloc_restrict objs, int max);
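
/* Usage sketch (illustrative): the same decomposition with the array variant and an
 * arbitrary caller-chosen bound of 16 objects; `topology` and `set` as above:
 *
 *   hwloc_obj_t objs[16];
 *   int i, n = hwloc_get_largest_objs_inside_cpuset(topology, set, objs, 16);
 *   for (i = 0; i < n; i++)
 *     printf("%s L#%u\n", hwloc_obj_type_string(objs[i]->type), objs[i]->logical_index);
 */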
00071 
00084 static __hwloc_inline hwloc_obj_t
00085 hwloc_get_next_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
00086                                            int depth, hwloc_obj_t prev)
00087 {
00088   hwloc_obj_t next = hwloc_get_next_obj_by_depth(topology, depth, prev);
00089   if (!next)
00090     return NULL;
00091   while (next && (hwloc_bitmap_iszero(next->cpuset) || !hwloc_bitmap_isincluded(next->cpuset, set)))
00092     next = next->next_cousin;
00093   return next;
00094 }
00095 
00108 static __hwloc_inline hwloc_obj_t
00109 hwloc_get_next_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
00110                                           hwloc_obj_type_t type, hwloc_obj_t prev)
00111 {
00112   int depth = hwloc_get_type_depth(topology, type);
00113   if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE)
00114     return NULL;
00115   return hwloc_get_next_obj_inside_cpuset_by_depth(topology, set, depth, prev);
00116 }
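
/* Usage sketch (illustrative): iterate over all cores whose cpuset is included in an
 * assumed hwloc_cpuset_t `set`:
 *
 *   hwloc_obj_t core = NULL;
 *   while ((core = hwloc_get_next_obj_inside_cpuset_by_type(topology, set,
 *                                                           HWLOC_OBJ_CORE, core)) != NULL)
 *     printf("core L#%u is inside the set\n", core->logical_index);
 */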
00117 
00126 static __hwloc_inline hwloc_obj_t
00127 hwloc_get_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
00128                                       int depth, unsigned idx) __hwloc_attribute_pure;
00129 static __hwloc_inline hwloc_obj_t
00130 hwloc_get_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
00131                                       int depth, unsigned idx)
00132 {
00133   hwloc_obj_t obj = hwloc_get_obj_by_depth (topology, depth, 0);
00134   unsigned count = 0;
00135   if (!obj)
00136     return NULL;
00137   while (obj) {
00138     if (!hwloc_bitmap_iszero(obj->cpuset) && hwloc_bitmap_isincluded(obj->cpuset, set)) {
00139       if (count == idx)
00140         return obj;
00141       count++;
00142     }
00143     obj = obj->next_cousin;
00144   }
00145   return NULL;
00146 }
00147 
00160 static __hwloc_inline hwloc_obj_t
00161 hwloc_get_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
00162                                      hwloc_obj_type_t type, unsigned idx) __hwloc_attribute_pure;
00163 static __hwloc_inline hwloc_obj_t
00164 hwloc_get_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
00165                                      hwloc_obj_type_t type, unsigned idx)
00166 {
00167   int depth = hwloc_get_type_depth(topology, type);
00168   if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE)
00169     return NULL;
00170   return hwloc_get_obj_inside_cpuset_by_depth(topology, set, depth, idx);
00171 }
00172 
00181 static __hwloc_inline unsigned
00182 hwloc_get_nbobjs_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
00183                                          int depth) __hwloc_attribute_pure;
00184 static __hwloc_inline unsigned
00185 hwloc_get_nbobjs_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set,
00186                                          int depth)
00187 {
00188   hwloc_obj_t obj = hwloc_get_obj_by_depth (topology, depth, 0);
00189   unsigned count = 0;
00190   if (!obj)
00191     return 0;
00192   while (obj) {
00193     if (!hwloc_bitmap_iszero(obj->cpuset) && hwloc_bitmap_isincluded(obj->cpuset, set))
00194       count++;
00195     obj = obj->next_cousin;
00196   }
00197   return count;
00198 }
00199 
00212 static __hwloc_inline int
00213 hwloc_get_nbobjs_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
00214                                         hwloc_obj_type_t type) __hwloc_attribute_pure;
00215 static __hwloc_inline int
00216 hwloc_get_nbobjs_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set,
00217                                         hwloc_obj_type_t type)
00218 {
00219   int depth = hwloc_get_type_depth(topology, type);
00220   if (depth == HWLOC_TYPE_DEPTH_UNKNOWN)
00221     return 0;
00222   if (depth == HWLOC_TYPE_DEPTH_MULTIPLE)
00223     return -1; /* FIXME: aggregate nbobjs from different levels? */
00224   return (int) hwloc_get_nbobjs_inside_cpuset_by_depth(topology, set, depth);
00225 }
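
/* Usage sketch (illustrative): count how many PUs the current process is bound to,
 * assuming hwloc_get_cpubind() is supported on this platform:
 *
 *   hwloc_cpuset_t binding = hwloc_bitmap_alloc();
 *   if (binding && !hwloc_get_cpubind(topology, binding, HWLOC_CPUBIND_PROCESS))
 *     printf("bound to %d PUs\n",
 *            hwloc_get_nbobjs_inside_cpuset_by_type(topology, binding, HWLOC_OBJ_PU));
 *   hwloc_bitmap_free(binding);
 */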
00226 
00240 static __hwloc_inline int
00241 hwloc_get_obj_index_inside_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set,
00242                                    hwloc_obj_t obj) __hwloc_attribute_pure;
00243 static __hwloc_inline int
00244 hwloc_get_obj_index_inside_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set,
00245                                    hwloc_obj_t obj)
00246 {
00247   int idx = 0;
00248   if (!hwloc_bitmap_isincluded(obj->cpuset, set))
00249     return -1;
00250   /* count how many objects are inside the cpuset on the way from us to the beginning of the level */
00251   while ((obj = obj->prev_cousin) != NULL)
00252     if (!hwloc_bitmap_iszero(obj->cpuset) && hwloc_bitmap_isincluded(obj->cpuset, set))
00253       idx++;
00254   return idx;
00255 }
00256 
00271 static __hwloc_inline hwloc_obj_t
00272 hwloc_get_child_covering_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set,
00273                                 hwloc_obj_t parent) __hwloc_attribute_pure;
00274 static __hwloc_inline hwloc_obj_t
00275 hwloc_get_child_covering_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set,
00276                                 hwloc_obj_t parent)
00277 {
00278   hwloc_obj_t child;
00279   if (hwloc_bitmap_iszero(set))
00280     return NULL;
00281   child = parent->first_child;
00282   while (child) {
00283     if (child->cpuset && hwloc_bitmap_isincluded(set, child->cpuset))
00284       return child;
00285     child = child->next_sibling;
00286   }
00287   return NULL;
00288 }
00289 
00294 static __hwloc_inline hwloc_obj_t
00295 hwloc_get_obj_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set) __hwloc_attribute_pure;
00296 static __hwloc_inline hwloc_obj_t
00297 hwloc_get_obj_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set)
00298 {
00299   struct hwloc_obj *current = hwloc_get_root_obj(topology);
00300   if (hwloc_bitmap_iszero(set) || !hwloc_bitmap_isincluded(set, current->cpuset))
00301     return NULL;
00302   while (1) {
00303     hwloc_obj_t child = hwloc_get_child_covering_cpuset(topology, set, current);
00304     if (!child)
00305       return current;
00306     current = child;
00307   }
00308 }
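
/* Usage sketch (illustrative): find the smallest object covering the whole of an
 * assumed cpuset `set`, e.g. to learn which package or cache all these PUs share:
 *
 *   hwloc_obj_t obj = hwloc_get_obj_covering_cpuset(topology, set);
 *   if (obj)
 *     printf("covered by %s L#%u\n", hwloc_obj_type_string(obj->type), obj->logical_index);
 */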
00309 
00320 static __hwloc_inline hwloc_obj_t
00321 hwloc_get_next_obj_covering_cpuset_by_depth(hwloc_topology_t topology, hwloc_const_cpuset_t set,
00322                                             int depth, hwloc_obj_t prev)
00323 {
00324   hwloc_obj_t next = hwloc_get_next_obj_by_depth(topology, depth, prev);
00325   if (!next)
00326     return NULL;
00327   while (next && !hwloc_bitmap_intersects(set, next->cpuset))
00328     next = next->next_cousin;
00329   return next;
00330 }
00331 
00347 static __hwloc_inline hwloc_obj_t
00348 hwloc_get_next_obj_covering_cpuset_by_type(hwloc_topology_t topology, hwloc_const_cpuset_t set,
00349                                            hwloc_obj_type_t type, hwloc_obj_t prev)
00350 {
00351   int depth = hwloc_get_type_depth(topology, type);
00352   if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE)
00353     return NULL;
00354   return hwloc_get_next_obj_covering_cpuset_by_depth(topology, set, depth, prev);
00355 }
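
/* Usage sketch (illustrative): list all packages that an assumed cpuset `set` touches,
 * even partially:
 *
 *   hwloc_obj_t pkg = NULL;
 *   while ((pkg = hwloc_get_next_obj_covering_cpuset_by_type(topology, set,
 *                                                            HWLOC_OBJ_PACKAGE, pkg)) != NULL)
 *     printf("package L#%u intersects the set\n", pkg->logical_index);
 */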
00356 
00377 static __hwloc_inline hwloc_obj_t
00378 hwloc_get_ancestor_obj_by_depth (hwloc_topology_t topology __hwloc_attribute_unused, int depth, hwloc_obj_t obj) __hwloc_attribute_pure;
00379 static __hwloc_inline hwloc_obj_t
00380 hwloc_get_ancestor_obj_by_depth (hwloc_topology_t topology __hwloc_attribute_unused, int depth, hwloc_obj_t obj)
00381 {
00382   hwloc_obj_t ancestor = obj;
00383   if (obj->depth < depth)
00384     return NULL;
00385   while (ancestor && ancestor->depth > depth)
00386     ancestor = ancestor->parent;
00387   return ancestor;
00388 }
00389 
00397 static __hwloc_inline hwloc_obj_t
00398 hwloc_get_ancestor_obj_by_type (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_type_t type, hwloc_obj_t obj) __hwloc_attribute_pure;
00399 static __hwloc_inline hwloc_obj_t
00400 hwloc_get_ancestor_obj_by_type (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_type_t type, hwloc_obj_t obj)
00401 {
00402   hwloc_obj_t ancestor = obj->parent;
00403   while (ancestor && ancestor->type != type)
00404     ancestor = ancestor->parent;
00405   return ancestor;
00406 }
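
/* Usage sketch (illustrative): find the package containing the first PU:
 *
 *   hwloc_obj_t pu = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 0);
 *   hwloc_obj_t pkg = pu ? hwloc_get_ancestor_obj_by_type(topology, HWLOC_OBJ_PACKAGE, pu) : NULL;
 *   if (pkg)
 *     printf("PU P#%u is in package L#%u\n", pu->os_index, pkg->logical_index);
 */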
00407 
00409 static __hwloc_inline hwloc_obj_t
00410 hwloc_get_common_ancestor_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj1, hwloc_obj_t obj2) __hwloc_attribute_pure;
00411 static __hwloc_inline hwloc_obj_t
00412 hwloc_get_common_ancestor_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj1, hwloc_obj_t obj2)
00413 {
00414   /* the loop isn't so easy since intermediate ancestors may have
00415    * different depths, causing us to alternate between using obj1->parent
00416    * and obj2->parent. Also, even if at some point we find ancestors of
00417    * the same depth, their ancestors may have different depths again.
00418    */
00419   while (obj1 != obj2) {
00420     while (obj1->depth > obj2->depth)
00421       obj1 = obj1->parent;
00422     while (obj2->depth > obj1->depth)
00423       obj2 = obj2->parent;
00424     if (obj1 != obj2 && obj1->depth == obj2->depth) {
00425       obj1 = obj1->parent;
00426       obj2 = obj2->parent;
00427     }
00428   }
00429   return obj1;
00430 }
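
/* Usage sketch (illustrative): find the deepest object containing both the first and
 * the last PU, e.g. to see whether they share a cache or a package:
 *
 *   int nbpus = hwloc_get_nbobjs_by_type(topology, HWLOC_OBJ_PU);
 *   hwloc_obj_t first = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 0);
 *   hwloc_obj_t last = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, nbpus - 1);
 *   if (first && last) {
 *     hwloc_obj_t anc = hwloc_get_common_ancestor_obj(topology, first, last);
 *     printf("deepest common ancestor: %s\n", hwloc_obj_type_string(anc->type));
 *   }
 */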
00431 
00437 static __hwloc_inline int
00438 hwloc_obj_is_in_subtree (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj, hwloc_obj_t subtree_root) __hwloc_attribute_pure;
00439 static __hwloc_inline int
00440 hwloc_obj_is_in_subtree (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj, hwloc_obj_t subtree_root)
00441 {
00442   return obj->cpuset && subtree_root->cpuset && hwloc_bitmap_isincluded(obj->cpuset, subtree_root->cpuset);
00443 }
00444 
00455 static __hwloc_inline hwloc_obj_t
00456 hwloc_get_next_child (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t parent, hwloc_obj_t prev)
00457 {
00458   hwloc_obj_t obj;
00459   int state = 0;
00460   if (prev) {
00461     if (prev->type == HWLOC_OBJ_MISC)
00462       state = 3;
00463     else if (prev->type == HWLOC_OBJ_BRIDGE || prev->type == HWLOC_OBJ_PCI_DEVICE || prev->type == HWLOC_OBJ_OS_DEVICE)
00464       state = 2;
00465     else if (prev->type == HWLOC_OBJ_NUMANODE)
00466       state = 1;
00467     obj = prev->next_sibling;
00468   } else {
00469     obj = parent->first_child;
00470   }
00471   if (!obj && state == 0) {
00472     obj = parent->memory_first_child;
00473     state = 1;
00474   }
00475   if (!obj && state == 1) {
00476     obj = parent->io_first_child;
00477     state = 2;
00478   }
00479   if (!obj && state == 2) {
00480     obj = parent->misc_first_child;
00481     state = 3;
00482   }
00483   return obj;
00484 }
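
/* Usage sketch (illustrative): walk all children of the topology root, including the
 * memory, I/O and Misc children that the normal children array does not contain:
 *
 *   hwloc_obj_t root = hwloc_get_root_obj(topology);
 *   hwloc_obj_t child = NULL;
 *   while ((child = hwloc_get_next_child(topology, root, child)) != NULL)
 *     printf("child of type %s\n", hwloc_obj_type_string(child->type));
 */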
00485 
00512 HWLOC_DECLSPEC int
00513 hwloc_obj_type_is_normal(hwloc_obj_type_t type);
00514 
00523 HWLOC_DECLSPEC int
00524 hwloc_obj_type_is_io(hwloc_obj_type_t type);
00525 
00534 HWLOC_DECLSPEC int
00535 hwloc_obj_type_is_memory(hwloc_obj_type_t type);
00536 
00543 HWLOC_DECLSPEC int
00544 hwloc_obj_type_is_cache(hwloc_obj_type_t type);
00545 
00552 HWLOC_DECLSPEC int
00553 hwloc_obj_type_is_dcache(hwloc_obj_type_t type);
00554 
00561 HWLOC_DECLSPEC int
00562 hwloc_obj_type_is_icache(hwloc_obj_type_t type);
00563 
00593 static __hwloc_inline int
00594 hwloc_get_cache_type_depth (hwloc_topology_t topology,
00595                             unsigned cachelevel, hwloc_obj_cache_type_t cachetype)
00596 {
00597   int depth;
00598   int found = HWLOC_TYPE_DEPTH_UNKNOWN;
00599   for (depth=0; ; depth++) {
00600     hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, depth, 0);
00601     if (!obj)
00602       break;
00603     if (!hwloc_obj_type_is_dcache(obj->type) || obj->attr->cache.depth != cachelevel)
00604       /* doesn't match, try next depth */
00605       continue;
00606     if (cachetype == (hwloc_obj_cache_type_t) -1) {
00607       if (found != HWLOC_TYPE_DEPTH_UNKNOWN) {
00608         /* second match, return MULTIPLE */
00609         return HWLOC_TYPE_DEPTH_MULTIPLE;
00610       }
00611       /* first match, mark it as found */
00612       found = depth;
00613       continue;
00614     }
00615     if (obj->attr->cache.type == cachetype || obj->attr->cache.type == HWLOC_OBJ_CACHE_UNIFIED)
00616       /* exact match (the requested type, or a unified cache that satisfies any request), return immediately */
00617       return depth;
00618   }
00619   /* went to the bottom, return what we found */
00620   return found;
00621 }
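
/* Usage sketch (illustrative): find the depth of level-2 data (or unified) caches,
 * then count the objects at that depth:
 *
 *   int depth = hwloc_get_cache_type_depth(topology, 2, HWLOC_OBJ_CACHE_DATA);
 *   if (depth != HWLOC_TYPE_DEPTH_UNKNOWN && depth != HWLOC_TYPE_DEPTH_MULTIPLE)
 *     printf("%u L2 caches\n", hwloc_get_nbobjs_by_depth(topology, depth));
 */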
00622 
00627 static __hwloc_inline hwloc_obj_t
00628 hwloc_get_cache_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set) __hwloc_attribute_pure;
00629 static __hwloc_inline hwloc_obj_t
00630 hwloc_get_cache_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set)
00631 {
00632   hwloc_obj_t current = hwloc_get_obj_covering_cpuset(topology, set);
00633   while (current) {
00634     if (hwloc_obj_type_is_dcache(current->type))
00635       return current;
00636     current = current->parent;
00637   }
00638   return NULL;
00639 }
00640 
00645 static __hwloc_inline hwloc_obj_t
00646 hwloc_get_shared_cache_covering_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj) __hwloc_attribute_pure;
00647 static __hwloc_inline hwloc_obj_t
00648 hwloc_get_shared_cache_covering_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj)
00649 {
00650   hwloc_obj_t current = obj->parent;
00651   if (!obj->cpuset)
00652     return NULL;
00653   while (current) {
00654     if (!hwloc_bitmap_isequal(current->cpuset, obj->cpuset)
00655         && hwloc_obj_type_is_dcache(current->type))
00656       return current;
00657     current = current->parent;
00658   }
00659   return NULL;
00660 }
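
/* Usage sketch (illustrative): find the first cache that the first PU shares with
 * other PUs:
 *
 *   hwloc_obj_t pu = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 0);
 *   hwloc_obj_t shared = pu ? hwloc_get_shared_cache_covering_obj(topology, pu) : NULL;
 *   if (shared)
 *     printf("L%u cache shared above PU P#%u\n", shared->attr->cache.depth, pu->os_index);
 */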
00661 
00684 static __hwloc_inline hwloc_obj_t
00685 hwloc_get_pu_obj_by_os_index(hwloc_topology_t topology, unsigned os_index) __hwloc_attribute_pure;
00686 static __hwloc_inline hwloc_obj_t
00687 hwloc_get_pu_obj_by_os_index(hwloc_topology_t topology, unsigned os_index)
00688 {
00689   hwloc_obj_t obj = NULL;
00690   while ((obj = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_PU, obj)) != NULL)
00691     if (obj->os_index == os_index)
00692       return obj;
00693   return NULL;
00694 }
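
/* Usage sketch (illustrative): bind the current thread to the PU whose OS index is
 * `os_index` (an assumed unsigned value, e.g. obtained from the operating system):
 *
 *   hwloc_obj_t pu = hwloc_get_pu_obj_by_os_index(topology, os_index);
 *   if (pu)
 *     hwloc_set_cpubind(topology, pu->cpuset, HWLOC_CPUBIND_THREAD);
 */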
00695 
00705 static __hwloc_inline hwloc_obj_t
00706 hwloc_get_numanode_obj_by_os_index(hwloc_topology_t topology, unsigned os_index) __hwloc_attribute_pure;
00707 static __hwloc_inline hwloc_obj_t
00708 hwloc_get_numanode_obj_by_os_index(hwloc_topology_t topology, unsigned os_index)
00709 {
00710   hwloc_obj_t obj = NULL;
00711   while ((obj = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_NUMANODE, obj)) != NULL)
00712     if (obj->os_index == os_index)
00713       return obj;
00714   return NULL;
00715 }
00716 
00728 /* TODO: rather provide an iterator? Provide a way to know how much should be allocated? By returning the total number of objects instead? */
00729 HWLOC_DECLSPEC unsigned hwloc_get_closest_objs (hwloc_topology_t topology, hwloc_obj_t src, hwloc_obj_t * __hwloc_restrict objs, unsigned max);
00730 
00743 static __hwloc_inline hwloc_obj_t
00744 hwloc_get_obj_below_by_type (hwloc_topology_t topology,
00745                              hwloc_obj_type_t type1, unsigned idx1,
00746                              hwloc_obj_type_t type2, unsigned idx2) __hwloc_attribute_pure;
00747 static __hwloc_inline hwloc_obj_t
00748 hwloc_get_obj_below_by_type (hwloc_topology_t topology,
00749                              hwloc_obj_type_t type1, unsigned idx1,
00750                              hwloc_obj_type_t type2, unsigned idx2)
00751 {
00752   hwloc_obj_t obj;
00753   obj = hwloc_get_obj_by_type (topology, type1, idx1);
00754   if (!obj)
00755     return NULL;
00756   return hwloc_get_obj_inside_cpuset_by_type(topology, obj->cpuset, type2, idx2);
00757 }
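
/* Usage sketch (illustrative): bind the current thread to the third core of the
 * second package, if such an object exists:
 *
 *   hwloc_obj_t core = hwloc_get_obj_below_by_type(topology,
 *                                                  HWLOC_OBJ_PACKAGE, 1,
 *                                                  HWLOC_OBJ_CORE, 2);
 *   if (core)
 *     hwloc_set_cpubind(topology, core->cpuset, HWLOC_CPUBIND_THREAD);
 */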
00758 
00777 static __hwloc_inline hwloc_obj_t
00778 hwloc_get_obj_below_array_by_type (hwloc_topology_t topology, int nr, hwloc_obj_type_t *typev, unsigned *idxv) __hwloc_attribute_pure;
00779 static __hwloc_inline hwloc_obj_t
00780 hwloc_get_obj_below_array_by_type (hwloc_topology_t topology, int nr, hwloc_obj_type_t *typev, unsigned *idxv)
00781 {
00782   hwloc_obj_t obj = hwloc_get_root_obj(topology);
00783   int i;
00784   for(i=0; i<nr; i++) {
00785     if (!obj)
00786       return NULL;
00787     obj = hwloc_get_obj_inside_cpuset_by_type(topology, obj->cpuset, typev[i], idxv[i]);
00788   }
00789   return obj;
00790 }
00791 
00802 enum hwloc_distrib_flags_e {
00806   HWLOC_DISTRIB_FLAG_REVERSE = (1UL<<0)
00807 };
00808 
00832 static __hwloc_inline int
00833 hwloc_distrib(hwloc_topology_t topology,
00834               hwloc_obj_t *roots, unsigned n_roots,
00835               hwloc_cpuset_t *set,
00836               unsigned n,
00837               int until, unsigned long flags)
00838 {
00839   unsigned i;
00840   unsigned tot_weight;
00841   unsigned given, givenweight;
00842   hwloc_cpuset_t *cpusetp = set;
00843 
00844   if (flags & ~HWLOC_DISTRIB_FLAG_REVERSE) {
00845     errno = EINVAL;
00846     return -1;
00847   }
00848 
00849   tot_weight = 0;
00850   for (i = 0; i < n_roots; i++)
00851     tot_weight += (unsigned) hwloc_bitmap_weight(roots[i]->cpuset);
00852 
00853   for (i = 0, given = 0, givenweight = 0; i < n_roots; i++) {
00854     unsigned chunk, weight;
00855     hwloc_obj_t root = roots[flags & HWLOC_DISTRIB_FLAG_REVERSE ? n_roots-1-i : i];
00856     hwloc_cpuset_t cpuset = root->cpuset;
00857     if (root->type == HWLOC_OBJ_NUMANODE)
00858       /* NUMANodes have the same cpuset as their parent, but we need normal objects below */
00859       root = root->parent;
00860     weight = (unsigned) hwloc_bitmap_weight(cpuset);
00861     if (!weight)
00862       continue;
00863     /* Give this root a chunk proportional to its weight.
00864      * If previous chunks were rounded up, we may get a bit less. */
00865     chunk = (( (givenweight+weight) * n  + tot_weight-1) / tot_weight)
00866           - ((  givenweight         * n  + tot_weight-1) / tot_weight);
00867     if (!root->arity || chunk <= 1 || root->depth >= until) {
00868       /* We can't split any more, put everything there.  */
00869       if (chunk) {
00870         /* Fill cpusets with ours */
00871         unsigned j;
00872         for (j=0; j < chunk; j++)
00873           cpusetp[j] = hwloc_bitmap_dup(cpuset);
00874       } else {
00875         /* We got no chunk, just merge our cpuset to a previous one
00876          * (the first chunk cannot be empty)
00877          * so that this root doesn't get ignored.
00878          */
00879         assert(given);
00880         hwloc_bitmap_or(cpusetp[-1], cpusetp[-1], cpuset);
00881       }
00882     } else {
00883       /* Still more to distribute, recurse into children */
00884       hwloc_distrib(topology, root->children, root->arity, cpusetp, chunk, until, flags);
00885     }
00886     cpusetp += chunk;
00887     given += chunk;
00888     givenweight += weight;
00889   }
00890 
00891   return 0;
00892 }
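
/* Usage sketch (illustrative): spread 4 cpusets over the whole machine, e.g. to place
 * 4 threads far apart; INT_MAX (from <limits.h>) disables the depth limit, each
 * returned cpuset would typically be passed to hwloc_set_cpubind() after being
 * singlified, and the cpusets must be freed by the caller:
 *
 *   hwloc_obj_t root = hwloc_get_root_obj(topology);
 *   hwloc_cpuset_t sets[4];
 *   unsigned i;
 *   if (!hwloc_distrib(topology, &root, 1, sets, 4, INT_MAX, 0)) {
 *     for (i = 0; i < 4; i++) {
 *       hwloc_bitmap_singlify(sets[i]);
 *       hwloc_bitmap_free(sets[i]);
 *     }
 *   }
 */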
00893 
00911 HWLOC_DECLSPEC hwloc_const_cpuset_t
00912 hwloc_topology_get_complete_cpuset(hwloc_topology_t topology) __hwloc_attribute_pure;
00913 
00925 HWLOC_DECLSPEC hwloc_const_cpuset_t
00926 hwloc_topology_get_topology_cpuset(hwloc_topology_t topology) __hwloc_attribute_pure;
00927 
00944 HWLOC_DECLSPEC hwloc_const_cpuset_t
00945 hwloc_topology_get_allowed_cpuset(hwloc_topology_t topology) __hwloc_attribute_pure;
00946 
00956 HWLOC_DECLSPEC hwloc_const_nodeset_t
00957 hwloc_topology_get_complete_nodeset(hwloc_topology_t topology) __hwloc_attribute_pure;
00958 
00970 HWLOC_DECLSPEC hwloc_const_nodeset_t
00971 hwloc_topology_get_topology_nodeset(hwloc_topology_t topology) __hwloc_attribute_pure;
00972 
00989 HWLOC_DECLSPEC hwloc_const_nodeset_t
00990 hwloc_topology_get_allowed_nodeset(hwloc_topology_t topology) __hwloc_attribute_pure;
00991 
01011 static __hwloc_inline int
01012 hwloc_cpuset_to_nodeset(hwloc_topology_t topology, hwloc_const_cpuset_t _cpuset, hwloc_nodeset_t nodeset)
01013 {
01014         int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE);
01015         hwloc_obj_t obj = NULL;
01016         assert(depth != HWLOC_TYPE_DEPTH_UNKNOWN);
01017         hwloc_bitmap_zero(nodeset);
01018         while ((obj = hwloc_get_next_obj_covering_cpuset_by_depth(topology, _cpuset, depth, obj)) != NULL)
01019                 if (hwloc_bitmap_set(nodeset, obj->os_index) < 0)
01020                         return -1;
01021         return 0;
01022 }
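
/* Usage sketch (illustrative): convert the current CPU binding into a nodeset and bind
 * future memory allocations near those CPUs; error handling is omitted and
 * hwloc_get_cpubind()/hwloc_set_membind() support is assumed:
 *
 *   hwloc_cpuset_t cset = hwloc_bitmap_alloc();
 *   hwloc_nodeset_t nset = hwloc_bitmap_alloc();
 *   hwloc_get_cpubind(topology, cset, HWLOC_CPUBIND_PROCESS);
 *   hwloc_cpuset_to_nodeset(topology, cset, nset);
 *   hwloc_set_membind(topology, nset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_BYNODESET);
 *   hwloc_bitmap_free(nset);
 *   hwloc_bitmap_free(cset);
 */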
01023 
01032 static __hwloc_inline int
01033 hwloc_cpuset_from_nodeset(hwloc_topology_t topology, hwloc_cpuset_t _cpuset, hwloc_const_nodeset_t nodeset)
01034 {
01035         int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE);
01036         hwloc_obj_t obj = NULL;
01037         assert(depth != HWLOC_TYPE_DEPTH_UNKNOWN);
01038         hwloc_bitmap_zero(_cpuset);
01039         while ((obj = hwloc_get_next_obj_by_depth(topology, depth, obj)) != NULL) {
01040                 if (hwloc_bitmap_isset(nodeset, obj->os_index))
01041                         /* no need to check obj->cpuset because objects in levels always have a cpuset */
01042                         if (hwloc_bitmap_or(_cpuset, _cpuset, obj->cpuset) < 0)
01043                                 return -1;
01044         }
01045         return 0;
01046 }
01047 
01067 static __hwloc_inline hwloc_obj_t
01068 hwloc_get_non_io_ancestor_obj(hwloc_topology_t topology __hwloc_attribute_unused,
01069                               hwloc_obj_t ioobj)
01070 {
01071   hwloc_obj_t obj = ioobj;
01072   while (obj && !obj->cpuset) {
01073     obj = obj->parent;
01074   }
01075   return obj;
01076 }
01077 
01082 static __hwloc_inline hwloc_obj_t
01083 hwloc_get_next_pcidev(hwloc_topology_t topology, hwloc_obj_t prev)
01084 {
01085   return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_PCI_DEVICE, prev);
01086 }
01087 
01091 static __hwloc_inline hwloc_obj_t
01092 hwloc_get_pcidev_by_busid(hwloc_topology_t topology,
01093                           unsigned domain, unsigned bus, unsigned dev, unsigned func)
01094 {
01095   hwloc_obj_t obj = NULL;
01096   while ((obj = hwloc_get_next_pcidev(topology, obj)) != NULL) {
01097     if (obj->attr->pcidev.domain == domain
01098         && obj->attr->pcidev.bus == bus
01099         && obj->attr->pcidev.dev == dev
01100         && obj->attr->pcidev.func == func)
01101       return obj;
01102   }
01103   return NULL;
01104 }
01105 
01109 static __hwloc_inline hwloc_obj_t
01110 hwloc_get_pcidev_by_busidstring(hwloc_topology_t topology, const char *busid)
01111 {
01112   unsigned domain = 0; /* default */
01113   unsigned bus, dev, func;
01114 
01115   if (sscanf(busid, "%x:%x.%x", &bus, &dev, &func) != 3
01116       && sscanf(busid, "%x:%x:%x.%x", &domain, &bus, &dev, &func) != 4) {
01117     errno = EINVAL;
01118     return NULL;
01119   }
01120 
01121   return hwloc_get_pcidev_by_busid(topology, domain, bus, dev, func);
01122 }
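
/* Usage sketch (illustrative): look up a PCI device by an assumed bus ID string and
 * report which part of the machine it is attached to, using
 * hwloc_get_non_io_ancestor_obj() above (I/O discovery must have been enabled for PCI
 * objects to exist in the topology):
 *
 *   hwloc_obj_t dev = hwloc_get_pcidev_by_busidstring(topology, "0000:02:00.0");
 *   hwloc_obj_t where = dev ? hwloc_get_non_io_ancestor_obj(topology, dev) : NULL;
 *   if (where)
 *     printf("device is close to %s L#%u\n",
 *            hwloc_obj_type_string(where->type), where->logical_index);
 */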
01123 
01128 static __hwloc_inline hwloc_obj_t
01129 hwloc_get_next_osdev(hwloc_topology_t topology, hwloc_obj_t prev)
01130 {
01131   return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_OS_DEVICE, prev);
01132 }
01133 
01138 static __hwloc_inline hwloc_obj_t
01139 hwloc_get_next_bridge(hwloc_topology_t topology, hwloc_obj_t prev)
01140 {
01141   return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_BRIDGE, prev);
01142 }
01143 
01144 /* \brief Checks whether a given bridge covers a given PCI bus.
01145  */
01146 static __hwloc_inline int
01147 hwloc_bridge_covers_pcibus(hwloc_obj_t bridge,
01148                            unsigned domain, unsigned bus)
01149 {
01150   return bridge->type == HWLOC_OBJ_BRIDGE
01151     && bridge->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI
01152     && bridge->attr->bridge.downstream.pci.domain == domain
01153     && bridge->attr->bridge.downstream.pci.secondary_bus <= bus
01154     && bridge->attr->bridge.downstream.pci.subordinate_bus >= bus;
01155 }
01156 
01161 #ifdef __cplusplus
01162 } /* extern "C" */
01163 #endif
01164 
01165 
01166 #endif /* HWLOC_HELPER_H */