Hardware Locality (hwloc)
v2.2-20200408.0300.gitad4a86f
00001 /* 00002 * Copyright © 2009 CNRS 00003 * Copyright © 2009-2020 Inria. All rights reserved. 00004 * Copyright © 2009-2012 Université Bordeaux 00005 * Copyright © 2009-2010 Cisco Systems, Inc. All rights reserved. 00006 * See COPYING in top-level directory. 00007 */ 00008 00013 #ifndef HWLOC_HELPER_H 00014 #define HWLOC_HELPER_H 00015 00016 #ifndef HWLOC_H 00017 #error Please include the main hwloc.h instead 00018 #endif 00019 00020 #include <stdlib.h> 00021 #include <errno.h> 00022 00023 00024 #ifdef __cplusplus 00025 extern "C" { 00026 #endif 00027 00028 00041 static __hwloc_inline hwloc_obj_t 00042 hwloc_get_first_largest_obj_inside_cpuset(hwloc_topology_t topology, hwloc_const_cpuset_t set) 00043 { 00044 hwloc_obj_t obj = hwloc_get_root_obj(topology); 00045 if (!hwloc_bitmap_intersects(obj->cpuset, set)) 00046 return NULL; 00047 while (!hwloc_bitmap_isincluded(obj->cpuset, set)) { 00048 /* while the object intersects without being included, look at its children */ 00049 hwloc_obj_t child = obj->first_child; 00050 while (child) { 00051 if (hwloc_bitmap_intersects(child->cpuset, set)) 00052 break; 00053 child = child->next_sibling; 00054 } 00055 if (!child) 00056 /* no child intersects, return their father */ 00057 return obj; 00058 /* found one intersecting child, look at its children */ 00059 obj = child; 00060 } 00061 /* obj is included, return it */ 00062 return obj; 00063 } 00064 00069 HWLOC_DECLSPEC int hwloc_get_largest_objs_inside_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set, 00070 hwloc_obj_t * __hwloc_restrict objs, int max); 00071 00084 static __hwloc_inline hwloc_obj_t 00085 hwloc_get_next_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set, 00086 int depth, hwloc_obj_t prev) 00087 { 00088 hwloc_obj_t next = hwloc_get_next_obj_by_depth(topology, depth, prev); 00089 if (!next) 00090 return NULL; 00091 while (next && (hwloc_bitmap_iszero(next->cpuset) || !hwloc_bitmap_isincluded(next->cpuset, set))) 00092 
next = next->next_cousin; 00093 return next; 00094 } 00095 00108 static __hwloc_inline hwloc_obj_t 00109 hwloc_get_next_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set, 00110 hwloc_obj_type_t type, hwloc_obj_t prev) 00111 { 00112 int depth = hwloc_get_type_depth(topology, type); 00113 if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE) 00114 return NULL; 00115 return hwloc_get_next_obj_inside_cpuset_by_depth(topology, set, depth, prev); 00116 } 00117 00126 static __hwloc_inline hwloc_obj_t 00127 hwloc_get_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set, 00128 int depth, unsigned idx) __hwloc_attribute_pure; 00129 static __hwloc_inline hwloc_obj_t 00130 hwloc_get_obj_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set, 00131 int depth, unsigned idx) 00132 { 00133 hwloc_obj_t obj = hwloc_get_obj_by_depth (topology, depth, 0); 00134 unsigned count = 0; 00135 if (!obj) 00136 return NULL; 00137 while (obj) { 00138 if (!hwloc_bitmap_iszero(obj->cpuset) && hwloc_bitmap_isincluded(obj->cpuset, set)) { 00139 if (count == idx) 00140 return obj; 00141 count++; 00142 } 00143 obj = obj->next_cousin; 00144 } 00145 return NULL; 00146 } 00147 00160 static __hwloc_inline hwloc_obj_t 00161 hwloc_get_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set, 00162 hwloc_obj_type_t type, unsigned idx) __hwloc_attribute_pure; 00163 static __hwloc_inline hwloc_obj_t 00164 hwloc_get_obj_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set, 00165 hwloc_obj_type_t type, unsigned idx) 00166 { 00167 int depth = hwloc_get_type_depth(topology, type); 00168 if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE) 00169 return NULL; 00170 return hwloc_get_obj_inside_cpuset_by_depth(topology, set, depth, idx); 00171 } 00172 00181 static __hwloc_inline unsigned 00182 hwloc_get_nbobjs_inside_cpuset_by_depth (hwloc_topology_t 
topology, hwloc_const_cpuset_t set, 00183 int depth) __hwloc_attribute_pure; 00184 static __hwloc_inline unsigned 00185 hwloc_get_nbobjs_inside_cpuset_by_depth (hwloc_topology_t topology, hwloc_const_cpuset_t set, 00186 int depth) 00187 { 00188 hwloc_obj_t obj = hwloc_get_obj_by_depth (topology, depth, 0); 00189 unsigned count = 0; 00190 if (!obj) 00191 return 0; 00192 while (obj) { 00193 if (!hwloc_bitmap_iszero(obj->cpuset) && hwloc_bitmap_isincluded(obj->cpuset, set)) 00194 count++; 00195 obj = obj->next_cousin; 00196 } 00197 return count; 00198 } 00199 00212 static __hwloc_inline int 00213 hwloc_get_nbobjs_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set, 00214 hwloc_obj_type_t type) __hwloc_attribute_pure; 00215 static __hwloc_inline int 00216 hwloc_get_nbobjs_inside_cpuset_by_type (hwloc_topology_t topology, hwloc_const_cpuset_t set, 00217 hwloc_obj_type_t type) 00218 { 00219 int depth = hwloc_get_type_depth(topology, type); 00220 if (depth == HWLOC_TYPE_DEPTH_UNKNOWN) 00221 return 0; 00222 if (depth == HWLOC_TYPE_DEPTH_MULTIPLE) 00223 return -1; /* FIXME: agregate nbobjs from different levels? 
*/ 00224 return (int) hwloc_get_nbobjs_inside_cpuset_by_depth(topology, set, depth); 00225 } 00226 00240 static __hwloc_inline int 00241 hwloc_get_obj_index_inside_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set, 00242 hwloc_obj_t obj) __hwloc_attribute_pure; 00243 static __hwloc_inline int 00244 hwloc_get_obj_index_inside_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set, 00245 hwloc_obj_t obj) 00246 { 00247 int idx = 0; 00248 if (!hwloc_bitmap_isincluded(obj->cpuset, set)) 00249 return -1; 00250 /* count how many objects are inside the cpuset on the way from us to the beginning of the level */ 00251 while ((obj = obj->prev_cousin) != NULL) 00252 if (!hwloc_bitmap_iszero(obj->cpuset) && hwloc_bitmap_isincluded(obj->cpuset, set)) 00253 idx++; 00254 return idx; 00255 } 00256 00271 static __hwloc_inline hwloc_obj_t 00272 hwloc_get_child_covering_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set, 00273 hwloc_obj_t parent) __hwloc_attribute_pure; 00274 static __hwloc_inline hwloc_obj_t 00275 hwloc_get_child_covering_cpuset (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_const_cpuset_t set, 00276 hwloc_obj_t parent) 00277 { 00278 hwloc_obj_t child; 00279 if (hwloc_bitmap_iszero(set)) 00280 return NULL; 00281 child = parent->first_child; 00282 while (child) { 00283 if (child->cpuset && hwloc_bitmap_isincluded(set, child->cpuset)) 00284 return child; 00285 child = child->next_sibling; 00286 } 00287 return NULL; 00288 } 00289 00294 static __hwloc_inline hwloc_obj_t 00295 hwloc_get_obj_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set) __hwloc_attribute_pure; 00296 static __hwloc_inline hwloc_obj_t 00297 hwloc_get_obj_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set) 00298 { 00299 struct hwloc_obj *current = hwloc_get_root_obj(topology); 00300 if (hwloc_bitmap_iszero(set) || !hwloc_bitmap_isincluded(set, 
current->cpuset)) 00301 return NULL; 00302 while (1) { 00303 hwloc_obj_t child = hwloc_get_child_covering_cpuset(topology, set, current); 00304 if (!child) 00305 return current; 00306 current = child; 00307 } 00308 } 00309 00320 static __hwloc_inline hwloc_obj_t 00321 hwloc_get_next_obj_covering_cpuset_by_depth(hwloc_topology_t topology, hwloc_const_cpuset_t set, 00322 int depth, hwloc_obj_t prev) 00323 { 00324 hwloc_obj_t next = hwloc_get_next_obj_by_depth(topology, depth, prev); 00325 if (!next) 00326 return NULL; 00327 while (next && !hwloc_bitmap_intersects(set, next->cpuset)) 00328 next = next->next_cousin; 00329 return next; 00330 } 00331 00347 static __hwloc_inline hwloc_obj_t 00348 hwloc_get_next_obj_covering_cpuset_by_type(hwloc_topology_t topology, hwloc_const_cpuset_t set, 00349 hwloc_obj_type_t type, hwloc_obj_t prev) 00350 { 00351 int depth = hwloc_get_type_depth(topology, type); 00352 if (depth == HWLOC_TYPE_DEPTH_UNKNOWN || depth == HWLOC_TYPE_DEPTH_MULTIPLE) 00353 return NULL; 00354 return hwloc_get_next_obj_covering_cpuset_by_depth(topology, set, depth, prev); 00355 } 00356 00377 static __hwloc_inline hwloc_obj_t 00378 hwloc_get_ancestor_obj_by_depth (hwloc_topology_t topology __hwloc_attribute_unused, int depth, hwloc_obj_t obj) __hwloc_attribute_pure; 00379 static __hwloc_inline hwloc_obj_t 00380 hwloc_get_ancestor_obj_by_depth (hwloc_topology_t topology __hwloc_attribute_unused, int depth, hwloc_obj_t obj) 00381 { 00382 hwloc_obj_t ancestor = obj; 00383 if (obj->depth < depth) 00384 return NULL; 00385 while (ancestor && ancestor->depth > depth) 00386 ancestor = ancestor->parent; 00387 return ancestor; 00388 } 00389 00397 static __hwloc_inline hwloc_obj_t 00398 hwloc_get_ancestor_obj_by_type (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_type_t type, hwloc_obj_t obj) __hwloc_attribute_pure; 00399 static __hwloc_inline hwloc_obj_t 00400 hwloc_get_ancestor_obj_by_type (hwloc_topology_t topology __hwloc_attribute_unused, 
hwloc_obj_type_t type, hwloc_obj_t obj) 00401 { 00402 hwloc_obj_t ancestor = obj->parent; 00403 while (ancestor && ancestor->type != type) 00404 ancestor = ancestor->parent; 00405 return ancestor; 00406 } 00407 00409 static __hwloc_inline hwloc_obj_t 00410 hwloc_get_common_ancestor_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj1, hwloc_obj_t obj2) __hwloc_attribute_pure; 00411 static __hwloc_inline hwloc_obj_t 00412 hwloc_get_common_ancestor_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj1, hwloc_obj_t obj2) 00413 { 00414 /* the loop isn't so easy since intermediate ancestors may have 00415 * different depth, causing us to alternate between using obj1->parent 00416 * and obj2->parent. Also, even if at some point we find ancestors of 00417 * of the same depth, their ancestors may have different depth again. 00418 */ 00419 while (obj1 != obj2) { 00420 while (obj1->depth > obj2->depth) 00421 obj1 = obj1->parent; 00422 while (obj2->depth > obj1->depth) 00423 obj2 = obj2->parent; 00424 if (obj1 != obj2 && obj1->depth == obj2->depth) { 00425 obj1 = obj1->parent; 00426 obj2 = obj2->parent; 00427 } 00428 } 00429 return obj1; 00430 } 00431 00437 static __hwloc_inline int 00438 hwloc_obj_is_in_subtree (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj, hwloc_obj_t subtree_root) __hwloc_attribute_pure; 00439 static __hwloc_inline int 00440 hwloc_obj_is_in_subtree (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj, hwloc_obj_t subtree_root) 00441 { 00442 return obj->cpuset && subtree_root->cpuset && hwloc_bitmap_isincluded(obj->cpuset, subtree_root->cpuset); 00443 } 00444 00455 static __hwloc_inline hwloc_obj_t 00456 hwloc_get_next_child (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t parent, hwloc_obj_t prev) 00457 { 00458 hwloc_obj_t obj; 00459 int state = 0; 00460 if (prev) { 00461 if (prev->type == HWLOC_OBJ_MISC) 00462 state = 3; 00463 else if (prev->type == 
HWLOC_OBJ_BRIDGE || prev->type == HWLOC_OBJ_PCI_DEVICE || prev->type == HWLOC_OBJ_OS_DEVICE) 00464 state = 2; 00465 else if (prev->type == HWLOC_OBJ_NUMANODE) 00466 state = 1; 00467 obj = prev->next_sibling; 00468 } else { 00469 obj = parent->first_child; 00470 } 00471 if (!obj && state == 0) { 00472 obj = parent->memory_first_child; 00473 state = 1; 00474 } 00475 if (!obj && state == 1) { 00476 obj = parent->io_first_child; 00477 state = 2; 00478 } 00479 if (!obj && state == 2) { 00480 obj = parent->misc_first_child; 00481 state = 3; 00482 } 00483 return obj; 00484 } 00485 00512 HWLOC_DECLSPEC int 00513 hwloc_obj_type_is_normal(hwloc_obj_type_t type); 00514 00523 HWLOC_DECLSPEC int 00524 hwloc_obj_type_is_io(hwloc_obj_type_t type); 00525 00534 HWLOC_DECLSPEC int 00535 hwloc_obj_type_is_memory(hwloc_obj_type_t type); 00536 00543 HWLOC_DECLSPEC int 00544 hwloc_obj_type_is_cache(hwloc_obj_type_t type); 00545 00552 HWLOC_DECLSPEC int 00553 hwloc_obj_type_is_dcache(hwloc_obj_type_t type); 00554 00561 HWLOC_DECLSPEC int 00562 hwloc_obj_type_is_icache(hwloc_obj_type_t type); 00563 00593 static __hwloc_inline int 00594 hwloc_get_cache_type_depth (hwloc_topology_t topology, 00595 unsigned cachelevel, hwloc_obj_cache_type_t cachetype) 00596 { 00597 int depth; 00598 int found = HWLOC_TYPE_DEPTH_UNKNOWN; 00599 for (depth=0; ; depth++) { 00600 hwloc_obj_t obj = hwloc_get_obj_by_depth(topology, depth, 0); 00601 if (!obj) 00602 break; 00603 if (!hwloc_obj_type_is_dcache(obj->type) || obj->attr->cache.depth != cachelevel) 00604 /* doesn't match, try next depth */ 00605 continue; 00606 if (cachetype == (hwloc_obj_cache_type_t) -1) { 00607 if (found != HWLOC_TYPE_DEPTH_UNKNOWN) { 00608 /* second match, return MULTIPLE */ 00609 return HWLOC_TYPE_DEPTH_MULTIPLE; 00610 } 00611 /* first match, mark it as found */ 00612 found = depth; 00613 continue; 00614 } 00615 if (obj->attr->cache.type == cachetype || obj->attr->cache.type == HWLOC_OBJ_CACHE_UNIFIED) 00616 /* exact match (either 
unified is alone, or we match instruction or data), return immediately */ 00617 return depth; 00618 } 00619 /* went to the bottom, return what we found */ 00620 return found; 00621 } 00622 00627 static __hwloc_inline hwloc_obj_t 00628 hwloc_get_cache_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set) __hwloc_attribute_pure; 00629 static __hwloc_inline hwloc_obj_t 00630 hwloc_get_cache_covering_cpuset (hwloc_topology_t topology, hwloc_const_cpuset_t set) 00631 { 00632 hwloc_obj_t current = hwloc_get_obj_covering_cpuset(topology, set); 00633 while (current) { 00634 if (hwloc_obj_type_is_dcache(current->type)) 00635 return current; 00636 current = current->parent; 00637 } 00638 return NULL; 00639 } 00640 00645 static __hwloc_inline hwloc_obj_t 00646 hwloc_get_shared_cache_covering_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj) __hwloc_attribute_pure; 00647 static __hwloc_inline hwloc_obj_t 00648 hwloc_get_shared_cache_covering_obj (hwloc_topology_t topology __hwloc_attribute_unused, hwloc_obj_t obj) 00649 { 00650 hwloc_obj_t current = obj->parent; 00651 if (!obj->cpuset) 00652 return NULL; 00653 while (current) { 00654 if (!hwloc_bitmap_isequal(current->cpuset, obj->cpuset) 00655 && hwloc_obj_type_is_dcache(current->type)) 00656 return current; 00657 current = current->parent; 00658 } 00659 return NULL; 00660 } 00661 00691 HWLOC_DECLSPEC int hwloc_bitmap_singlify_per_core(hwloc_topology_t topology, hwloc_bitmap_t cpuset, unsigned which); 00692 00702 static __hwloc_inline hwloc_obj_t 00703 hwloc_get_pu_obj_by_os_index(hwloc_topology_t topology, unsigned os_index) __hwloc_attribute_pure; 00704 static __hwloc_inline hwloc_obj_t 00705 hwloc_get_pu_obj_by_os_index(hwloc_topology_t topology, unsigned os_index) 00706 { 00707 hwloc_obj_t obj = NULL; 00708 while ((obj = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_PU, obj)) != NULL) 00709 if (obj->os_index == os_index) 00710 return obj; 00711 return NULL; 00712 } 00713 00723 
static __hwloc_inline hwloc_obj_t 00724 hwloc_get_numanode_obj_by_os_index(hwloc_topology_t topology, unsigned os_index) __hwloc_attribute_pure; 00725 static __hwloc_inline hwloc_obj_t 00726 hwloc_get_numanode_obj_by_os_index(hwloc_topology_t topology, unsigned os_index) 00727 { 00728 hwloc_obj_t obj = NULL; 00729 while ((obj = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_NUMANODE, obj)) != NULL) 00730 if (obj->os_index == os_index) 00731 return obj; 00732 return NULL; 00733 } 00734 00746 /* TODO: rather provide an iterator? Provide a way to know how much should be allocated? By returning the total number of objects instead? */ 00747 HWLOC_DECLSPEC unsigned hwloc_get_closest_objs (hwloc_topology_t topology, hwloc_obj_t src, hwloc_obj_t * __hwloc_restrict objs, unsigned max); 00748 00761 static __hwloc_inline hwloc_obj_t 00762 hwloc_get_obj_below_by_type (hwloc_topology_t topology, 00763 hwloc_obj_type_t type1, unsigned idx1, 00764 hwloc_obj_type_t type2, unsigned idx2) __hwloc_attribute_pure; 00765 static __hwloc_inline hwloc_obj_t 00766 hwloc_get_obj_below_by_type (hwloc_topology_t topology, 00767 hwloc_obj_type_t type1, unsigned idx1, 00768 hwloc_obj_type_t type2, unsigned idx2) 00769 { 00770 hwloc_obj_t obj; 00771 obj = hwloc_get_obj_by_type (topology, type1, idx1); 00772 if (!obj) 00773 return NULL; 00774 return hwloc_get_obj_inside_cpuset_by_type(topology, obj->cpuset, type2, idx2); 00775 } 00776 00795 static __hwloc_inline hwloc_obj_t 00796 hwloc_get_obj_below_array_by_type (hwloc_topology_t topology, int nr, hwloc_obj_type_t *typev, unsigned *idxv) __hwloc_attribute_pure; 00797 static __hwloc_inline hwloc_obj_t 00798 hwloc_get_obj_below_array_by_type (hwloc_topology_t topology, int nr, hwloc_obj_type_t *typev, unsigned *idxv) 00799 { 00800 hwloc_obj_t obj = hwloc_get_root_obj(topology); 00801 int i; 00802 for(i=0; i<nr; i++) { 00803 if (!obj) 00804 return NULL; 00805 obj = hwloc_get_obj_inside_cpuset_by_type(topology, obj->cpuset, typev[i], idxv[i]); 00806 
} 00807 return obj; 00808 } 00809 00820 enum hwloc_distrib_flags_e { 00824 HWLOC_DISTRIB_FLAG_REVERSE = (1UL<<0) 00825 }; 00826 00850 static __hwloc_inline int 00851 hwloc_distrib(hwloc_topology_t topology, 00852 hwloc_obj_t *roots, unsigned n_roots, 00853 hwloc_cpuset_t *set, 00854 unsigned n, 00855 int until, unsigned long flags) 00856 { 00857 unsigned i; 00858 unsigned tot_weight; 00859 unsigned given, givenweight; 00860 hwloc_cpuset_t *cpusetp = set; 00861 00862 if (flags & ~HWLOC_DISTRIB_FLAG_REVERSE) { 00863 errno = EINVAL; 00864 return -1; 00865 } 00866 00867 tot_weight = 0; 00868 for (i = 0; i < n_roots; i++) 00869 tot_weight += (unsigned) hwloc_bitmap_weight(roots[i]->cpuset); 00870 00871 for (i = 0, given = 0, givenweight = 0; i < n_roots; i++) { 00872 unsigned chunk, weight; 00873 hwloc_obj_t root = roots[flags & HWLOC_DISTRIB_FLAG_REVERSE ? n_roots-1-i : i]; 00874 hwloc_cpuset_t cpuset = root->cpuset; 00875 if (root->type == HWLOC_OBJ_NUMANODE) 00876 /* NUMANodes have same cpuset as their parent, but we need normal objects below */ 00877 root = root->parent; 00878 weight = (unsigned) hwloc_bitmap_weight(cpuset); 00879 if (!weight) 00880 continue; 00881 /* Give to root a chunk proportional to its weight. 00882 * If previous chunks got rounded-up, we may get a bit less. */ 00883 chunk = (( (givenweight+weight) * n + tot_weight-1) / tot_weight) 00884 - (( givenweight * n + tot_weight-1) / tot_weight); 00885 if (!root->arity || chunk <= 1 || root->depth >= until) { 00886 /* We can't split any more, put everything there. */ 00887 if (chunk) { 00888 /* Fill cpusets with ours */ 00889 unsigned j; 00890 for (j=0; j < chunk; j++) 00891 cpusetp[j] = hwloc_bitmap_dup(cpuset); 00892 } else { 00893 /* We got no chunk, just merge our cpuset to a previous one 00894 * (the first chunk cannot be empty) 00895 * so that this root doesn't get ignored. 
00896 */ 00897 assert(given); 00898 hwloc_bitmap_or(cpusetp[-1], cpusetp[-1], cpuset); 00899 } 00900 } else { 00901 /* Still more to distribute, recurse into children */ 00902 hwloc_distrib(topology, root->children, root->arity, cpusetp, chunk, until, flags); 00903 } 00904 cpusetp += chunk; 00905 given += chunk; 00906 givenweight += weight; 00907 } 00908 00909 return 0; 00910 } 00911 00929 HWLOC_DECLSPEC hwloc_const_cpuset_t 00930 hwloc_topology_get_complete_cpuset(hwloc_topology_t topology) __hwloc_attribute_pure; 00931 00943 HWLOC_DECLSPEC hwloc_const_cpuset_t 00944 hwloc_topology_get_topology_cpuset(hwloc_topology_t topology) __hwloc_attribute_pure; 00945 00962 HWLOC_DECLSPEC hwloc_const_cpuset_t 00963 hwloc_topology_get_allowed_cpuset(hwloc_topology_t topology) __hwloc_attribute_pure; 00964 00974 HWLOC_DECLSPEC hwloc_const_nodeset_t 00975 hwloc_topology_get_complete_nodeset(hwloc_topology_t topology) __hwloc_attribute_pure; 00976 00988 HWLOC_DECLSPEC hwloc_const_nodeset_t 00989 hwloc_topology_get_topology_nodeset(hwloc_topology_t topology) __hwloc_attribute_pure; 00990 01007 HWLOC_DECLSPEC hwloc_const_nodeset_t 01008 hwloc_topology_get_allowed_nodeset(hwloc_topology_t topology) __hwloc_attribute_pure; 01009 01030 static __hwloc_inline int 01031 hwloc_cpuset_to_nodeset(hwloc_topology_t topology, hwloc_const_cpuset_t _cpuset, hwloc_nodeset_t nodeset) 01032 { 01033 int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE); 01034 hwloc_obj_t obj = NULL; 01035 assert(depth != HWLOC_TYPE_DEPTH_UNKNOWN); 01036 hwloc_bitmap_zero(nodeset); 01037 while ((obj = hwloc_get_next_obj_covering_cpuset_by_depth(topology, _cpuset, depth, obj)) != NULL) 01038 if (hwloc_bitmap_set(nodeset, obj->os_index) < 0) 01039 return -1; 01040 return 0; 01041 } 01042 01054 static __hwloc_inline int 01055 hwloc_cpuset_from_nodeset(hwloc_topology_t topology, hwloc_cpuset_t _cpuset, hwloc_const_nodeset_t nodeset) 01056 { 01057 int depth = hwloc_get_type_depth(topology, HWLOC_OBJ_NUMANODE); 
01058 hwloc_obj_t obj = NULL; 01059 assert(depth != HWLOC_TYPE_DEPTH_UNKNOWN); 01060 hwloc_bitmap_zero(_cpuset); 01061 while ((obj = hwloc_get_next_obj_by_depth(topology, depth, obj)) != NULL) { 01062 if (hwloc_bitmap_isset(nodeset, obj->os_index)) 01063 /* no need to check obj->cpuset because objects in levels always have a cpuset */ 01064 if (hwloc_bitmap_or(_cpuset, _cpuset, obj->cpuset) < 0) 01065 return -1; 01066 } 01067 return 0; 01068 } 01069 01089 static __hwloc_inline hwloc_obj_t 01090 hwloc_get_non_io_ancestor_obj(hwloc_topology_t topology __hwloc_attribute_unused, 01091 hwloc_obj_t ioobj) 01092 { 01093 hwloc_obj_t obj = ioobj; 01094 while (obj && !obj->cpuset) { 01095 obj = obj->parent; 01096 } 01097 return obj; 01098 } 01099 01104 static __hwloc_inline hwloc_obj_t 01105 hwloc_get_next_pcidev(hwloc_topology_t topology, hwloc_obj_t prev) 01106 { 01107 return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_PCI_DEVICE, prev); 01108 } 01109 01113 static __hwloc_inline hwloc_obj_t 01114 hwloc_get_pcidev_by_busid(hwloc_topology_t topology, 01115 unsigned domain, unsigned bus, unsigned dev, unsigned func) 01116 { 01117 hwloc_obj_t obj = NULL; 01118 while ((obj = hwloc_get_next_pcidev(topology, obj)) != NULL) { 01119 if (obj->attr->pcidev.domain == domain 01120 && obj->attr->pcidev.bus == bus 01121 && obj->attr->pcidev.dev == dev 01122 && obj->attr->pcidev.func == func) 01123 return obj; 01124 } 01125 return NULL; 01126 } 01127 01131 static __hwloc_inline hwloc_obj_t 01132 hwloc_get_pcidev_by_busidstring(hwloc_topology_t topology, const char *busid) 01133 { 01134 unsigned domain = 0; /* default */ 01135 unsigned bus, dev, func; 01136 01137 if (sscanf(busid, "%x:%x.%x", &bus, &dev, &func) != 3 01138 && sscanf(busid, "%x:%x:%x.%x", &domain, &bus, &dev, &func) != 4) { 01139 errno = EINVAL; 01140 return NULL; 01141 } 01142 01143 return hwloc_get_pcidev_by_busid(topology, domain, bus, dev, func); 01144 } 01145 01150 static __hwloc_inline hwloc_obj_t 01151 
hwloc_get_next_osdev(hwloc_topology_t topology, hwloc_obj_t prev) 01152 { 01153 return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_OS_DEVICE, prev); 01154 } 01155 01160 static __hwloc_inline hwloc_obj_t 01161 hwloc_get_next_bridge(hwloc_topology_t topology, hwloc_obj_t prev) 01162 { 01163 return hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_BRIDGE, prev); 01164 } 01165 01166 /* \brief Checks whether a given bridge covers a given PCI bus. 01167 */ 01168 static __hwloc_inline int 01169 hwloc_bridge_covers_pcibus(hwloc_obj_t bridge, 01170 unsigned domain, unsigned bus) 01171 { 01172 return bridge->type == HWLOC_OBJ_BRIDGE 01173 && bridge->attr->bridge.downstream_type == HWLOC_OBJ_BRIDGE_PCI 01174 && bridge->attr->bridge.downstream.pci.domain == domain 01175 && bridge->attr->bridge.downstream.pci.secondary_bus <= bus 01176 && bridge->attr->bridge.downstream.pci.subordinate_bus >= bus; 01177 } 01178 01183 #ifdef __cplusplus 01184 } /* extern "C" */ 01185 #endif 01186 01187 01188 #endif /* HWLOC_HELPER_H */