Line | Branch | Exec | Source |
---|---|---|---|
1 | /*********************************************************************************/ | ||
2 | /* Copyright 2009-2024 Barcelona Supercomputing Center */ | ||
3 | /* */ | ||
4 | /* This file is part of the DLB library. */ | ||
5 | /* */ | ||
6 | /* DLB is free software: you can redistribute it and/or modify */ | ||
7 | /* it under the terms of the GNU Lesser General Public License as published by */ | ||
8 | /* the Free Software Foundation, either version 3 of the License, or */ | ||
9 | /* (at your option) any later version. */ | ||
10 | /* */ | ||
11 | /* DLB is distributed in the hope that it will be useful, */ | ||
12 | /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ | ||
13 | /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ | ||
14 | /* GNU Lesser General Public License for more details. */ | ||
15 | /* */ | ||
16 | /* You should have received a copy of the GNU Lesser General Public License */ | ||
17 | /* along with DLB. If not, see <https://www.gnu.org/licenses/>. */ | ||
18 | /*********************************************************************************/ | ||
19 | |||
20 | #ifdef HAVE_CONFIG_H | ||
21 | #include <config.h> | ||
22 | #endif | ||
23 | |||
24 | #include "support/mask_utils.h" | ||
25 | |||
26 | #include "support/debug.h" | ||
27 | |||
28 | #ifdef HWLOC_LIB | ||
29 | #include <hwloc.h> | ||
30 | #include <hwloc/bitmap.h> | ||
31 | #include <hwloc/glibc-sched.h> | ||
32 | #endif | ||
33 | #include <unistd.h> | ||
34 | #include <sys/types.h> | ||
35 | #include <dirent.h> | ||
36 | |||
37 | #include <sched.h> | ||
38 | #include <stdio.h> | ||
39 | #include <stdlib.h> | ||
40 | #include <string.h> | ||
41 | #include <limits.h> | ||
42 | #include <ctype.h> | ||
43 | #include <sys/types.h> | ||
44 | #include <regex.h> | ||
45 | |||
46 | #ifdef IS_BGQ_MACHINE | ||
47 | static void parse_mask_from_file(const char *filename, cpu_set_t *mask) | ||
48 | __attribute__((unused)); | ||
49 | static int parse_hwloc(void) __attribute__((unused)); | ||
50 | static void parse_system_files(void) __attribute__((unused)); | ||
51 | #endif | ||
52 | |||
53 | |||
54 | /*********************************************************************************/ | ||
55 | /* mu_cpuset_t: custom cpuset type for mask utils */ | ||
56 | /*********************************************************************************/ | ||
57 | |||
58 | /* Initial values to accommodate up to CPU_SETSIZE CPUs. | ||
59 | * Later, they are reduced according to the machine specification */ | ||
60 | static unsigned int mu_cpuset_setsize = CPU_SETSIZE; | ||
61 | static size_t mu_cpuset_alloc_size = CPU_ALLOC_SIZE(CPU_SETSIZE); | ||
62 | static size_t mu_cpuset_num_ulongs = CPU_ALLOC_SIZE(CPU_SETSIZE) / sizeof(unsigned long); | ||
63 | |||
64 | |||
65 | 844 | static inline void mu_cpuset_from_glibc_sched_affinity(mu_cpuset_t *mu_cpuset, | |
66 | const cpu_set_t *cpu_set) { | ||
67 | 3376 | *mu_cpuset = (const mu_cpuset_t) { | |
68 | 844 | .set = CPU_ALLOC(mu_cpuset_setsize), | |
69 | .alloc_size = mu_cpuset_alloc_size, | ||
70 | 844 | .count = CPU_COUNT_S(mu_cpuset_alloc_size, cpu_set), | |
71 | 844 | .first_cpuid = mu_get_first_cpu(cpu_set), | |
72 | 844 | .last_cpuid = mu_get_last_cpu(cpu_set), | |
73 | }; | ||
74 | 844 | memcpy(mu_cpuset->set, cpu_set, mu_cpuset_alloc_size); | |
75 | 844 | } | |
76 | |||
77 | #ifdef HWLOC_LIB | ||
78 | 590 | static inline void mu_cpuset_from_hwloc_bitmap(mu_cpuset_t *mu_cpuset, | |
79 | hwloc_const_bitmap_t bitmap, hwloc_topology_t topology) { | ||
80 | 590 | *mu_cpuset = (const mu_cpuset_t) { | |
81 | 590 | .set = CPU_ALLOC(mu_cpuset_setsize), | |
82 | .alloc_size = mu_cpuset_alloc_size, | ||
83 | 590 | .count = hwloc_bitmap_weight(bitmap), | |
84 | 590 | .first_cpuid = hwloc_bitmap_first(bitmap), | |
85 | 590 | .last_cpuid = hwloc_bitmap_last(bitmap), | |
86 | }; | ||
87 | 590 | hwloc_cpuset_to_glibc_sched_affinity(topology, bitmap, mu_cpuset->set, | |
88 | mu_cpuset_alloc_size); | ||
89 | 590 | } | |
90 | #endif | ||
91 | |||
92 | |||
93 | /*********************************************************************************/ | ||
94 | /* Mask utils system info */ | ||
95 | /*********************************************************************************/ | ||
96 | |||
97 | /* mask_utils main structure with system info */ | ||
98 | typedef struct { | ||
99 | unsigned int num_nodes; | ||
100 | unsigned int num_cores; | ||
101 | unsigned int num_cpus; | ||
102 | mu_cpuset_t sys_mask; | ||
103 | mu_cpuset_t* node_masks; | ||
104 | mu_cpuset_t* core_masks_by_coreid; | ||
105 | mu_cpuset_t** core_masks_by_cpuid; | ||
106 | } mu_system_loc_t; | ||
107 | |||
108 | enum { BITS_PER_BYTE = 8 }; | ||
109 | enum { CPUS_PER_ULONG = sizeof(unsigned long) * BITS_PER_BYTE }; | ||
110 | |||
111 | static mu_system_loc_t sys = {0}; | ||
112 | static bool mu_initialized = false; | ||
113 | |||
114 | 67 | static void init_mu_struct(void) { | |
115 | 67 | sys = (const mu_system_loc_t) {}; | |
116 | 67 | } | |
117 | |||
118 | /* This function (re-)initializes 'sys' with the given cpu sets. | ||
119 | * It is used for specific set-ups, fallback, or testing purposes */ | ||
120 | 54 | static void init_system_masks(const cpu_set_t *sys_mask, | |
121 | const cpu_set_t *core_masks, unsigned int num_cores, | ||
122 | const cpu_set_t *node_masks, unsigned int num_nodes) { | ||
123 | |||
124 | /* De-allocate structures if already initialized */ | ||
125 |
2/2✓ Branch 0 taken 47 times.
✓ Branch 1 taken 7 times.
|
54 | if (mu_initialized) { |
126 | 47 | mu_finalize(); | |
127 | } else { | ||
128 | 7 | init_mu_struct(); | |
129 | } | ||
130 | |||
131 | /*** System ***/ | ||
132 | 54 | sys.num_cpus = mu_get_last_cpu(sys_mask) + 1; | |
133 | 54 | mu_cpuset_setsize = sys.num_cpus; | |
134 | 54 | mu_cpuset_num_ulongs = (mu_cpuset_setsize + CPUS_PER_ULONG - 1) / CPUS_PER_ULONG; | |
135 | 54 | mu_cpuset_alloc_size = mu_cpuset_num_ulongs * sizeof(unsigned long); | |
136 | 54 | sys.sys_mask = (const mu_cpuset_t) { | |
137 | 54 | .set = CPU_ALLOC(mu_cpuset_setsize), | |
138 | 54 | .count = CPU_COUNT_S(mu_cpuset_alloc_size, sys_mask), | |
139 | .first_cpuid = 0, | ||
140 | 54 | .last_cpuid = mu_cpuset_setsize - 1, | |
141 | }; | ||
142 | 54 | memcpy(sys.sys_mask.set, sys_mask, mu_cpuset_alloc_size); | |
143 | |||
144 | /*** Cores ***/ | ||
145 | 54 | sys.num_cores = num_cores; | |
146 | 54 | sys.core_masks_by_coreid = malloc(sys.num_cores * sizeof(mu_cpuset_t)); | |
147 | 54 | sys.core_masks_by_cpuid = calloc(sys.num_cpus, sizeof(mu_cpuset_t*)); | |
148 |
2/2✓ Branch 0 taken 776 times.
✓ Branch 1 taken 54 times.
|
830 | for (unsigned int core_id = 0; core_id < sys.num_cores; ++core_id) { |
149 | 776 | mu_cpuset_t *core_cpuset = &sys.core_masks_by_coreid[core_id]; | |
150 | 776 | mu_cpuset_from_glibc_sched_affinity(core_cpuset, &core_masks[core_id]); | |
151 | 776 | for (int cpuid = core_cpuset->first_cpuid; | |
152 |
2/2✓ Branch 0 taken 848 times.
✓ Branch 1 taken 776 times.
|
1624 | cpuid >= 0; |
153 | 848 | cpuid = mu_get_next_cpu(core_cpuset->set, cpuid)) { | |
154 | /* Save reference to another array indexed by cpuid */ | ||
155 | 848 | sys.core_masks_by_cpuid[cpuid] = core_cpuset; | |
156 | } | ||
157 | } | ||
158 | |||
159 | /*** NUMA Nodes ***/ | ||
160 | 54 | sys.num_nodes = num_nodes; | |
161 | 54 | sys.node_masks = malloc(sys.num_nodes * sizeof(mu_cpuset_t)); | |
162 |
2/2✓ Branch 0 taken 58 times.
✓ Branch 1 taken 54 times.
|
112 | for (unsigned int node_id = 0; node_id < sys.num_nodes; ++node_id) { |
163 | 58 | mu_cpuset_from_glibc_sched_affinity(&sys.node_masks[node_id], &node_masks[node_id]); | |
164 | } | ||
165 | |||
166 | 54 | mu_initialized = true; | |
167 | 54 | } | |
168 | |||
169 | |||
170 | /* This function (re-)initializes 'sys' given an overall number of resources. | ||
171 | * It is used for specific set-ups, fallback, or testing purposes */ | ||
172 | 43 | static void init_system(unsigned int num_cpus, unsigned int num_cores, | |
173 | unsigned int num_nodes) { | ||
174 | |||
175 | /*** System ***/ | ||
176 | cpu_set_t sys_mask; | ||
177 | 43 | CPU_ZERO(&sys_mask); | |
178 |
2/2✓ Branch 0 taken 760 times.
✓ Branch 1 taken 43 times.
|
803 | for (cpuid_t cpuid = 0; cpuid < num_cpus; ++cpuid) { |
179 |
1/2✓ Branch 0 taken 760 times.
✗ Branch 1 not taken.
|
760 | CPU_SET(cpuid, &sys_mask); |
180 | } | ||
181 | |||
182 | /*** Cores ***/ | ||
183 | 43 | cpuid_t cpus_per_core = num_cpus / num_cores; | |
184 | 43 | cpu_set_t *core_masks = calloc(num_cores, sizeof(cpu_set_t)); | |
185 |
2/2✓ Branch 0 taken 732 times.
✓ Branch 1 taken 43 times.
|
775 | for (cpuid_t core_id = 0; core_id < num_cores; ++core_id) { |
186 | 732 | for (cpuid_t cpuid = core_id * cpus_per_core; | |
187 |
2/2✓ Branch 0 taken 760 times.
✓ Branch 1 taken 732 times.
|
1492 | cpuid < (core_id+1) * cpus_per_core; ++cpuid) { |
188 |
1/2✓ Branch 0 taken 760 times.
✗ Branch 1 not taken.
|
760 | CPU_SET(cpuid, &core_masks[core_id]); |
189 | } | ||
190 | } | ||
191 | |||
192 | /*** NUMA Nodes ***/ | ||
193 | 43 | cpuid_t cpus_per_node = num_cpus / num_nodes; | |
194 | 43 | cpu_set_t *node_masks = calloc(num_nodes, sizeof(cpu_set_t)); | |
195 |
2/2✓ Branch 0 taken 46 times.
✓ Branch 1 taken 43 times.
|
89 | for (cpuid_t node_id = 0; node_id < num_nodes; ++node_id) { |
196 | 46 | for (cpuid_t cpuid = node_id * cpus_per_node; | |
197 |
2/2✓ Branch 0 taken 760 times.
✓ Branch 1 taken 46 times.
|
806 | cpuid < (node_id+1) * cpus_per_node; ++cpuid) { |
198 |
1/2✓ Branch 0 taken 760 times.
✗ Branch 1 not taken.
|
760 | CPU_SET(cpuid, &node_masks[node_id]); |
199 | } | ||
200 | } | ||
201 | |||
202 | 43 | init_system_masks(&sys_mask, core_masks, num_cores, node_masks, num_nodes); | |
203 | 43 | free(core_masks); | |
204 | 43 | free(node_masks); | |
205 | 43 | } | |
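To make the even-split layout above concrete, here is a small worked illustration (my own, not from the source; the input values of 8 CPUs, 4 cores and 2 NUMA nodes are hypothetical):

```c
/* Hypothetical call: init_system(8, 4, 2)
 *   cpus_per_core = 8 / 4 = 2  ->  core_masks = { [0-1], [2-3], [4-5], [6-7] }
 *   cpus_per_node = 8 / 2 = 4  ->  node_masks = { [0-3], [4-7] }
 *   sys_mask = [0-7]; init_system_masks() then fills core_masks_by_cpuid so
 *   that, e.g., cpuid 5 resolves to the core mask [4-5]. */
```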
206 | |||
207 | 59 | static int parse_hwloc(void) { | |
208 | #ifdef HWLOC_LIB | ||
209 | /* Check runtime library compatibility */ | ||
210 | 59 | unsigned int hwloc_version = hwloc_get_api_version(); | |
211 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 59 times.
|
59 | if (hwloc_version >> 16 != HWLOC_API_VERSION >> 16) { |
212 | ✗ | warning("Detected incompatible HWLOC runtime library"); | |
213 | ✗ | return -1; | |
214 | } | ||
215 | |||
216 | hwloc_topology_t topology; | ||
217 | 59 | hwloc_topology_init(&topology); | |
218 | 59 | hwloc_topology_load(topology); | |
219 | |||
220 | /*** System ***/ | ||
221 | 59 | hwloc_obj_t machine = hwloc_get_obj_by_type(topology, HWLOC_OBJ_MACHINE, 0); | |
222 | 59 | sys.num_cpus = hwloc_bitmap_last(machine->cpuset) + 1; | |
223 | 59 | mu_cpuset_setsize = sys.num_cpus; | |
224 | #if HWLOC_API_VERSION >= 0x00020100 | ||
225 | 59 | mu_cpuset_num_ulongs = hwloc_bitmap_nr_ulongs(machine->cpuset); | |
226 | #else | ||
227 | mu_cpuset_num_ulongs = (mu_cpuset_setsize + CPUS_PER_ULONG - 1) / CPUS_PER_ULONG; | ||
228 | #endif | ||
229 | 59 | mu_cpuset_alloc_size = mu_cpuset_num_ulongs * sizeof(unsigned long); | |
230 | 59 | mu_cpuset_from_hwloc_bitmap(&sys.sys_mask, machine->cpuset, topology); | |
231 | |||
232 | /*** Cores ***/ | ||
233 | 59 | hwloc_obj_type_t core = HWLOC_OBJ_CORE; | |
234 | 59 | sys.num_cores = hwloc_get_nbobjs_by_type(topology, core); | |
235 | 59 | sys.core_masks_by_coreid = calloc(sys.num_cores, sizeof(mu_cpuset_t)); | |
236 | 59 | sys.core_masks_by_cpuid = calloc(sys.num_cpus, sizeof(mu_cpuset_t*)); | |
237 | 59 | unsigned int num_valid_cores = 0; | |
238 |
2/2✓ Branch 0 taken 472 times.
✓ Branch 1 taken 59 times.
|
531 | for (unsigned int core_id = 0; core_id < sys.num_cores; ++core_id) { |
239 | 472 | hwloc_obj_t obj = hwloc_get_obj_by_type(topology, core, core_id); | |
240 |
1/2✓ Branch 0 taken 472 times.
✗ Branch 1 not taken.
|
472 | if (!hwloc_bitmap_iszero(obj->cpuset)) { |
241 | 472 | ++num_valid_cores; | |
242 | 472 | mu_cpuset_t *core_cpuset = &sys.core_masks_by_coreid[core_id]; | |
243 | 472 | mu_cpuset_from_hwloc_bitmap(core_cpuset, obj->cpuset, topology); | |
244 | 472 | for (int cpuid = core_cpuset->first_cpuid; | |
245 |
2/2✓ Branch 0 taken 472 times.
✓ Branch 1 taken 472 times.
|
944 | cpuid >= 0; |
246 | 472 | cpuid = hwloc_bitmap_next(obj->cpuset, cpuid)) { | |
247 | /* Save reference to another array indexed by cpuid */ | ||
248 | 472 | sys.core_masks_by_cpuid[cpuid] = core_cpuset; | |
249 | } | ||
250 | } | ||
251 | } | ||
252 | |||
253 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 59 times.
|
59 | fatal_cond(!num_valid_cores, "HWLOC could not find Core affinity masks"); |
254 | |||
255 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 59 times.
|
59 | if (sys.num_cores != num_valid_cores) { |
256 | ✗ | verbose(VB_AFFINITY, "HWLOC found %d cores but only %d with a valid mask", | |
257 | sys.num_cores, num_valid_cores); | ||
258 | } | ||
259 | |||
260 | /*** NUMA Nodes ***/ | ||
261 | 59 | hwloc_obj_type_t node = HWLOC_OBJ_NODE; | |
262 | 59 | sys.num_nodes = hwloc_get_nbobjs_by_type(topology, node); | |
263 | 59 | sys.node_masks = calloc(sys.num_cores, sizeof(mu_cpuset_t)); | |
264 | 59 | unsigned int num_valid_nodes = 0; | |
265 |
2/2✓ Branch 0 taken 59 times.
✓ Branch 1 taken 59 times.
|
118 | for (unsigned int node_id = 0; node_id < sys.num_nodes; ++node_id) { |
266 | 59 | hwloc_obj_t obj = hwloc_get_obj_by_type(topology, node, node_id); | |
267 |
1/2✓ Branch 0 taken 59 times.
✗ Branch 1 not taken.
|
59 | if (!hwloc_bitmap_iszero(obj->cpuset)) { |
268 | 59 | ++num_valid_nodes; | |
269 | 59 | mu_cpuset_from_hwloc_bitmap(&sys.node_masks[node_id], | |
270 | 59 | obj->cpuset, topology); | |
271 | } | ||
272 | } | ||
273 | |||
274 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 59 times.
|
59 | fatal_cond(!num_valid_nodes, "HWLOC could not find Node affinity masks"); |
275 | |||
276 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 59 times.
|
59 | if (sys.num_nodes != num_valid_nodes) { |
277 | ✗ | verbose(VB_AFFINITY, "HWLOC found %d nodes but only %d with a valid mask", | |
278 | sys.num_nodes, num_valid_nodes); | ||
279 | } | ||
280 | |||
281 | 59 | hwloc_topology_destroy(topology); | |
282 | |||
283 | 59 | return 0; | |
284 | #else | ||
285 | return -1; | ||
286 | #endif | ||
287 | } | ||
288 | |||
289 | 10 | static void parse_mask_from_file(const char *filename, cpu_set_t *mask) { | |
290 |
1/2✓ Branch 1 taken 10 times.
✗ Branch 2 not taken.
|
10 | if (access(filename, F_OK) == 0) { |
291 | enum { BUF_LEN = CPU_SETSIZE*7 }; | ||
292 | char buf[BUF_LEN]; | ||
293 | 10 | FILE *fd = fopen(filename, "r"); | |
294 | |||
295 |
1/2✗ Branch 1 not taken.
✓ Branch 2 taken 10 times.
|
10 | if (!fgets(buf, BUF_LEN, fd)) { |
296 | ✗ | fatal("cannot read %s\n", filename); | |
297 | } | ||
298 | 10 | fclose(fd); | |
299 | |||
300 | 10 | size_t len = strlen(buf); | |
301 |
1/2✓ Branch 0 taken 10 times.
✗ Branch 1 not taken.
|
10 | if (buf[len - 1] == '\n') |
302 | 10 | buf[len - 1] = '\0'; | |
303 | |||
304 | 10 | mu_parse_mask(buf, mask); | |
305 | } | ||
306 | 10 | } | |
307 | |||
308 | 8 | static int parse_int_from_file(const char *filename) { | |
309 | 8 | int value = -1; | |
310 |
1/2✓ Branch 1 taken 8 times.
✗ Branch 2 not taken.
|
8 | if (access(filename, F_OK) == 0) { |
311 | enum { BUF_LEN = 16 }; | ||
312 | char buf[BUF_LEN]; | ||
313 | 8 | FILE *fd = fopen(filename, "r"); | |
314 | |||
315 |
1/2✗ Branch 1 not taken.
✓ Branch 2 taken 8 times.
|
8 | if (!fgets(buf, BUF_LEN, fd)) { |
316 | ✗ | fatal("cannot read %s\n", filename); | |
317 | } | ||
318 | 8 | fclose(fd); | |
319 | |||
320 | 8 | value = strtol(buf, NULL, 10); | |
321 | } | ||
322 | 8 | return value; | |
323 | } | ||
324 | |||
325 | #define PATH_SYSTEM_MASK "/sys/devices/system/cpu/present" | ||
326 | #define PATH_SYSTEM_CPUS "/sys/devices/system/cpu" | ||
327 | #define PATH_SYSTEM_NODE "/sys/devices/system/node" | ||
328 | 1 | static void parse_system_files(void) { | |
329 | /*** System ***/ | ||
330 | cpu_set_t system_mask; | ||
331 | 1 | parse_mask_from_file(PATH_SYSTEM_MASK, &system_mask); | |
332 | 1 | sys.num_cpus = mu_get_last_cpu(&system_mask) + 1; | |
333 | 1 | mu_cpuset_setsize = sys.num_cpus; | |
334 | 1 | mu_cpuset_num_ulongs = (mu_cpuset_setsize + CPUS_PER_ULONG - 1) / CPUS_PER_ULONG; | |
335 | 1 | mu_cpuset_alloc_size = mu_cpuset_num_ulongs * sizeof(unsigned long); | |
336 | 1 | mu_cpuset_from_glibc_sched_affinity(&sys.sys_mask, &system_mask); | |
337 | |||
338 | /*** Cores ***/ | ||
339 | // note that we are probably overallocating because we don't know the | ||
340 | // number of cores yet | ||
341 | 1 | sys.core_masks_by_coreid = calloc(sys.num_cpus, sizeof(mu_cpuset_t)); | |
342 | 1 | sys.core_masks_by_cpuid = calloc(sys.num_cpus, sizeof(mu_cpuset_t*)); | |
343 | 1 | int num_cores = 0; | |
344 | 1 | DIR *dir = opendir(PATH_SYSTEM_CPUS); | |
345 |
1/2✓ Branch 0 taken 1 times.
✗ Branch 1 not taken.
|
1 | if (dir) { |
346 | struct dirent *d; | ||
347 |
2/2✓ Branch 1 taken 25 times.
✓ Branch 2 taken 1 times.
|
26 | while ((d = readdir(dir))) { |
348 |
3/4✓ Branch 0 taken 25 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 16 times.
✓ Branch 3 taken 9 times.
|
25 | if (d && d->d_type == DT_DIR |
349 |
2/2✓ Branch 0 taken 10 times.
✓ Branch 1 taken 6 times.
|
16 | && strncmp(d->d_name, "cpu", 3) == 0 |
350 |
2/2✓ Branch 0 taken 8 times.
✓ Branch 1 taken 2 times.
|
10 | && isdigit(d->d_name[3]) ) { |
351 | |||
352 | /* Get CPU id */ | ||
353 | 8 | int cpu_id = strtol(d->d_name+3, NULL, 10); | |
354 |
2/4✓ Branch 0 taken 8 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 8 times.
|
8 | fatal_cond(cpu_id < 0 || cpu_id > 1024, "Error parsing cpu_id"); |
355 | |||
356 | /* Get core CPUs list */ | ||
357 | cpu_set_t core_mask; | ||
358 | 8 | CPU_ZERO(&core_mask); | |
359 | char filename[64]; | ||
360 | 8 | snprintf(filename, 64, PATH_SYSTEM_CPUS | |
361 | 8 | "/%.8s/topology/thread_siblings_list", d->d_name); | |
362 | 8 | parse_mask_from_file(filename, &core_mask); | |
363 | |||
364 | /* Get core id; in some architectures this value may not be reliable */ | ||
365 | 8 | snprintf(filename, 64, PATH_SYSTEM_CPUS "/%.8s/topology/core_id", | |
366 | 8 | d->d_name); | |
367 | 8 | int core_id = parse_int_from_file(filename); | |
368 | |||
369 | /* Try to respect parsed core_id */ | ||
370 |
2/4✓ Branch 0 taken 8 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 8 times.
✗ Branch 3 not taken.
|
8 | if (core_id >= 0 && core_id < 1024) { |
371 | 8 | mu_cpuset_t *core_cpuset = &sys.core_masks_by_coreid[core_id]; | |
372 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 7 times.
|
8 | if (core_cpuset->set == NULL) { |
373 | /* Save core mask */ | ||
374 | 1 | mu_cpuset_from_glibc_sched_affinity(core_cpuset, &core_mask); | |
375 | 1 | ++num_cores; | |
376 |
1/2✓ Branch 0 taken 7 times.
✗ Branch 1 not taken.
|
7 | } else if (CPU_EQUAL_S(mu_cpuset_alloc_size, core_cpuset->set, &core_mask)) { |
377 | /* Core mask already saved */ | ||
378 | } else { | ||
379 | /* Current core mask differs */ | ||
380 | 7 | core_id = -1; | |
381 | } | ||
382 | } | ||
383 | |||
384 | /* core_id was not reliable; find an empty spot or the same core */ | ||
385 |
4/4✓ Branch 0 taken 42 times.
✓ Branch 1 taken 1 times.
✓ Branch 2 taken 35 times.
✓ Branch 3 taken 7 times.
|
43 | for (unsigned int i = 0; i < sys.num_cpus && core_id == -1; ++i) { |
386 | 35 | mu_cpuset_t *core_cpuset = &sys.core_masks_by_coreid[i]; | |
387 |
2/2✓ Branch 0 taken 7 times.
✓ Branch 1 taken 28 times.
|
35 | if (core_cpuset->set == NULL) { |
388 | /* Save core mask */ | ||
389 | 7 | mu_cpuset_from_glibc_sched_affinity(core_cpuset, &core_mask); | |
390 | 7 | ++num_cores; | |
391 | 7 | core_id = i; | |
392 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 28 times.
|
28 | } else if (CPU_EQUAL_S(mu_cpuset_alloc_size, core_cpuset->set, &core_mask)) { |
393 | /* Core mask already saved */ | ||
394 | ✗ | core_id = i; | |
395 | } | ||
396 | } | ||
397 | |||
398 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 8 times.
|
8 | fatal_cond(core_id == -1, "Could not obtain core id for CPU %d", cpu_id); |
399 | |||
400 | /* Add core mask reference to array indexed by CPU id */ | ||
401 | 8 | sys.core_masks_by_cpuid[cpu_id] = &sys.core_masks_by_coreid[core_id]; | |
402 | } | ||
403 | } | ||
404 | 1 | closedir(dir); | |
405 | } | ||
406 | 1 | sys.num_cores = num_cores; | |
407 | |||
408 | /*** NUMA Nodes ***/ | ||
409 | 1 | int num_nodes = 0; | |
410 | 1 | dir = opendir(PATH_SYSTEM_NODE); | |
411 |
1/2✓ Branch 0 taken 1 times.
✗ Branch 1 not taken.
|
1 | if (dir) { |
412 | struct dirent *d; | ||
413 |
2/2✓ Branch 1 taken 11 times.
✓ Branch 2 taken 1 times.
|
12 | while ((d = readdir(dir))) { |
414 |
3/4✓ Branch 0 taken 11 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 4 times.
✓ Branch 3 taken 7 times.
|
11 | if (d && d->d_type == DT_DIR |
415 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 3 times.
|
4 | && strncmp(d->d_name, "node", 4) == 0 |
416 |
1/2✓ Branch 0 taken 1 times.
✗ Branch 1 not taken.
|
1 | && isdigit(d->d_name[4]) ) { |
417 | |||
418 | /* Get node id */ | ||
419 | 1 | int node_id = strtol(d->d_name+4, NULL, 10); | |
420 |
2/4✓ Branch 0 taken 1 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 1 times.
|
1 | fatal_cond(node_id < 0 || node_id > 1024, "Error parsing node_id"); |
421 | |||
422 | /* Get node CPUs list */ | ||
423 | cpu_set_t node_mask; | ||
424 | 1 | CPU_ZERO(&node_mask); | |
425 | char filename[64]; | ||
426 | 1 | snprintf(filename, 64, PATH_SYSTEM_NODE "/%.10s/cpulist", d->d_name); | |
427 | 1 | parse_mask_from_file(filename, &node_mask); | |
428 | |||
429 | /* Save node mask */ | ||
430 |
1/2✓ Branch 1 taken 1 times.
✗ Branch 2 not taken.
|
1 | if (CPU_COUNT(&node_mask) > 0) { |
431 | 1 | num_nodes = max_int(num_nodes, node_id + 1); | |
432 | 1 | mu_cpuset_t *p = realloc(sys.node_masks, num_nodes*sizeof(mu_cpuset_t)); | |
433 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | fatal_cond(!p, "realloc failed"); |
434 | 1 | sys.node_masks = p; | |
435 | 1 | mu_cpuset_from_glibc_sched_affinity(&sys.node_masks[node_id], &node_mask); | |
436 | } | ||
437 | } | ||
438 | } | ||
439 | 1 | closedir(dir); | |
440 | } | ||
441 | 1 | sys.num_nodes = num_nodes; | |
442 | |||
443 | /* Fallback if some info could not be parsed */ | ||
444 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | if (sys.sys_mask.count == 0) { |
445 | ✗ | int nproc_onln = sysconf(_SC_NPROCESSORS_ONLN); | |
446 | ✗ | fatal_cond(nproc_onln <= 0, "Cannot obtain system size. Contact us at " | |
447 | PACKAGE_BUGREPORT " or configure DLB with HWLOC support."); | ||
448 | ✗ | init_system(nproc_onln, nproc_onln, 1); | |
449 | } | ||
450 | 1 | } | |
451 | |||
452 | 114 | static void print_sys_info(void) { | |
453 | |||
454 |
2/2✓ Branch 0 taken 50 times.
✓ Branch 1 taken 64 times.
|
114 | verbose(VB_AFFINITY, "System mask: %s", mu_to_str(sys.sys_mask.set)); |
455 | |||
456 |
2/2✓ Branch 0 taken 118 times.
✓ Branch 1 taken 114 times.
|
232 | for (unsigned int node_id = 0; node_id < sys.num_nodes; ++node_id) { |
457 |
2/2✓ Branch 0 taken 13 times.
✓ Branch 1 taken 105 times.
|
118 | verbose(VB_AFFINITY, "Node %d mask: %s", |
458 | node_id, mu_to_str(sys.node_masks[node_id].set)); | ||
459 | } | ||
460 | |||
461 |
2/2✓ Branch 0 taken 1256 times.
✓ Branch 1 taken 114 times.
|
1370 | for (unsigned int core_id = 0; core_id < sys.num_cores; ++core_id) { |
462 |
2/2✓ Branch 0 taken 190 times.
✓ Branch 1 taken 1066 times.
|
1256 | verbose(VB_AFFINITY, "Core %d mask: %s", |
463 | core_id, mu_to_str(sys.core_masks_by_coreid[core_id].set)); | ||
464 | } | ||
465 | |||
466 | 114 | for (int cpuid = sys.sys_mask.first_cpuid; | |
467 |
2/2✓ Branch 0 taken 1330 times.
✓ Branch 1 taken 114 times.
|
1444 | cpuid >= 0; |
468 | 1330 | cpuid = mu_get_next_cpu(sys.sys_mask.set, cpuid)) { | |
469 | 1330 | const mu_cpuset_t *core_cpuset = sys.core_masks_by_cpuid[cpuid]; | |
470 |
3/4✓ Branch 0 taken 1328 times.
✓ Branch 1 taken 2 times.
✓ Branch 2 taken 1328 times.
✗ Branch 3 not taken.
|
1330 | if (core_cpuset && core_cpuset->set) { |
471 |
2/2✓ Branch 0 taken 214 times.
✓ Branch 1 taken 1114 times.
|
1328 | verbose(VB_AFFINITY, "CPU %d core mask: %s", |
472 | cpuid, mu_to_str(core_cpuset->set)); | ||
473 | } | ||
474 | } | ||
475 | 114 | } | |
476 | |||
477 | |||
478 | /*********************************************************************************/ | ||
479 | /* Mask utils public functions */ | ||
480 | /*********************************************************************************/ | ||
481 | |||
482 | 133 | void mu_init( void ) { | |
483 |
2/2✓ Branch 0 taken 59 times.
✓ Branch 1 taken 74 times.
|
133 | if ( !mu_initialized ) { |
484 | 59 | init_mu_struct(); | |
485 | |||
486 | #if defined IS_BGQ_MACHINE | ||
487 | enum { BGQ_NUM_CPUS = 64 }; | ||
488 | enum { BGQ_NUM_CORES = 16 }; | ||
489 | enum { BGQ_NUM_NODES = 1 }; | ||
490 | init_system(BGQ_NUM_CPUS, BGQ_NUM_CORES, BGQ_NUM_NODES); | ||
491 | #else | ||
492 | /* Try to parse HW info from HWLOC first */ | ||
493 |
1/2✗ Branch 1 not taken.
✓ Branch 2 taken 59 times.
|
59 | if (parse_hwloc() != 0) { |
494 | /* Fallback to system files if needed */ | ||
495 | ✗ | parse_system_files(); | |
496 | } | ||
497 | |||
498 | 59 | mu_initialized = true; | |
499 | #endif | ||
500 | 59 | print_sys_info(); | |
501 | } | ||
502 | 133 | } | |
503 | |||
504 | /* This function used to be declared as a destructor, but it may be dangerous | ||
505 | * with the OpenMP / DLB finalization at destruction time. */ | ||
506 | 55 | void mu_finalize( void ) { | |
507 | |||
508 | 55 | CPU_FREE(sys.sys_mask.set); | |
509 | |||
510 | /* Nodes */ | ||
511 |
2/2✓ Branch 0 taken 58 times.
✓ Branch 1 taken 55 times.
|
113 | for (unsigned int i = 0; i < sys.num_nodes; ++i) { |
512 | 58 | CPU_FREE(sys.node_masks[i].set); | |
513 | } | ||
514 | |||
515 | /* Cores per core id */ | ||
516 |
2/2✓ Branch 0 taken 558 times.
✓ Branch 1 taken 55 times.
|
613 | for (unsigned int i = 0; i < sys.num_cores; ++i) { |
517 | 558 | CPU_FREE(sys.core_masks_by_coreid[i].set); | |
518 | } | ||
519 | |||
520 | 55 | sys = (const mu_system_loc_t) {}; | |
521 | 55 | mu_initialized = false; | |
522 | 55 | mu_cpuset_setsize = CPU_SETSIZE; | |
523 | 55 | mu_cpuset_alloc_size = CPU_ALLOC_SIZE(CPU_SETSIZE); | |
524 | 55 | mu_cpuset_num_ulongs = CPU_ALLOC_SIZE(CPU_SETSIZE) / sizeof(unsigned long); | |
525 | 55 | } | |
526 | |||
527 | 8012 | int mu_get_system_size( void ) { | |
528 |
2/2✓ Branch 0 taken 24 times.
✓ Branch 1 taken 7988 times.
|
8012 | if (unlikely(!mu_initialized)) mu_init(); |
529 | 8012 | return sys.sys_mask.last_cpuid + 1; | |
530 | } | ||
531 | |||
532 | 98 | void mu_get_system_mask(cpu_set_t *mask) { | |
533 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 98 times.
|
98 | if (unlikely(!mu_initialized)) mu_init(); |
534 | 98 | CPU_ZERO(mask); | |
535 | 98 | memcpy(mask, sys.sys_mask.set, mu_cpuset_alloc_size); | |
536 | 98 | } | |
537 | |||
538 | 6 | int mu_get_system_hwthreads_per_core(void) { | |
539 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
|
6 | if (unlikely(!mu_initialized)) mu_init(); |
540 | 6 | return sys.core_masks_by_coreid[0].count; | |
541 | } | ||
542 | |||
543 | 51 | bool mu_system_has_smt(void) { | |
544 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 51 times.
|
51 | if (unlikely(!mu_initialized)) mu_init(); |
545 | 51 | return sys.core_masks_by_coreid[0].count > 1; | |
546 | } | ||
547 | |||
548 | 1 | int mu_get_num_cores(void) { | |
549 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | if (unlikely(!mu_initialized)) mu_init(); |
550 | 1 | return sys.num_cores; | |
551 | } | ||
552 | |||
553 | 9868 | int mu_get_core_id(int cpuid) { | |
554 | |||
555 |
3/4✓ Branch 0 taken 9852 times.
✓ Branch 1 taken 16 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 9852 times.
|
9868 | if (cpuid < 0 || (unsigned)cpuid > sys.num_cpus) return -1; |
556 | |||
557 |
2/2✓ Branch 0 taken 92120 times.
✓ Branch 1 taken 2 times.
|
92122 | for (unsigned int core_id = 0; core_id < sys.num_cores; ++core_id) { |
558 |
5/6✓ Branch 0 taken 92120 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 9850 times.
✓ Branch 3 taken 82270 times.
✓ Branch 4 taken 9850 times.
✓ Branch 5 taken 82270 times.
|
92120 | if (CPU_ISSET_S(cpuid, mu_cpuset_alloc_size, |
559 | sys.core_masks_by_coreid[core_id].set)) { | ||
560 | 9850 | return core_id; | |
561 | } | ||
562 | } | ||
563 | |||
564 | 2 | return -1; | |
565 | } | ||
566 | |||
567 | 247 | const mu_cpuset_t* mu_get_core_mask(int cpuid) { | |
568 | |||
569 |
3/4✓ Branch 0 taken 247 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 1 times.
✓ Branch 3 taken 246 times.
|
247 | if (cpuid < 0 || (unsigned)cpuid > sys.num_cpus) return NULL; |
570 | |||
571 | 246 | return sys.core_masks_by_cpuid[cpuid]; | |
572 | } | ||
573 | |||
574 | 25 | const mu_cpuset_t* mu_get_core_mask_by_coreid(int core_id) { | |
575 | |||
576 |
2/4✓ Branch 0 taken 25 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 25 times.
|
25 | if (core_id < 0 || (unsigned)core_id > sys.num_cores) return NULL; |
577 | |||
578 | 25 | return &sys.core_masks_by_coreid[core_id]; | |
579 | } | ||
580 | |||
581 | /* Return Mask of full NUMA nodes covering at least 1 CPU of cpuset: | ||
582 | * e.g.: | ||
583 | * node0: [0-3] | ||
584 | * node1: [4-7] | ||
585 | * cpuset: [1-7] | ||
586 | * returns [0-7] | ||
587 | */ | ||
588 | 25 | void mu_get_nodes_intersecting_with_cpuset(cpu_set_t *node_set, const cpu_set_t *cpuset) { | |
589 | |||
590 | 25 | CPU_ZERO(node_set); | |
591 |
2/2✓ Branch 0 taken 27 times.
✓ Branch 1 taken 25 times.
|
52 | for (unsigned int i=0; i<sys.num_nodes; ++i) { |
592 | cpu_set_t intxn; | ||
593 |
2/2✓ Branch 0 taken 27 times.
✓ Branch 1 taken 27 times.
|
54 | CPU_AND_S(mu_cpuset_alloc_size, &intxn, sys.node_masks[i].set, cpuset); |
594 |
2/2✓ Branch 1 taken 25 times.
✓ Branch 2 taken 2 times.
|
27 | if (CPU_COUNT_S(mu_cpuset_alloc_size, &intxn) > 0) { |
595 |
2/2✓ Branch 0 taken 25 times.
✓ Branch 1 taken 25 times.
|
50 | CPU_OR_S(mu_cpuset_alloc_size, node_set, node_set, sys.node_masks[i].set); |
596 | } | ||
597 | } | ||
598 | 25 | } | |
599 | |||
600 | /* Return Mask of full NUMA nodes containing all CPUs in cpuset: | ||
601 | * e.g.: | ||
602 | * node0: [0-3] | ||
603 | * node1: [4-7] | ||
604 | * cpuset: [1-7] | ||
605 | * returns [4-7] | ||
606 | */ | ||
607 | 2 | void mu_get_nodes_subset_of_cpuset(cpu_set_t *node_set, const cpu_set_t *cpuset) { | |
608 | |||
609 | 2 | CPU_ZERO(node_set); | |
610 |
2/2✓ Branch 0 taken 4 times.
✓ Branch 1 taken 2 times.
|
6 | for (unsigned int i=0; i<sys.num_nodes; ++i) { |
611 |
2/2✓ Branch 1 taken 2 times.
✓ Branch 2 taken 2 times.
|
4 | if (mu_is_subset(sys.node_masks[i].set, cpuset)) { |
612 |
2/2✓ Branch 0 taken 2 times.
✓ Branch 1 taken 2 times.
|
4 | CPU_OR_S(mu_cpuset_alloc_size, node_set, node_set, sys.node_masks[i].set); |
613 | } | ||
614 | } | ||
615 | 2 | } | |
616 | |||
617 | /* Return Mask of cores covering at least 1 CPU of cpuset: | ||
618 | * e.g.: | ||
619 | * core0: [0-1] | ||
620 | * core1: [2-3] | ||
621 | * cpuset: [1-3] | ||
622 | * returns [0-3] | ||
623 | */ | ||
624 | 2 | void mu_get_cores_intersecting_with_cpuset(cpu_set_t *core_set, const cpu_set_t *cpuset) { | |
625 | 2 | CPU_ZERO(core_set); | |
626 |
2/2✓ Branch 0 taken 8 times.
✓ Branch 1 taken 2 times.
|
10 | for (unsigned int core_id = 0; core_id < sys.num_cores; ++core_id) { |
627 | 8 | const mu_cpuset_t *core_cpuset = &sys.core_masks_by_coreid[core_id]; | |
628 | cpu_set_t intxn; | ||
629 |
2/2✓ Branch 0 taken 8 times.
✓ Branch 1 taken 8 times.
|
16 | CPU_AND_S(mu_cpuset_alloc_size, &intxn, core_cpuset->set, cpuset); |
630 |
2/2✓ Branch 1 taken 4 times.
✓ Branch 2 taken 4 times.
|
8 | if (CPU_COUNT_S(mu_cpuset_alloc_size, &intxn) > 0) { |
631 |
2/2✓ Branch 0 taken 4 times.
✓ Branch 1 taken 4 times.
|
8 | CPU_OR_S(mu_cpuset_alloc_size, core_set, core_set, core_cpuset->set); |
632 | } | ||
633 | } | ||
634 | 2 | } | |
635 | |||
636 | /* Return Mask of cores containing all CPUs in cpuset: | ||
637 | * e.g.: | ||
638 | * core0: [0-1] | ||
639 | * core1: [2-3] | ||
640 | * cpuset: [1-3] | ||
641 | * returns [2-3] | ||
642 | */ | ||
643 | 5 | void mu_get_cores_subset_of_cpuset(cpu_set_t *core_set, const cpu_set_t *cpuset) { | |
644 | 5 | CPU_ZERO(core_set); | |
645 |
2/2✓ Branch 0 taken 20 times.
✓ Branch 1 taken 5 times.
|
25 | for (unsigned int core_id = 0; core_id < sys.num_cores; ++core_id) { |
646 | 20 | const mu_cpuset_t *core_cpuset = &sys.core_masks_by_coreid[core_id]; | |
647 |
2/2✓ Branch 1 taken 9 times.
✓ Branch 2 taken 11 times.
|
20 | if (mu_is_subset(core_cpuset->set, cpuset)) { |
648 |
2/2✓ Branch 0 taken 9 times.
✓ Branch 1 taken 9 times.
|
18 | CPU_OR_S(mu_cpuset_alloc_size, core_set, core_set, core_cpuset->set); |
649 | } | ||
650 | } | ||
651 | 5 | } | |
652 | |||
653 | /* Return the next enabled CPU in mask that belongs to a core after the core | ||
654 | * of prev_cpu, or -1 if not found. */ | ||
655 | 42 | int mu_get_cpu_next_core(const cpu_set_t *mask, int prev_cpu) { | |
656 | |||
657 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 41 times.
|
42 | if (unlikely(prev_cpu < -1)) return -1; |
658 | |||
659 | 41 | int prev_core = mu_get_core_id(prev_cpu); | |
660 | 41 | int next_cpu = mu_get_next_cpu(mask, prev_cpu); | |
661 | 41 | int next_core = mu_get_core_id(next_cpu); | |
662 | |||
663 | 41 | while (next_cpu != -1 | |
664 |
4/4✓ Branch 0 taken 56 times.
✓ Branch 1 taken 15 times.
✓ Branch 2 taken 30 times.
✓ Branch 3 taken 26 times.
|
71 | && next_core <= prev_core) { |
665 | 30 | next_cpu = mu_get_next_cpu(mask, next_cpu); | |
666 | 30 | next_core = mu_get_core_id(next_cpu); | |
667 | } | ||
668 | |||
669 | 41 | return next_cpu; | |
670 | } | ||
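As a usage illustration of the iteration pattern this function enables, here is a minimal sketch (not part of the library; the helper name `visit_one_cpu_per_core` is hypothetical, and it assumes `mu_init()` has already populated the core topology):

```c
#include <sched.h>
#include "support/mask_utils.h"  /* assumed include path within the DLB tree */

/* Visit one enabled CPU per core: starting from -1 makes the first core in
 * 'mask' visible, and each subsequent call skips the rest of that core. */
static void visit_one_cpu_per_core(const cpu_set_t *mask) {
    for (int cpuid = mu_get_cpu_next_core(mask, -1);
            cpuid >= 0;
            cpuid = mu_get_cpu_next_core(mask, cpuid)) {
        /* cpuid is the first enabled CPU of a not yet visited core */
    }
}
```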
671 | |||
672 | /* We define as "complete" those cores for which all the CPUs defined by | ||
673 | * sys.core_masks_by_coreid are enabled. */ | ||
674 | |||
675 | /* Return the number of complete cores in the mask. | ||
676 | * e.g.: | ||
677 | * core0: [0-1] | ||
678 | * core1: [2-3] | ||
679 | * core2: [4-5] | ||
680 | * cpuset: [0-4] | ||
681 | * returns 2 | ||
682 | */ | ||
683 | 26 | int mu_count_cores(const cpu_set_t *mask) { | |
684 | |||
685 | 26 | int cores_count = 0; | |
686 | |||
687 |
2/2✓ Branch 0 taken 332 times.
✓ Branch 1 taken 26 times.
|
358 | for (unsigned int coreid = 0; coreid < sys.num_cores; coreid++) { |
688 | // Check if we have the complete set of CPUs from the core | ||
689 |
2/2✓ Branch 1 taken 154 times.
✓ Branch 2 taken 178 times.
|
332 | if (mu_is_subset(sys.core_masks_by_coreid[coreid].set, mask)) { |
690 | 154 | cores_count++; | |
691 | } | ||
692 | } | ||
693 | |||
694 | 26 | return cores_count; | |
695 | } | ||
696 | |||
697 | /* Return the id of the last complete core in the mask if any, otherwise return -1. | ||
698 | * e.g.: | ||
699 | * core0: [0-1] | ||
700 | * core1: [2-3] | ||
701 | * core2: [4-5] | ||
702 | * cpuset: [0-3] | ||
703 | * returns 1 (core1) | ||
704 | */ | ||
705 | 24 | int mu_get_last_coreid(const cpu_set_t *mask){ | |
706 |
2/2✓ Branch 0 taken 168 times.
✓ Branch 1 taken 2 times.
|
170 | for (int coreid = sys.num_cores-1; coreid >= 0 ; coreid--) { |
707 | // Check if we have the complete set of CPUs from the core | ||
708 |
2/2✓ Branch 1 taken 22 times.
✓ Branch 2 taken 146 times.
|
168 | if (mu_is_subset(sys.core_masks_by_coreid[coreid].set, mask)) { |
709 | 22 | return coreid; | |
710 | } | ||
711 | } | ||
712 | |||
713 | 2 | return -1; | |
714 | } | ||
715 | |||
716 | /* Disables the CPUs of the last complete core in the mask and returns its | ||
717 | * coreid if any, otherwise returns -1. | ||
718 | * e.g.: | ||
719 | * core0: [0-1] | ||
720 | * core1: [2-3] | ||
721 | * core2: [4-5] | ||
722 | * cpuset: [2-5] | ||
723 | * returns 2 (core2) | ||
724 | * updated cpuset: [2-3] | ||
725 | */ | ||
726 | 22 | int mu_take_last_coreid(cpu_set_t *mask) { | |
727 | 22 | int last_coreid = mu_get_last_coreid(mask); | |
728 |
2/2✓ Branch 0 taken 2 times.
✓ Branch 1 taken 20 times.
|
22 | if (last_coreid == -1) return -1; |
729 | 20 | mu_xor(mask, mask, sys.core_masks_by_coreid[last_coreid].set); | |
730 | 20 | return last_coreid; | |
731 | } | ||
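A hedged usage sketch combining `mu_count_cores` and `mu_take_last_coreid` (the helper `keep_at_most_n_cores` is hypothetical and not part of DLB; it assumes `mu_init()` has been called):

```c
/* Shrink 'mask' core by core, releasing complete cores from the end,
 * until at most 'max_cores' complete cores remain enabled. */
static void keep_at_most_n_cores(cpu_set_t *mask, int max_cores) {
    while (mu_count_cores(mask) > max_cores) {
        if (mu_take_last_coreid(mask) == -1) break;  /* no complete core left */
    }
}
```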
732 | |||
733 | /* Enables all the CPUs of the core | ||
734 | * e.g.: | ||
735 | * core0: [0-1] | ||
736 | * core1: [2-3] | ||
737 | * core2: [4-5] | ||
738 | * cpuset: [] | ||
739 | * coreid: 1 | ||
740 | * updated cpuset: [2-3] | ||
741 | */ | ||
742 | 2 | void mu_set_core(cpu_set_t *mask, int coreid){ | |
743 | 2 | mu_or(mask, mask, sys.core_masks_by_coreid[coreid].set); | |
744 | 2 | } | |
745 | |||
746 | /* Disables all the CPUs of the core | ||
747 | * e.g.: | ||
748 | * core0: [0-1] | ||
749 | * core1: [2-3] | ||
750 | * core2: [4-5] | ||
751 | * cpuset: [0-5] | ||
752 | * coreid: 1 | ||
753 | * updated cpuset: [0-1,4-5] | ||
754 | */ | ||
755 | 2 | void mu_unset_core(cpu_set_t *mask, int coreid){ | |
756 | 2 | mu_substract(mask, mask, sys.core_masks_by_coreid[coreid].set); | |
757 | 2 | } | |
758 | |||
759 | /* Basic mask utils functions that do not need to read the system's topology, | ||
760 | * i.e., mostly mask operations */ | ||
761 | |||
762 | 2 | void mu_zero(cpu_set_t *result) { | |
763 | 2 | CPU_ZERO_S(mu_cpuset_alloc_size, result); | |
764 | 2 | } | |
765 | |||
766 | 5 | void mu_and(cpu_set_t *result, const cpu_set_t *mask1, const cpu_set_t *mask2) { | |
767 |
2/2✓ Branch 0 taken 20 times.
✓ Branch 1 taken 5 times.
|
25 | CPU_AND_S(mu_cpuset_alloc_size, result, mask1, mask2); |
768 | 5 | } | |
769 | |||
770 | 16 | void mu_or(cpu_set_t *result, const cpu_set_t *mask1, const cpu_set_t *mask2) { | |
771 |
2/2✓ Branch 0 taken 31 times.
✓ Branch 1 taken 16 times.
|
47 | CPU_OR_S(mu_cpuset_alloc_size, result, mask1, mask2); |
772 | 16 | } | |
773 | |||
774 | 20 | void mu_xor (cpu_set_t *result, const cpu_set_t *mask1, const cpu_set_t *mask2) { | |
775 |
2/2✓ Branch 0 taken 20 times.
✓ Branch 1 taken 20 times.
|
40 | CPU_XOR_S(mu_cpuset_alloc_size, result, mask1, mask2); |
776 | 20 | } | |
777 | |||
778 | 4 | bool mu_equal(const cpu_set_t *mask1, const cpu_set_t *mask2) { | |
779 | 4 | return CPU_EQUAL_S(mu_cpuset_alloc_size, mask1, mask2) != 0; | |
780 | } | ||
781 | |||
782 | /* Returns true if all bits in subset are set in superset */ | ||
783 | 628 | bool mu_is_subset(const cpu_set_t *subset, const cpu_set_t *superset) { | |
784 | // The condition is true if the intersection is identical to subset | ||
785 | cpu_set_t intxn; | ||
786 |
2/2✓ Branch 0 taken 763 times.
✓ Branch 1 taken 628 times.
|
1391 | CPU_AND_S(mu_cpuset_alloc_size, &intxn, subset, superset); |
787 | 628 | return CPU_EQUAL_S(mu_cpuset_alloc_size, &intxn, subset); | |
788 | } | ||
789 | |||
790 | /* Returns true if all bits in superset are set in subset */ | ||
791 | 9 | bool mu_is_superset(const cpu_set_t *superset, const cpu_set_t *subset) { | |
792 | // The condition is true if the intersection is identical to subset | ||
793 | cpu_set_t intxn; | ||
794 |
2/2✓ Branch 0 taken 144 times.
✓ Branch 1 taken 9 times.
|
153 | CPU_AND_S(mu_cpuset_alloc_size, &intxn, superset, subset); |
795 | 9 | return CPU_EQUAL_S(mu_cpuset_alloc_size, &intxn, subset); | |
796 | } | ||
797 | |||
798 | /* Returns true if all bits in subset are set in superset and they're not equal */ | ||
799 | 16 | bool mu_is_proper_subset(const cpu_set_t *subset, const cpu_set_t *superset) { | |
800 | cpu_set_t intxn; | ||
801 |
2/2✓ Branch 0 taken 151 times.
✓ Branch 1 taken 16 times.
|
167 | CPU_AND_S(mu_cpuset_alloc_size, &intxn, subset, superset); |
802 | 16 | return CPU_EQUAL_S(mu_cpuset_alloc_size, &intxn, subset) | |
803 |
4/4✓ Branch 0 taken 12 times.
✓ Branch 1 taken 4 times.
✓ Branch 2 taken 9 times.
✓ Branch 3 taken 3 times.
|
16 | && !CPU_EQUAL_S(mu_cpuset_alloc_size, subset, superset); |
804 | } | ||
805 | |||
806 | /* Returns true if all bits in superset are set in subset and they're not equal */ | ||
807 | 19 | bool mu_is_proper_superset(const cpu_set_t *superset, const cpu_set_t *subset) { | |
808 | cpu_set_t intxn; | ||
809 |
2/2✓ Branch 0 taken 154 times.
✓ Branch 1 taken 19 times.
|
173 | CPU_AND_S(mu_cpuset_alloc_size, &intxn, superset, subset); |
810 | 19 | return CPU_EQUAL_S(mu_cpuset_alloc_size, &intxn, subset) | |
811 |
4/4✓ Branch 0 taken 9 times.
✓ Branch 1 taken 10 times.
✓ Branch 2 taken 6 times.
✓ Branch 3 taken 3 times.
|
19 | && !CPU_EQUAL_S(mu_cpuset_alloc_size, superset, subset); |
812 | } | ||
813 | |||
814 | /* Return true if any bit is present in both sets */ | ||
815 | 13 | bool mu_intersects(const cpu_set_t *mask1, const cpu_set_t *mask2) { | |
816 | cpu_set_t intxn; | ||
817 |
2/2✓ Branch 0 taken 148 times.
✓ Branch 1 taken 13 times.
|
161 | CPU_AND_S(mu_cpuset_alloc_size, &intxn, mask1, mask2); |
818 | 13 | return CPU_COUNT_S(mu_cpuset_alloc_size, &intxn) > 0; | |
819 | } | ||
820 | |||
821 | /* Return the number of bits set in mask */ | ||
822 | 59 | int mu_count(const cpu_set_t *mask) { | |
823 | 59 | return CPU_COUNT_S(mu_cpuset_alloc_size, mask); | |
824 | } | ||
825 | |||
826 | /* Return the minuend after subtracting the bits in the subtrahend */ | ||
827 | 622 | void mu_substract(cpu_set_t *result, const cpu_set_t *minuend, const cpu_set_t *substrahend) { | |
828 | cpu_set_t xor; | ||
829 |
2/2✓ Branch 0 taken 682 times.
✓ Branch 1 taken 622 times.
|
1304 | CPU_XOR_S(mu_cpuset_alloc_size, &xor, minuend, substrahend); |
830 |
2/2✓ Branch 0 taken 682 times.
✓ Branch 1 taken 622 times.
|
1304 | CPU_AND_S(mu_cpuset_alloc_size, result, minuend, &xor); |
831 | 622 | } | |
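The subtraction above relies on the bitwise identity A \ B = A AND (A XOR B); the truth-table check below (my own illustration) shows why XOR followed by AND clears exactly the subtrahend bits:

```c
/* For each bit a (minuend) and b (substrahend):
 *   a b | a^b | a & (a^b) | a & ~b
 *   0 0 |  0  |     0     |   0
 *   0 1 |  1  |     0     |   0
 *   1 0 |  1  |     1     |   1
 *   1 1 |  0  |     0     |   0
 * so CPU_XOR_S followed by CPU_AND_S leaves set in 'result' exactly the bits
 * that are set in 'minuend' but not in 'substrahend'. */
```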
832 | |||
833 | /* Return the one and only enabled CPU in mask, or -1 if count != 1 */ | ||
834 | 8 | int mu_get_single_cpu(const cpu_set_t *mask) { | |
835 |
2/2✓ Branch 1 taken 3 times.
✓ Branch 2 taken 5 times.
|
8 | if (CPU_COUNT_S(mu_cpuset_alloc_size, mask) == 1) { |
836 | 3 | return mu_get_first_cpu(mask); | |
837 | } | ||
838 | 5 | return -1; | |
839 | } | ||
840 | |||
841 | /* some of the following functions have been inspired by: | ||
842 | * https://github.com/open-mpi/hwloc/blob/master/hwloc/bitmap.c */ | ||
843 | |||
844 | /* Return the first enabled CPU in mask, or -1 if mask is empty */ | ||
845 | 3054 | int mu_get_first_cpu(const cpu_set_t *mask) { | |
846 | |||
847 |
2/2✓ Branch 0 taken 3147 times.
✓ Branch 1 taken 38 times.
|
3185 | for (unsigned int i = 0; i < mu_cpuset_num_ulongs; ++i) { |
848 | 3147 | unsigned long bits = mask->__bits[i]; | |
849 |
2/2✓ Branch 0 taken 3016 times.
✓ Branch 1 taken 131 times.
|
3147 | if (bits) { |
850 | 3016 | return ffsl(bits) - 1 + CPUS_PER_ULONG * i; | |
851 | } | ||
852 | } | ||
853 | |||
854 | 38 | return -1; | |
855 | } | ||
856 | |||
857 | /* Return the last enabled CPU in mask, or -1 if mask is empty */ | ||
858 | 928 | int mu_get_last_cpu(const cpu_set_t *mask) { | |
859 | |||
860 |
2/2✓ Branch 0 taken 1886 times.
✓ Branch 1 taken 1 times.
|
1887 | for (unsigned int i = mu_cpuset_num_ulongs; i-- > 0; ) { |
861 | 1886 | unsigned long bits = mask->__bits[i]; | |
862 |
2/2✓ Branch 0 taken 927 times.
✓ Branch 1 taken 959 times.
|
1886 | if (bits) { |
863 | /* glibc does not provide an fls function; there are more optimal | ||
864 | * solutions, but this function is not that critical */ | ||
865 | 927 | int cpuid = CPUS_PER_ULONG * i; | |
866 |
2/2✓ Branch 0 taken 20132 times.
✓ Branch 1 taken 927 times.
|
21059 | while (bits >>= 1) { |
867 | 20132 | ++cpuid; | |
868 | } | ||
869 | 927 | return cpuid; | |
870 | } | ||
871 | } | ||
872 | |||
873 | 1 | return -1; | |
874 | } | ||
875 | |||
876 | /* Return the next enabled CPU in mask after prev, or -1 if not found */ | ||
877 | 5471 | int mu_get_next_cpu(const cpu_set_t *mask, int prev) { | |
878 | |||
879 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 5470 times.
|
5471 | if (unlikely(prev < -1)) return -1; |
880 | |||
881 | 5470 | for (unsigned int i = (prev + 1) / CPUS_PER_ULONG; | |
882 |
2/2✓ Branch 0 taken 6289 times.
✓ Branch 1 taken 1845 times.
|
8134 | i < mu_cpuset_num_ulongs; ++i) { |
883 | 6289 | unsigned long bits = mask->__bits[i]; | |
884 | |||
885 | /* mask bitmap only if the previous cpu belongs to the current iteration */ | ||
886 |
4/4✓ Branch 0 taken 6287 times.
✓ Branch 1 taken 2 times.
✓ Branch 2 taken 5402 times.
✓ Branch 3 taken 885 times.
|
6289 | if (prev >= 0 && (unsigned)prev / CPUS_PER_ULONG == i) { |
887 | 5402 | bits &= ULONG_MAX << (prev % CPUS_PER_ULONG + 1); | |
888 | } | ||
889 | |||
890 |
2/2✓ Branch 0 taken 3625 times.
✓ Branch 1 taken 2664 times.
|
6289 | if (bits) { |
891 | 3625 | return ffsl(bits) - 1 + CPUS_PER_ULONG * i; | |
892 | } | ||
893 | } | ||
894 | |||
895 | 1845 | return -1; | |
896 | } | ||
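A short worked trace of the word arithmetic above (my own illustration, assuming 64-bit unsigned long, i.e. CPUS_PER_ULONG == 64):

```c
/* mu_get_next_cpu(mask, 5):
 *   start word   i = (5 + 1) / 64 = 0
 *   bits         = mask->__bits[0] & (ULONG_MAX << (5 % 64 + 1))
 *                = word 0 with bits 0..5 cleared
 *   if bits != 0, return ffsl(bits) - 1 + 64 * 0; otherwise continue with
 *   words 1, 2, ... unmasked, or return -1 when no remaining bit is set. */
```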
897 | |||
898 | /* Return the next unset CPU in mask after prev, or -1 if not found */ | ||
899 | 658 | int mu_get_next_unset(const cpu_set_t *mask, int prev) { | |
900 | |||
901 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 657 times.
|
658 | if (unlikely(prev < -1)) return -1; |
902 | |||
903 | 657 | for (unsigned int i = (prev + 1) / CPUS_PER_ULONG; | |
904 |
2/2✓ Branch 0 taken 699 times.
✓ Branch 1 taken 18 times.
|
717 | i < mu_cpuset_num_ulongs; ++i) { |
905 | 699 | unsigned long bits = ~(mask->__bits[i]); | |
906 | |||
907 | /* mask bitmap only if the previous cpu belongs to the current iteration */ | ||
908 |
4/4✓ Branch 0 taken 698 times.
✓ Branch 1 taken 1 times.
✓ Branch 2 taken 643 times.
✓ Branch 3 taken 55 times.
|
699 | if (prev >= 0 && (unsigned)prev / CPUS_PER_ULONG == i) { |
909 | 643 | bits &= ULONG_MAX << (prev % CPUS_PER_ULONG + 1); | |
910 | } | ||
911 | |||
912 |
2/2✓ Branch 0 taken 639 times.
✓ Branch 1 taken 60 times.
|
699 | if (bits) { |
913 | 639 | return ffsl(bits) - 1 + CPUS_PER_ULONG * i; | |
914 | } | ||
915 | } | ||
916 | |||
917 | 18 | return -1; | |
918 | } | ||
919 | |||
920 | // mu_to_str and mu_parse_mask functions are used by DLB utilities | ||
921 | // We export their dynamic symbols to avoid code duplication, | ||
922 | // although they do not belong to the public API | ||
923 | #pragma GCC visibility push(default) | ||
924 | 628 | const char* mu_to_str( const cpu_set_t *mask ) { | |
925 | |||
926 | static __thread char buffer[CPU_SETSIZE*4]; | ||
927 | 628 | char *b = buffer; | |
928 | 628 | *(b++) = '['; | |
929 | 628 | bool entry_made = false; | |
930 |
2/2✓ Branch 1 taken 644 times.
✓ Branch 2 taken 628 times.
|
1272 | for (int cpuid = mu_get_first_cpu(mask); cpuid >= 0; |
931 | 644 | cpuid = mu_get_next_cpu(mask, cpuid)) { | |
932 | |||
933 | /* Find interval distance */ | ||
934 | 644 | int next_unset = mu_get_next_unset(mask, cpuid); | |
935 | 1273 | int distance = next_unset > 0 ? next_unset - 1 - cpuid | |
936 |
2/2✓ Branch 0 taken 629 times.
✓ Branch 1 taken 15 times.
|
644 | : mu_get_last_cpu(mask) - cpuid; |
937 | |||
938 | /* Add ',' separator for subsequent entries */ | ||
939 |
2/2✓ Branch 0 taken 44 times.
✓ Branch 1 taken 600 times.
|
644 | if (entry_made) { |
940 | 44 | *(b++) = ','; | |
941 | } else { | ||
942 | 600 | entry_made = true; | |
943 | } | ||
944 | |||
945 | /* Write element, pair or range */ | ||
946 |
2/2✓ Branch 0 taken 415 times.
✓ Branch 1 taken 229 times.
|
644 | if (distance == 0) { |
947 | 415 | b += sprintf(b, "%d", cpuid); | |
948 |
2/2✓ Branch 0 taken 112 times.
✓ Branch 1 taken 117 times.
|
229 | } else if (distance == 1) { |
949 | 112 | b += sprintf(b, "%d,%d", cpuid, cpuid+1); | |
950 | 112 | ++cpuid; | |
951 | } else { | ||
952 | 117 | b += sprintf(b, "%d-%d", cpuid, cpuid+distance); | |
953 | 117 | cpuid += distance; | |
954 | } | ||
955 | } | ||
956 | 628 | *(b++) = ']'; | |
957 | 628 | *b = '\0'; | |
958 | |||
959 | 628 | return buffer; | |
960 | } | ||
961 | |||
962 | 39 | static void parse_64_bits_mask(cpu_set_t *mask, unsigned int offset, const char *str, int base) { | |
963 | 39 | unsigned long long number = strtoull(str, NULL, base); | |
964 | 39 | unsigned int i = offset; | |
965 |
3/4✓ Branch 0 taken 344 times.
✓ Branch 1 taken 39 times.
✓ Branch 2 taken 344 times.
✗ Branch 3 not taken.
|
383 | while (number > 0 && i < mu_cpuset_setsize) { |
966 |
2/2✓ Branch 0 taken 66 times.
✓ Branch 1 taken 278 times.
|
344 | if (number & 1) { |
967 |
1/2✓ Branch 0 taken 66 times.
✗ Branch 1 not taken.
|
66 | CPU_SET(i, mask); |
968 | } | ||
969 | 344 | ++i; | |
970 | 344 | number = number >> 1; | |
971 | } | ||
972 | 39 | } | |
973 | |||
974 | 204 | void mu_parse_mask( const char *str, cpu_set_t *mask ) { | |
975 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 204 times.
|
205 | if (!str) return; |
976 | |||
977 | 204 | size_t str_len = strnlen(str, CPU_SETSIZE+1); | |
978 |
3/4✓ Branch 0 taken 204 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 1 times.
✓ Branch 3 taken 203 times.
|
204 | if ( str_len == 0 || str_len > CPU_SETSIZE) return; |
979 | |||
980 | regex_t regex_bitmask; | ||
981 | regex_t regex_hexmask; | ||
982 | regex_t regex_range; | ||
983 | regex_t old_regex_bitmask; | ||
984 | 203 | CPU_ZERO( mask ); | |
985 | |||
986 | /* Compile regular expressions */ | ||
987 |
1/2✗ Branch 1 not taken.
✓ Branch 2 taken 203 times.
|
203 | if ( regcomp(®ex_bitmask, "^0[bB][0-1]+$", REG_EXTENDED|REG_NOSUB) ) { |
988 | ✗ | fatal0( "Could not compile regex"); | |
989 | } | ||
990 |
1/2✗ Branch 1 not taken.
✓ Branch 2 taken 203 times.
|
203 | if ( regcomp(®ex_hexmask, "^0[xX][0-9,a-f,A-F]+$", REG_EXTENDED|REG_NOSUB) ) { |
991 | ✗ | fatal0( "Could not compile regex"); | |
992 | } | ||
993 |
1/2✗ Branch 1 not taken.
✓ Branch 2 taken 203 times.
|
203 | if ( regcomp(®ex_range, "^[0-9,-]+$", REG_EXTENDED|REG_NOSUB) ) { |
994 | ✗ | fatal0( "Could not compile regex"); | |
995 | } | ||
996 | |||
997 | /***** Deprecated *****/ | ||
998 |
1/2✗ Branch 1 not taken.
✓ Branch 2 taken 203 times.
|
203 | if ( regcomp(&old_regex_bitmask, "^[0-1][0-1]+[bB]$", REG_EXTENDED|REG_NOSUB) ) { |
999 | ✗ | fatal0( "Could not compile regex"); | |
1000 | } | ||
1001 | /* Regular expression matches OLD bitmask, e.g.: 11110011b */ | ||
1002 |
2/2✓ Branch 1 taken 3 times.
✓ Branch 2 taken 200 times.
|
203 | if ( !regexec(&old_regex_bitmask, str, 0, NULL, 0) ) { |
1003 | 3 | warning("The binary form xxxxb is deprecated, please use 0bxxxx."); | |
1004 | // Parse | ||
1005 |
2/2✓ Branch 0 taken 19 times.
✓ Branch 1 taken 3 times.
|
22 | for (unsigned int i=0; i<str_len; i++) { |
1006 |
3/4✓ Branch 0 taken 4 times.
✓ Branch 1 taken 15 times.
✓ Branch 2 taken 4 times.
✗ Branch 3 not taken.
|
19 | if ( str[i] == '1' && i < mu_cpuset_setsize ) { |
1007 |
1/2✓ Branch 0 taken 4 times.
✗ Branch 1 not taken.
|
4 | CPU_SET( i, mask ); |
1008 | } | ||
1009 | } | ||
1010 | } | ||
1011 | /**********************/ | ||
1012 | |||
1013 | /* Regular expression matches bitmask, e.g.: 0b11100001 */ | ||
1014 |
2/2✓ Branch 1 taken 9 times.
✓ Branch 2 taken 191 times.
|
200 | else if ( !regexec(®ex_bitmask, str, 0, NULL, 0) ) { |
1015 | /* Ignore '0b' */ | ||
1016 | 9 | str += 2; | |
1017 |
2/2✓ Branch 0 taken 6 times.
✓ Branch 1 taken 3 times.
|
9 | if (strlen(str) <= 64) { |
1018 | 6 | parse_64_bits_mask(mask, 0, str, 2); | |
1019 | } else { | ||
1020 | /* parse in chunks of 64 bits */ | ||
1021 | 3 | char *str_copy = strdup(str); | |
1022 | char *start_ptr; | ||
1023 | 3 | char *end_ptr = str_copy + strlen(str_copy); | |
1024 | 3 | unsigned int offset = 0; | |
1025 | do { | ||
1026 |
2/2✓ Branch 0 taken 10 times.
✓ Branch 1 taken 2 times.
|
12 | start_ptr = strlen(str_copy) < 64 ? str_copy : end_ptr - 64; |
1027 | 12 | parse_64_bits_mask(mask, offset, start_ptr, 2); | |
1028 | 12 | offset += 64; | |
1029 | 12 | end_ptr = start_ptr; | |
1030 | 12 | *end_ptr = '\0'; | |
1031 |
2/2✓ Branch 0 taken 9 times.
✓ Branch 1 taken 3 times.
|
12 | } while (strlen(str_copy) > 0); |
1032 | 3 | free(str_copy); | |
1033 | } | ||
1034 | } | ||
1035 | |||
1036 | /* Regular expression matches hexmask, e.g.: 0xE1 */ | ||
1037 |
2/2✓ Branch 1 taken 10 times.
✓ Branch 2 taken 181 times.
|
191 | else if ( !regexec(®ex_hexmask, str, 0, NULL, 0) ) { |
1038 | /* Ignore '0x' */ | ||
1039 | 10 | str += 2; | |
1040 |
2/2✓ Branch 0 taken 7 times.
✓ Branch 1 taken 3 times.
|
10 | if (strlen(str) <= 16) { |
1041 | 7 | parse_64_bits_mask(mask, 0, str, 16); | |
1042 | } else { | ||
1043 | /* parse in chunks of 64 bits (16 hex digits) */ | ||
1044 | 3 | char *str_copy = strdup(str); | |
1045 | char *start_ptr; | ||
1046 | 3 | char *end_ptr = str_copy + strlen(str_copy); | |
1047 | 3 | unsigned int offset = 0; | |
1048 | do { | ||
1049 |
2/2✓ Branch 0 taken 12 times.
✓ Branch 1 taken 2 times.
|
14 | start_ptr = strlen(str_copy) < 16 ? str_copy : end_ptr - 16; |
1050 | 14 | parse_64_bits_mask(mask, offset, start_ptr, 16); | |
1051 | 14 | offset += 64; | |
1052 | 14 | end_ptr = start_ptr; | |
1053 | 14 | *end_ptr = '\0'; | |
1054 |
2/2✓ Branch 0 taken 11 times.
✓ Branch 1 taken 3 times.
|
14 | } while (strlen(str_copy) > 0); |
1055 | 3 | free(str_copy); | |
1056 | } | ||
1057 | } | ||
1058 | |||
1059 | /* Regular expression matches range, e.g.: 0,5-7 */ | ||
1060 |
1/2✓ Branch 1 taken 181 times.
✗ Branch 2 not taken.
|
181 | else if ( !regexec(®ex_range, str, 0, NULL, 0) ) { |
1061 | // Parse | ||
1062 | 181 | const char *ptr = str; | |
1063 | char *endptr; | ||
1064 |
2/2✓ Branch 0 taken 280 times.
✓ Branch 1 taken 181 times.
|
461 | while ( ptr < str+strlen(str) ) { |
1065 | // Discard junk at the left | ||
1066 |
2/2✓ Branch 0 taken 16 times.
✓ Branch 1 taken 264 times.
|
280 | if ( !isdigit(*ptr) ) { ptr++; continue; } |
1067 | |||
1068 | 264 | unsigned long start_ = strtoul( ptr, &endptr, 10 ); | |
1069 | 264 | unsigned long start = start_ < mu_cpuset_setsize ? start_ : mu_cpuset_setsize; | |
1070 | 264 | ptr = endptr; | |
1071 | |||
1072 | // Single element | ||
1073 |
5/6✓ Branch 0 taken 197 times.
✓ Branch 1 taken 67 times.
✓ Branch 2 taken 83 times.
✓ Branch 3 taken 114 times.
✓ Branch 4 taken 150 times.
✗ Branch 5 not taken.
|
264 | if ( (*ptr == ',' || *ptr == '\0') && start < mu_cpuset_setsize ) { |
1074 |
1/2✓ Branch 0 taken 150 times.
✗ Branch 1 not taken.
|
150 | CPU_SET( start, mask ); |
1075 | 150 | ptr++; | |
1076 | 150 | continue; | |
1077 | } | ||
1078 | // Range | ||
1079 |
1/2✓ Branch 0 taken 114 times.
✗ Branch 1 not taken.
|
114 | else if ( *ptr == '-' ) { |
1080 | // Discard '-' and possible junk | ||
1081 | 114 | ptr++; | |
1082 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 114 times.
|
114 | if ( !isdigit(*ptr) ) { ptr++; continue; } |
1083 | |||
1084 | 114 | unsigned long end_ = strtoul( ptr, &endptr, 10 ); | |
1085 | 114 | unsigned long end = end_ < mu_cpuset_setsize ? end_ : mu_cpuset_setsize; | |
1086 | 114 | ptr = endptr; | |
1087 | |||
1088 | // Valid range | ||
1089 |
1/2✓ Branch 0 taken 114 times.
✗ Branch 1 not taken.
|
114 | if ( end > start ) { |
1090 |
3/4✓ Branch 0 taken 1658 times.
✓ Branch 1 taken 114 times.
✓ Branch 2 taken 1658 times.
✗ Branch 3 not taken.
|
1772 | for ( unsigned long i=start; i<=end && i<mu_cpuset_setsize; i++ ) { |
1091 |
1/2✓ Branch 0 taken 1658 times.
✗ Branch 1 not taken.
|
1658 | CPU_SET( i, mask ); |
1092 | } | ||
1093 | } | ||
1094 | 114 | continue; | |
1095 | } | ||
1096 | // Unexpected token | ||
1097 | else { } | ||
1098 | } | ||
1099 | } | ||
1100 | /* Regular expression does not match */ | ||
1101 | else { } | ||
1102 | |||
1103 | 203 | regfree(®ex_bitmask); | |
1104 | 203 | regfree(®ex_hexmask); | |
1105 | 203 | regfree(®ex_range); | |
1106 | 203 | regfree(&old_regex_bitmask); | |
1107 | |||
1108 |
1/2✗ Branch 1 not taken.
✓ Branch 2 taken 203 times.
|
203 | if ( CPU_COUNT(mask) == 0 ) { |
1109 | ✗ | warning( "Parsed mask \"%s\" does not seem to be a valid mask\n", str ); | |
1110 | } | ||
1111 | } | ||
1112 | #pragma GCC visibility pop | ||
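As a usage sketch of the two exported helpers (the wrapper `parse_examples` is hypothetical; the three spellings are the ones named in the comments above), each call below should select CPUs 0 and 5-7 on a machine with at least 8 CPUs:

```c
#include <sched.h>
#include "support/mask_utils.h"  /* assumed include path within the DLB tree */

static void parse_examples(void) {
    cpu_set_t mask;
    mu_parse_mask("0,5-7", &mask);       /* range list                   */
    mu_parse_mask("0b11100001", &mask);  /* binary bitmask, LSB is CPU 0 */
    mu_parse_mask("0xE1", &mask);        /* hexadecimal bitmask          */
    /* mu_to_str(&mask) would render any of them as "[0,5-7]" */
}
```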
1113 | |||
1114 | /* Equivalent to mu_to_str, but generates a quoted string in str of up to namelen-1 bytes */ | ||
1115 | 8 | void mu_get_quoted_mask(const cpu_set_t *mask, char *str, size_t namelen) { | |
1116 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 8 times.
|
8 | if (namelen < 2) |
1117 | ✗ | return; | |
1118 | |||
1119 | 8 | char *b = str; | |
1120 | 8 | *(b++) = '"'; | |
1121 | 8 | size_t bytes = 1; | |
1122 | 8 | bool entry_made = false; | |
1123 |
2/2✓ Branch 1 taken 8 times.
✓ Branch 2 taken 8 times.
|
16 | for (int cpuid = mu_get_first_cpu(mask); cpuid >= 0; |
1124 | 8 | cpuid = mu_get_next_cpu(mask, cpuid)) { | |
1125 | |||
1126 | /* Find interval distance */ | ||
1127 | 8 | int next_unset = mu_get_next_unset(mask, cpuid); | |
1128 | 14 | int distance = next_unset > 0 ? next_unset - 1 - cpuid | |
1129 |
2/2✓ Branch 0 taken 6 times.
✓ Branch 1 taken 2 times.
|
8 | : mu_get_last_cpu(mask) - cpuid; |
1130 | |||
1131 | /* Add ',' separator for subsequent entries */ | ||
1132 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 7 times.
|
8 | if (entry_made) { |
1133 |
1/2✓ Branch 0 taken 1 times.
✗ Branch 1 not taken.
|
1 | if (bytes+1 < namelen) { |
1134 | 1 | *(b++) = ','; | |
1135 | 1 | ++bytes; | |
1136 | } | ||
1137 | } else { | ||
1138 | 7 | entry_made = true; | |
1139 | } | ||
1140 | |||
1141 | /* Write element, pair or range */ | ||
1142 | 2/2 [3,5] | 8 | if (distance == 0) { |
1143 | 3 | int len = snprintf(NULL, 0, "%d", cpuid); | |
1144 | 1/2 [3,✗] | 3 | if (bytes+len < namelen) { |
1145 | 3 | b += sprintf(b, "%d", cpuid); | |
1146 | 3 | bytes += len; | |
1147 | } | ||
1148 | 2/2 [3,2] | 5 | } else if (distance == 1) { |
1149 | 3 | int len = snprintf(NULL, 0, "%d,%d", cpuid, cpuid+1); | |
1150 | 1/2 [3,✗] | 3 | if (bytes+len < namelen) { |
1151 | 3 | b += sprintf(b, "%d,%d", cpuid, cpuid+1); | |
1152 | 3 | bytes += len; | |
1153 | 3 | ++cpuid; | |
1154 | } | ||
1155 | } else { | ||
1156 | 2 | int len = snprintf(NULL, 0, "%d-%d", cpuid, cpuid+distance); | |
1157 | 1/2 [2,✗] | 2 | if (bytes+len < namelen) { |
1158 | 2 | b += sprintf(b, "%d-%d", cpuid, cpuid+distance); | |
1159 | 2 | bytes += len; | |
1160 | 2 | cpuid += distance; | |
1161 | } | ||
1162 | } | ||
1163 | } | ||
1164 | 1/2 [8,✗] | 8 | if (bytes+1 < namelen) { |
1165 | 8 | *(b++) = '"'; | |
1166 | 8 | ++bytes; | |
1167 | } | ||
1168 | 8 | *b = '\0'; | |
1169 | } | ||
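To make the element/pair/range cases above concrete, a hedged usage sketch; the buffer size and the exact expected string are assumptions derived from the logic above:

#include <sched.h>
#include <stdio.h>
#include "support/mask_utils.h"

int main(void) {
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(0, &mask); CPU_SET(1, &mask); CPU_SET(2, &mask);  /* contiguous -> range "0-2" */
    CPU_SET(5, &mask);                                        /* isolated   -> single "5"  */

    char buf[32];
    mu_get_quoted_mask(&mask, buf, sizeof(buf));
    printf("%s\n", buf);   /* expected to print "0-2,5", quotes included */
    return 0;
}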
1170 | |||
1171 | 1 | char * mu_parse_to_slurm_format(const cpu_set_t *mask) { | |
1172 | 1 | char *str = malloc((mu_cpuset_setsize >> 2) + 3); | |
1173 | if (str == NULL) | ||
1174 | return NULL; | ||
1175 | 1 | unsigned int offset = 2; | |
1176 | 1 | unsigned long long val = 0; | |
1177 | 1 | const int threshold = 4; | |
1178 | 1 | sprintf(str, "0x"); | |
1179 | 2/2 [130,1] | 131 | for (int cpuid = mu_get_last_cpu(mask); cpuid >= 0; --cpuid) { |
1180 | 5/6 [130,✗,9,121,9,121] | 130 | if(CPU_ISSET(cpuid, mask)) { |
1181 | 9 | val |= 1 << (cpuid % threshold); | |
1182 | } | ||
1183 | 4/4 [129,1,32,97] | 130 | if (cpuid > 0 && cpuid % threshold == 0) { |
1184 | 32 | sprintf(str+offset, "%llx", val); | |
1185 | 32 | val = 0; | |
1186 | 32 | offset++; | |
1187 | } | ||
1188 | } | ||
1189 | 1 | sprintf(str+offset, "%llx", val); | |
1190 | 1 | return str; | |
1191 | } | ||
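The returned string is heap-allocated, so the caller is expected to free it. A hedged usage sketch; the exact hex rendering is an assumption derived from the nibble loop above:

#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include "support/mask_utils.h"

int main(void) {
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(0, &mask);
    CPU_SET(5, &mask);

    char *slurm = mu_parse_to_slurm_format(&mask);
    if (slurm != NULL) {
        printf("%s\n", slurm);   /* nibble-wise hex; roughly "0x21" for CPUs {0,5} */
        free(slurm);
    }
    return 0;
}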
1192 | |||
1193 | 4 | bool mu_equivalent_masks(const char *str1, const char *str2) { | |
1194 | cpu_set_t mask1, mask2; | ||
1195 | 4 | mu_parse_mask(str1, &mask1); | |
1196 | 4 | mu_parse_mask(str2, &mask2); | |
1197 | 4 | return CPU_EQUAL(&mask1, &mask2); | |
1198 | } | ||
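Since both strings go through the same parser, different spellings of the same CPU set compare equal. A small hedged example:

#include <assert.h>
#include <stdbool.h>
#include "support/mask_utils.h"

static void check_equivalence(void) {
    /* both spellings describe CPUs 0-3, so the parsed masks compare equal */
    bool same = mu_equivalent_masks("0-3", "0,1,2,3");
    assert(same);
}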
1199 | |||
1200 | |||
1201 | 2733 | static int cmp_cpuids(cpuid_t cpuid1, cpuid_t cpuid2) { | |
1202 | 2733 | int cpu1_core_id = mu_get_core_id(cpuid1); | |
1203 | 2733 | int cpu2_core_id = mu_get_core_id(cpuid2); | |
1204 | 2/2 [438,2295] | 2733 | if (cpu1_core_id == cpu2_core_id) { |
2733 | if (cpu1_core_id == cpu2_core_id) { |
1205 | 438 | return cpuid1 - cpuid2; | |
1206 | } else { | ||
1207 | 2295 | return cpu1_core_id - cpu2_core_id; | |
1208 | } | ||
1209 | } | ||
1210 | |||
1211 | /* Compare CPUs so that: | ||
1212 | * - owned CPUs first, in ascending order | ||
1213 | * - non-owned later, starting from the first owned, then ascending | ||
1214 | * e.g.: system: [0-7], owned: [3-5] | ||
1215 | * cpu_list = {3,4,5,6,7,0,1,2} | ||
1216 | */ | ||
1217 | 1539 | int mu_cmp_cpuids_by_ownership(const void *cpuid1, const void *cpuid2, void *mask) { | |
1218 | /* Expand arguments */ | ||
1219 | 1539 | cpuid_t _cpuid1 = *(cpuid_t*)cpuid1; | |
1220 | 1539 | cpuid_t _cpuid2 = *(cpuid_t*)cpuid2; | |
1221 | 1539 | cpu_set_t *process_mask = mask; | |
1222 | |||
1223 | 5/6 [1539,✗,733,806,733,806] | 1539 | if (CPU_ISSET(_cpuid1, process_mask)) { |
1224 | 5/6 [733,✗,525,208,525,208] | 733 | if (CPU_ISSET(_cpuid2, process_mask)) { |
1225 | /* both CPUs are owned: ascending order */ | ||
1226 | 525 | return cmp_cpuids(_cpuid1, _cpuid2); | |
1227 | } else { | ||
1228 | /* cpuid2 is NOT owned and cpuid1 IS */ | ||
1229 | 208 | return -1; | |
1230 | } | ||
1231 | } else { | ||
1232 | 5/6 [806,✗,223,583,223,583] | 806 | if (CPU_ISSET(_cpuid2, process_mask)) { |
1233 | /* cpuid2 IS owned and cpuid1 is NOT */ | ||
1234 | 223 | return 1; | |
1235 | } else { | ||
1236 | /* none is owned */ | ||
1237 | 583 | int first_cpu = mu_get_first_cpu(process_mask); | |
1238 | 583 | int first_core = mu_get_core_id(first_cpu); | |
1239 | 583 | int cpu1_core_id = mu_get_core_id(_cpuid1); | |
1240 | 583 | int cpu2_core_id = mu_get_core_id(_cpuid2); | |
1241 | 2/2 [351,232] | 583 | if ((cpu1_core_id > first_core |
1242 | 2/2 [47,304] | 351 | && cpu2_core_id > first_core) |
1243 | 2/2 [232,47] | 279 | || (cpu1_core_id < first_core |
1244 | 2/2 [174,58] | 232 | && cpu2_core_id < first_core)) { |
1245 | /* Both CPUs are either before or after the process mask */ | ||
1246 | 478 | return cmp_cpuids(_cpuid1, _cpuid2); | |
1247 | } else { | ||
1248 | /* Compare with respect to process mask */ | ||
1249 | 105 | return cmp_cpuids(first_cpu, _cpuid1); | |
1250 | } | ||
1251 | } | ||
1252 | } | ||
1253 | } | ||
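The three-argument comparator matches glibc's qsort_r signature, so it can presumably be used as in this hedged sketch; sort_by_ownership is an illustrative helper, not part of the library, and cpuid_t is the DLB typedef already used above:

#define _GNU_SOURCE
#include <sched.h>
#include <stdlib.h>
#include "support/mask_utils.h"

/* Illustrative helper: sort an array of CPU ids so that the CPUs present
 * in process_mask come first, per the comparator above (glibc qsort_r). */
static void sort_by_ownership(cpuid_t *cpus, size_t ncpus, cpu_set_t *process_mask) {
    qsort_r(cpus, ncpus, sizeof(cpuid_t),
            mu_cmp_cpuids_by_ownership, process_mask);
}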
1254 | |||
1255 | /* Compare CPUs so that: | ||
1256 | * - CPUs are sorted according to the affinity array: | ||
1257 | * * affinity: array of cpu_set_t, each position represents a level | ||
1258 | * in the affinity, the last position is an empty cpu set. | ||
1259 | * (PRE: each affinity level is a superset of the previous level mask) | ||
1260 | * - Sorted by affinity level in ascending order | ||
1261 | * - non-owned later, starting from the first owned, then ascending | ||
1262 | * e.g.: affinity: {{6-7}, {4-7}, {0-7}, {}} | ||
1263 | * sorted_cpu_list = {6,7,4,5,0,1,2,3} | ||
1264 | */ | ||
1265 | 2295 | int mu_cmp_cpuids_by_affinity(const void *cpuid1, const void *cpuid2, void *affinity) { | |
1266 | /* Expand arguments */ | ||
1267 | 2295 | cpuid_t _cpuid1 = *(cpuid_t*)cpuid1; | |
1268 | 2295 | cpuid_t _cpuid2 = *(cpuid_t*)cpuid2; | |
1269 | 2295 | cpu_set_t *_affinity = affinity; | |
1270 | |||
1271 | /* Find affinity level of each CPU */ | ||
1272 | 2295 | int cpu1_level = 0; | |
1273 | 2295 | int cpu2_level = 0; | |
1274 | 2295 | cpu_set_t *mask = _affinity; | |
1275 | 2/2 [5301,2295] | 7596 | while(CPU_COUNT(mask) > 0) { |
1276 | 5/6 [5301,✗,3458,1843,1843,3458] | 5301 | if (!CPU_ISSET(_cpuid1, mask)) { |
1277 | 1843 | ++cpu1_level; | |
1278 | } | ||
1279 | 5/6 [5301,✗,3414,1887,1887,3414] | 5301 | if (!CPU_ISSET(_cpuid2, mask)) { |
1280 | 1887 | ++cpu2_level; | |
1281 | } | ||
1282 | 5301 | ++mask; | |
1283 | } | ||
1284 | |||
1285 | /* If levels differ, sort levels in ascending order */ | ||
1286 | 2/2 [670,1625] | 2295 | if (cpu1_level != cpu2_level) { |
1287 | 670 | return cpu1_level - cpu2_level; | |
1288 | } | ||
1289 | |||
1290 | /* If both are level 0, sort in ascending order */ | ||
1291 | 2/2 [672,953] | 1625 | if (cpu1_level == 0) { |
1292 | 672 | return cmp_cpuids(_cpuid1, _cpuid2); | |
1293 | } | ||
1294 | |||
1295 | /* If both are level 1, sort from the first CPU in level 0 */ | ||
1296 | /* e.g.: level0: [2,3], level1: [0,7] -> [4,5,6,7,0,1] */ | ||
1297 | 2/2 [672,281] | 953 | if (cpu1_level == 1) { |
1298 | 672 | cpu_set_t *level0_mask = _affinity; | |
1299 | 672 | int first_cpu = mu_get_first_cpu(level0_mask); | |
1300 | 672 | int first_core = mu_get_core_id(first_cpu); | |
1301 | 672 | int cpu1_core_id = mu_get_core_id(_cpuid1); | |
1302 | 672 | int cpu2_core_id = mu_get_core_id(_cpuid2); | |
1303 | 2/2 [520,152] | 672 | if ((cpu1_core_id > first_core |
1304 | 2/2 [16,504] | 520 | && cpu2_core_id > first_core) |
1305 | 2/2 [152,16] | 168 | || (cpu1_core_id < first_core |
1306 | 2/2 [83,69] | 152 | && cpu2_core_id < first_core)) { |
1307 | /* Both CPUs are either before or after the process mask */ | ||
1308 | 587 | return cmp_cpuids(_cpuid1, _cpuid2); | |
1309 | } else { | ||
1310 | /* Compare with respect to process mask */ | ||
1311 | 85 | return cmp_cpuids(first_cpu, _cpuid1); | |
1312 | } | ||
1313 | } | ||
1314 | |||
1315 | /* TODO: compute numa distance */ | ||
1316 | /* Levels 2+, sort in ascending order */ | ||
1317 | 281 | return cmp_cpuids(_cpuid1, _cpuid2); | |
1318 | } | ||
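To make the affinity-array contract concrete, a hedged sketch that reproduces the example from the comment above; it assumes mask utils have been initialized first (for instance via mu_testing_set_sys_size(8) from the testing helpers below) so that mu_get_core_id resolves core ids, and it uses glibc qsort_r:

#define _GNU_SOURCE
#include <sched.h>
#include <stdlib.h>
#include "support/mask_utils.h"

static void affinity_sort_example(void) {
    /* affinity levels: {6-7} inside {4-7} inside {0-7}, plus an empty terminator */
    cpu_set_t affinity[4];
    CPU_ZERO(&affinity[0]);
    CPU_SET(6, &affinity[0]);
    CPU_SET(7, &affinity[0]);
    CPU_ZERO(&affinity[1]);
    for (int i = 4; i <= 7; ++i) CPU_SET(i, &affinity[1]);
    CPU_ZERO(&affinity[2]);
    for (int i = 0; i <= 7; ++i) CPU_SET(i, &affinity[2]);
    CPU_ZERO(&affinity[3]);                       /* empty set terminates the array */

    cpuid_t cpus[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    qsort_r(cpus, 8, sizeof(cpuid_t), mu_cmp_cpuids_by_affinity, affinity);
    /* per the comment above, cpus should now read {6,7,4,5,0,1,2,3} */
}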
1319 | |||
1320 | |||
1321 | /*********************************************************************************/ | ||
1322 | /* Mask utils testing functions */ | ||
1323 | /*********************************************************************************/ | ||
1324 | |||
1325 | 2 | bool mu_testing_is_initialized(void) { | |
1326 | 2 | return mu_initialized; | |
1327 | } | ||
1328 | |||
1329 | 41 | void mu_testing_set_sys_size(int size) { | |
1330 | 41 | init_system(size, size, 1); | |
1331 | 41 | print_sys_info(); | |
1332 | 41 | } | |
1333 | |||
1334 | 2 | void mu_testing_set_sys(unsigned int num_cpus, unsigned int num_cores, | |
1335 | unsigned int num_nodes) { | ||
1336 | 2 | init_system(num_cpus, num_cores, num_nodes); | |
1337 | 2 | print_sys_info(); | |
1338 | 2 | } | |
1339 | |||
1340 | 11 | void mu_testing_set_sys_masks(const cpu_set_t *sys_mask, | |
1341 | const cpu_set_t *core_masks, unsigned int num_cores, | ||
1342 | const cpu_set_t *node_masks, unsigned int num_nodes) { | ||
1343 | 11 | init_system_masks(sys_mask, core_masks, num_cores, node_masks, num_nodes); | |
1344 | 11 | print_sys_info(); | |
1345 | 11 | } | |
1346 | |||
1347 | 1 | void mu_testing_init_nohwloc(void) { | |
1348 | 1 | init_mu_struct(); | |
1349 | 1 | parse_system_files(); | |
1350 | 1 | mu_initialized = true; | |
1351 | 1 | print_sys_info(); | |
1352 | 1 | } | |
1353 |