Line | Branch | Exec | Source |
---|---|---|---|
1 | /*********************************************************************************/ | ||
2 | /* Copyright 2009-2024 Barcelona Supercomputing Center */ | ||
3 | /* */ | ||
4 | /* This file is part of the DLB library. */ | ||
5 | /* */ | ||
6 | /* DLB is free software: you can redistribute it and/or modify */ | ||
7 | /* it under the terms of the GNU Lesser General Public License as published by */ | ||
8 | /* the Free Software Foundation, either version 3 of the License, or */ | ||
9 | /* (at your option) any later version. */ | ||
10 | /* */ | ||
11 | /* DLB is distributed in the hope that it will be useful, */ | ||
12 | /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ | ||
13 | /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ | ||
14 | /* GNU Lesser General Public License for more details. */ | ||
15 | /* */ | ||
16 | /* You should have received a copy of the GNU Lesser General Public License */ | ||
17 | /* along with DLB. If not, see <https://www.gnu.org/licenses/>. */ | ||
18 | /*********************************************************************************/ | ||
19 | |||
20 | #include "LB_comm/shmem_cpuinfo.h" | ||
21 | |||
22 | #include "LB_comm/shmem.h" | ||
23 | #include "LB_core/spd.h" | ||
24 | #include "apis/dlb_errors.h" | ||
25 | #include "apis/dlb_types.h" | ||
26 | #include "support/debug.h" | ||
27 | #include "support/types.h" | ||
28 | #include "support/mytime.h" | ||
29 | #include "support/tracing.h" | ||
30 | #include "support/options.h" | ||
31 | #include "support/mask_utils.h" | ||
32 | #include "support/queues.h" | ||
33 | #include "support/small_array.h" | ||
34 | #include "support/atomic.h" | ||
35 | |||
36 | #include <limits.h> | ||
37 | #include <sched.h> | ||
38 | #include <unistd.h> | ||
39 | #include <string.h> | ||
40 | #include <sys/types.h> | ||
41 | #include <sys/ioctl.h> | ||
42 | |||
43 | /* array_cpuid_t */ | ||
44 | #define ARRAY_T cpuid_t | ||
45 | #include "support/array_template.h" | ||
46 | |||
47 | /* array_cpuinfo_task_t */ | ||
48 | #define ARRAY_T cpuinfo_task_t | ||
49 | #define ARRAY_KEY_T pid_t | ||
50 | #include "support/array_template.h" | ||
51 | |||
52 | /* queue_pid_t */ | ||
53 | #define QUEUE_T pid_t | ||
54 | #define QUEUE_SIZE 8 | ||
55 | #include "support/queue_template.h" | ||
56 | |||
57 | /* queue_lewi_mask_request_t */ | ||
58 | typedef struct { | ||
59 | pid_t pid; | ||
60 | unsigned int howmany; | ||
61 | cpu_set_t allowed; | ||
62 | } lewi_mask_request_t; | ||
63 | #define QUEUE_T lewi_mask_request_t | ||
64 | #define QUEUE_KEY_T pid_t | ||
65 | #define QUEUE_SIZE 1024 | ||
66 | #include "support/queue_template.h" | ||
67 | |||
68 | |||
69 | /* NOTE on default values: | ||
70 | * The shared memory will be initializated to 0 when created, | ||
71 | * thus all default values should represent 0 | ||
72 | * A CPU, by default, is DISABLED and owned by NOBODY | ||
73 | */ | ||
74 | |||
75 | enum { NOBODY = 0 }; | ||
76 | |||
77 | typedef enum { | ||
78 | CPU_DISABLED = 0, // Not owned by any process nor part of the DLB mask | ||
79 | CPU_BUSY, | ||
80 | CPU_LENT | ||
81 | } cpu_state_t; | ||
82 | |||
83 | typedef struct { | ||
84 | cpuid_t id; // logical ID, or hwthread ID | ||
85 | cpuid_t core_id; // core ID | ||
86 | pid_t owner; // Current owner | ||
87 | pid_t guest; // Current user of the CPU | ||
88 | cpu_state_t state; // owner's POV state (busy or lent) | ||
89 | queue_pid_t requests; // List of PIDs requesting the CPU | ||
90 | } cpuinfo_t; | ||
91 | |||
92 | typedef struct cpuinfo_flags { | ||
93 | bool initialized:1; | ||
94 | bool queues_enabled:1; | ||
95 | bool hw_has_smt:1; | ||
96 | } cpuinfo_flags_t; | ||
97 | |||
98 | typedef struct { | ||
99 | cpuinfo_flags_t flags; | ||
100 | struct timespec initial_time; | ||
101 | atomic_int_least64_t timestamp_cpu_lent; | ||
102 | queue_lewi_mask_request_t lewi_mask_requests; | ||
103 | cpu_set_t free_cpus; /* redundant info for speeding up queries: | ||
104 | lent, non-guested CPUs (idle) */ | ||
105 | cpu_set_t occupied_cores; /* redundant info for speeding up queries: | ||
106 | lent or busy cores and guested by other | ||
107 | than the owner (lent or reclaimed) */ | ||
108 | cpuinfo_t node_info[]; | ||
109 | } shdata_t; | ||
110 | |||
111 | enum { SHMEM_CPUINFO_VERSION = 6 }; | ||
112 | |||
113 | static shmem_handler_t *shm_handler = NULL; | ||
114 | static shdata_t *shdata = NULL; | ||
115 | static int node_size; | ||
116 | static bool cpu_is_public_post_mortem = false; | ||
117 | static bool respect_cpuset = true; | ||
118 | static const char *shmem_name = "cpuinfo"; | ||
119 | static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; | ||
120 | static int subprocesses_attached = 0; | ||
121 | |||
122 | static inline bool is_idle(int cpu) __attribute__((unused)); | ||
123 | static inline bool is_borrowed(pid_t pid, int cpu) __attribute__((unused)); | ||
124 | static inline bool is_shmem_empty(void); | ||
125 | |||
126 | |||
127 | 263 | static void update_shmem_timestamp(void) { | |
128 | 263 | DLB_ATOMIC_ST_REL(&shdata->timestamp_cpu_lent, get_time_in_ns()); | |
129 | 263 | } | |
130 | |||
131 | /* A core is eligible if all the CPUs in the core are not guested, or guested | ||
132 | * by the process, and none of them are reclaimed */ | ||
133 | 286 | static bool core_is_eligible(pid_t pid, int cpuid) { | |
134 |
2/2✓ Branch 0 taken 11 times.
✓ Branch 1 taken 275 times.
|
286 | if (shdata->flags.hw_has_smt) { |
135 | 11 | const mu_cpuset_t *core_mask = mu_get_core_mask(cpuid); | |
136 | 11 | for (int cpuid_in_core = core_mask->first_cpuid; | |
137 |
2/2✓ Branch 0 taken 41 times.
✓ Branch 1 taken 8 times.
|
49 | cpuid_in_core >= 0; |
138 | 38 | cpuid_in_core = mu_get_next_cpu(core_mask->set, cpuid_in_core)) { | |
139 | 41 | const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid_in_core]; | |
140 |
4/4✓ Branch 0 taken 24 times.
✓ Branch 1 taken 17 times.
✓ Branch 2 taken 23 times.
✓ Branch 3 taken 1 times.
|
41 | if ((cpuinfo->guest != pid && cpuinfo->guest != NOBODY) |
141 |
3/4✓ Branch 0 taken 2 times.
✓ Branch 1 taken 38 times.
✓ Branch 2 taken 2 times.
✗ Branch 3 not taken.
|
40 | || (cpuinfo->state == CPU_BUSY && cpuinfo->owner != pid)) { |
142 | 3 | return false; | |
143 | } | ||
144 | } | ||
145 | 8 | return true; | |
146 | } else { | ||
147 | 275 | const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
148 | 275 | return cpuinfo->owner == pid | |
149 |
2/2✓ Branch 0 taken 259 times.
✓ Branch 1 taken 2 times.
|
261 | || cpuinfo->guest == pid |
150 |
3/4✓ Branch 0 taken 261 times.
✓ Branch 1 taken 14 times.
✓ Branch 2 taken 259 times.
✗ Branch 3 not taken.
|
536 | || cpuinfo->guest == NOBODY; |
151 | } | ||
152 | } | ||
153 | |||
154 | /* A core is occupied if any of the CPUs in the core are guested by some | ||
155 | * process that is not the owner. The owner is provided as parameter since | ||
156 | * this function may be called during the core registration */ | ||
157 | 8 | static bool core_is_occupied(pid_t owner, int cpuid) { | |
158 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 8 times.
|
8 | if (owner == NOBODY) return false; |
159 | |||
160 |
1/2✓ Branch 0 taken 8 times.
✗ Branch 1 not taken.
|
8 | if (shdata->flags.hw_has_smt) { |
161 | 8 | const mu_cpuset_t *core_mask = mu_get_core_mask(cpuid); | |
162 | 8 | for (int cpuid_in_core = core_mask->first_cpuid; | |
163 |
2/2✓ Branch 0 taken 26 times.
✓ Branch 1 taken 2 times.
|
28 | cpuid_in_core >= 0; |
164 | 20 | cpuid_in_core = mu_get_next_cpu(core_mask->set, cpuid_in_core)) { | |
165 | 26 | pid_t guest = shdata->node_info[cpuid_in_core].guest; | |
166 |
2/2✓ Branch 0 taken 8 times.
✓ Branch 1 taken 18 times.
|
26 | if (guest != NOBODY |
167 |
2/2✓ Branch 0 taken 6 times.
✓ Branch 1 taken 2 times.
|
8 | && guest != owner) { |
168 | 6 | return true; | |
169 | } | ||
170 | } | ||
171 | 2 | return false; | |
172 | } else { | ||
173 | ✗ | pid_t guest = shdata->node_info[cpuid].guest; | |
174 | return guest != NOBODY | ||
175 | ✗ | && guest != owner; | |
176 | } | ||
177 | } | ||
178 | |||
179 | 739 | static bool cpu_is_occupied(pid_t owner, int cpuid) { | |
180 | 739 | pid_t guest = shdata->node_info[cpuid].guest; | |
181 | return guest != NOBODY | ||
182 |
1/2✓ Branch 0 taken 482 times.
✗ Branch 1 not taken.
|
482 | && owner != NOBODY |
183 |
4/4✓ Branch 0 taken 482 times.
✓ Branch 1 taken 257 times.
✓ Branch 2 taken 93 times.
✓ Branch 3 taken 389 times.
|
1221 | && guest != owner; |
184 | } | ||
185 | |||
186 | /* Assuming that only cpuid has changed its state, update occupied_cores accordingly */ | ||
187 | 739 | static void update_occupied_cores(pid_t owner, int cpuid) { | |
188 |
2/2✓ Branch 0 taken 38 times.
✓ Branch 1 taken 701 times.
|
739 | if (shdata->flags.hw_has_smt) { |
189 |
2/2✓ Branch 1 taken 2 times.
✓ Branch 2 taken 36 times.
|
38 | if (cpu_is_occupied(owner, cpuid)) { |
190 |
3/6✓ Branch 0 taken 2 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 2 times.
✓ Branch 4 taken 2 times.
✗ Branch 5 not taken.
|
2 | if (!CPU_ISSET(cpuid, &shdata->occupied_cores)) { |
191 | // Core state has changed | ||
192 | 2 | const cpu_set_t *core_mask = mu_get_core_mask(cpuid)->set; | |
193 | 2 | mu_or(&shdata->occupied_cores, &shdata->occupied_cores, core_mask); | |
194 | } else { | ||
195 | // no change | ||
196 | } | ||
197 | } else { | ||
198 |
5/6✓ Branch 0 taken 36 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 8 times.
✓ Branch 3 taken 28 times.
✓ Branch 4 taken 8 times.
✓ Branch 5 taken 28 times.
|
36 | if (!CPU_ISSET(cpuid, &shdata->occupied_cores)) { |
199 | // no change | ||
200 | } else { | ||
201 | // need to check all cores | ||
202 | 8 | const cpu_set_t *core_mask = mu_get_core_mask(cpuid)->set; | |
203 |
2/2✓ Branch 1 taken 6 times.
✓ Branch 2 taken 2 times.
|
8 | if (core_is_occupied(owner, cpuid)) { |
204 | 6 | mu_or(&shdata->occupied_cores, &shdata->occupied_cores, core_mask); | |
205 | } else { | ||
206 | 2 | mu_substract(&shdata->occupied_cores, &shdata->occupied_cores, core_mask); | |
207 | } | ||
208 | } | ||
209 | } | ||
210 | } else { | ||
211 |
2/2✓ Branch 1 taken 91 times.
✓ Branch 2 taken 610 times.
|
701 | if (cpu_is_occupied(owner, cpuid)) { |
212 |
1/2✓ Branch 0 taken 91 times.
✗ Branch 1 not taken.
|
91 | CPU_SET(cpuid, &shdata->occupied_cores); |
213 | } else { | ||
214 |
1/2✓ Branch 0 taken 610 times.
✗ Branch 1 not taken.
|
610 | CPU_CLR(cpuid, &shdata->occupied_cores); |
215 | } | ||
216 | } | ||
217 | 739 | } | |
218 | |||
219 | 279 | static pid_t find_new_guest(cpuinfo_t *cpuinfo) { | |
220 | 279 | pid_t new_guest = NOBODY; | |
221 |
2/2✓ Branch 0 taken 19 times.
✓ Branch 1 taken 260 times.
|
279 | if (cpuinfo->state == CPU_BUSY) { |
222 | /* If CPU is claimed, ignore requests and assign owner */ | ||
223 | 19 | new_guest = cpuinfo->owner; | |
224 |
2/2✓ Branch 0 taken 16 times.
✓ Branch 1 taken 244 times.
|
260 | } else if (shdata->flags.queues_enabled) { |
225 | /* Pop first PID in queue that is eligible for this CPU */ | ||
226 | 16 | for (pid_t *it = queue_pid_t_front(&cpuinfo->requests); | |
227 |
1/4✗ Branch 0 not taken.
✓ Branch 1 taken 16 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
|
16 | it != NULL && new_guest == NOBODY; |
228 | ✗ | it = queue_pid_t_next(&cpuinfo->requests, it)) { | |
229 | ✗ | if (core_is_eligible(*it, cpuinfo->id)) { | |
230 | ✗ | new_guest = *it; | |
231 | ✗ | queue_pid_t_delete(&cpuinfo->requests, it); | |
232 | } | ||
233 | } | ||
234 | |||
235 | /* If CPU did noy have requests, pop global queue */ | ||
236 |
1/2✓ Branch 0 taken 16 times.
✗ Branch 1 not taken.
|
16 | if (new_guest == NOBODY) { |
237 | 16 | for (lewi_mask_request_t *it = | |
238 | 16 | queue_lewi_mask_request_t_front(&shdata->lewi_mask_requests); | |
239 |
3/4✓ Branch 0 taken 12 times.
✓ Branch 1 taken 16 times.
✓ Branch 2 taken 12 times.
✗ Branch 3 not taken.
|
28 | it != NULL && new_guest == NOBODY; |
240 | 12 | it = queue_lewi_mask_request_t_next(&shdata->lewi_mask_requests, it)) { | |
241 |
5/6✓ Branch 0 taken 12 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 8 times.
✓ Branch 3 taken 4 times.
✓ Branch 4 taken 8 times.
✓ Branch 5 taken 4 times.
|
12 | if (CPU_ISSET(cpuinfo->id, &it->allowed) |
242 |
1/2✓ Branch 1 taken 8 times.
✗ Branch 2 not taken.
|
8 | && core_is_eligible(it->pid, cpuinfo->id)) { |
243 | 8 | new_guest = it->pid; | |
244 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 8 times.
|
8 | if (--(it->howmany) == 0) { |
245 | ✗ | queue_lewi_mask_request_t_delete(&shdata->lewi_mask_requests, it); | |
246 | } | ||
247 | } | ||
248 | } | ||
249 | } | ||
250 | } else { | ||
251 | /* No suitable guest */ | ||
252 | 244 | new_guest = NOBODY; | |
253 | } | ||
254 | 279 | return new_guest; | |
255 | } | ||
256 | |||
257 | |||
258 | /*********************************************************************************/ | ||
259 | /* Register / Deregister CPU */ | ||
260 | /*********************************************************************************/ | ||
261 | |||
262 | 329 | static void register_cpu(cpuinfo_t *cpuinfo, int pid, int preinit_pid) { | |
263 | |||
264 | /* Set basic fields */ | ||
265 | 329 | cpuinfo->owner = pid; | |
266 | 329 | cpuinfo->state = CPU_BUSY; | |
267 |
4/4✓ Branch 0 taken 23 times.
✓ Branch 1 taken 306 times.
✓ Branch 2 taken 21 times.
✓ Branch 3 taken 2 times.
|
329 | if (cpuinfo->guest == NOBODY || cpuinfo->guest == preinit_pid) { |
268 | 327 | cpuinfo->guest = pid; | |
269 | } | ||
270 |
1/2✓ Branch 0 taken 329 times.
✗ Branch 1 not taken.
|
329 | CPU_CLR(cpuinfo->id, &shdata->free_cpus); |
271 | |||
272 | /* Add or remove CPUs in core to the occupied cores set */ | ||
273 | 329 | update_occupied_cores(pid, cpuinfo->id); | |
274 | |||
275 | /* Clear requests queue */ | ||
276 | 329 | queue_pid_t_clear(&cpuinfo->requests); | |
277 | 329 | } | |
278 | |||
279 | 888 | static void deregister_cpu(cpuinfo_t *cpuinfo, int pid) { | |
280 | |||
281 | 888 | cpuid_t cpuid = cpuinfo->id; | |
282 | |||
283 |
2/2✓ Branch 0 taken 151 times.
✓ Branch 1 taken 737 times.
|
888 | if (cpuinfo->owner == pid) { |
284 | 151 | cpuinfo->owner = NOBODY; | |
285 |
2/2✓ Branch 0 taken 137 times.
✓ Branch 1 taken 14 times.
|
151 | if (cpuinfo->guest == pid) { |
286 | 137 | cpuinfo->guest = NOBODY; | |
287 | } | ||
288 |
3/4✓ Branch 0 taken 131 times.
✓ Branch 1 taken 20 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 131 times.
|
151 | if (cpu_is_public_post_mortem || !respect_cpuset) { |
289 | 20 | cpuinfo->state = CPU_LENT; | |
290 |
1/2✓ Branch 0 taken 20 times.
✗ Branch 1 not taken.
|
20 | if (cpuinfo->guest == NOBODY) { |
291 |
1/2✓ Branch 0 taken 20 times.
✗ Branch 1 not taken.
|
20 | CPU_SET(cpuid, &shdata->free_cpus); |
292 | } | ||
293 | } else { | ||
294 | 131 | cpuinfo->state = CPU_DISABLED; | |
295 | 131 | queue_pid_t_clear(&cpuinfo->requests); | |
296 |
1/2✓ Branch 0 taken 131 times.
✗ Branch 1 not taken.
|
131 | CPU_CLR(cpuid, &shdata->free_cpus); |
297 | } | ||
298 | /* Clear all CPUs in core from the occupied */ | ||
299 | 151 | const cpu_set_t *core_mask = mu_get_core_mask(cpuinfo->id)->set; | |
300 | 151 | mu_substract(&shdata->occupied_cores, &shdata->occupied_cores, core_mask); | |
301 | } else { | ||
302 | // Free external CPUs that I may be using | ||
303 |
2/2✓ Branch 0 taken 23 times.
✓ Branch 1 taken 714 times.
|
737 | if (cpuinfo->guest == pid) { |
304 | 23 | cpuinfo->guest = NOBODY; | |
305 |
1/2✓ Branch 0 taken 23 times.
✗ Branch 1 not taken.
|
23 | CPU_SET(cpuid, &shdata->free_cpus); |
306 | } | ||
307 | |||
308 | // Remove any previous CPU request | ||
309 |
2/2✓ Branch 0 taken 163 times.
✓ Branch 1 taken 574 times.
|
737 | if (shdata->flags.queues_enabled) { |
310 | 163 | queue_pid_t_remove(&cpuinfo->requests, pid); | |
311 | } | ||
312 | } | ||
313 | 888 | } | |
314 | |||
315 | |||
316 | /*********************************************************************************/ | ||
317 | /* Init / Register */ | ||
318 | /*********************************************************************************/ | ||
319 | |||
320 | 1 | static void cleanup_shmem(void *shdata_ptr, int pid) { | |
321 | 1 | shdata_t *shared_data = shdata_ptr; | |
322 | int cpuid; | ||
323 |
2/2✓ Branch 0 taken 8 times.
✓ Branch 1 taken 1 times.
|
9 | for (cpuid=0; cpuid<node_size; ++cpuid) { |
324 | 8 | cpuinfo_t *cpuinfo = &shared_data->node_info[cpuid]; | |
325 | 8 | deregister_cpu(cpuinfo, pid); | |
326 | } | ||
327 | 1 | } | |
328 | |||
329 | 118 | static void open_shmem(const char *shmem_key, int shmem_color) { | |
330 | 118 | pthread_mutex_lock(&mutex); | |
331 | { | ||
332 |
2/2✓ Branch 0 taken 75 times.
✓ Branch 1 taken 43 times.
|
118 | if (shm_handler == NULL) { |
333 | 75 | node_size = mu_get_system_size(); | |
334 | 150 | shm_handler = shmem_init((void**)&shdata, | |
335 | 75 | &(const shmem_props_t) { | |
336 | 75 | .size = shmem_cpuinfo__size(), | |
337 | .name = shmem_name, | ||
338 | .key = shmem_key, | ||
339 | .color = shmem_color, | ||
340 | .version = SHMEM_CPUINFO_VERSION, | ||
341 | .cleanup_fn = cleanup_shmem, | ||
342 | }); | ||
343 | 75 | subprocesses_attached = 1; | |
344 | } else { | ||
345 | 43 | ++subprocesses_attached; | |
346 | } | ||
347 | } | ||
348 | 118 | pthread_mutex_unlock(&mutex); | |
349 | 118 | } | |
350 | |||
351 | 93 | static void init_shmem(void) { | |
352 | // Initialize some values if this is the 1st process attached to the shmem | ||
353 |
2/2✓ Branch 0 taken 46 times.
✓ Branch 1 taken 47 times.
|
93 | if (!shdata->flags.initialized) { |
354 | 92 | shdata->flags = (const cpuinfo_flags_t) { | |
355 | .initialized = true, | ||
356 | 46 | .hw_has_smt = mu_system_has_smt(), | |
357 | }; | ||
358 | 46 | get_time(&shdata->initial_time); | |
359 | 46 | shdata->timestamp_cpu_lent = 0; | |
360 | |||
361 | /* Initialize helper cpu sets */ | ||
362 | 46 | CPU_ZERO(&shdata->free_cpus); | |
363 | 46 | CPU_ZERO(&shdata->occupied_cores); | |
364 | |||
365 | /* Initialize global requests */ | ||
366 | 46 | queue_lewi_mask_request_t_init(&shdata->lewi_mask_requests); | |
367 | |||
368 | /* Initialize CPU ids */ | ||
369 | struct timespec now; | ||
370 | 46 | get_time(&now); | |
371 | int cpuid; | ||
372 |
2/2✓ Branch 0 taken 516 times.
✓ Branch 1 taken 46 times.
|
562 | for (cpuid=0; cpuid<node_size; ++cpuid) { |
373 | 1032 | shdata->node_info[cpuid] = (const cpuinfo_t) { | |
374 | .id = cpuid, | ||
375 | 516 | .core_id = mu_get_core_id(cpuid), | |
376 | }; | ||
377 | |||
378 | /* Initialize cpuinfo queue */ | ||
379 | 516 | queue_pid_t_init(&shdata->node_info[cpuid].requests); | |
380 | |||
381 | /* If registered CPU set is not respected, all CPUs start as | ||
382 | * available from the beginning */ | ||
383 |
2/2✓ Branch 0 taken 156 times.
✓ Branch 1 taken 360 times.
|
516 | if (!respect_cpuset) { |
384 | 156 | shdata->node_info[cpuid].state = CPU_LENT; | |
385 |
1/2✓ Branch 0 taken 156 times.
✗ Branch 1 not taken.
|
156 | CPU_SET(cpuid, &shdata->free_cpus); |
386 | } | ||
387 | } | ||
388 | } | ||
389 | 93 | } | |
390 | |||
391 | 93 | static int register_process(pid_t pid, pid_t preinit_pid, const cpu_set_t *mask, bool steal) { | |
392 |
2/2✓ Branch 1 taken 12 times.
✓ Branch 2 taken 81 times.
|
93 | if (CPU_COUNT(mask) == 0) return DLB_SUCCESS; |
393 | |||
394 |
2/2✓ Branch 0 taken 9 times.
✓ Branch 1 taken 72 times.
|
81 | verbose(VB_SHMEM, "Registering process %d with mask %s", pid, mu_to_str(mask)); |
395 | |||
396 |
2/2✓ Branch 0 taken 76 times.
✓ Branch 1 taken 5 times.
|
81 | if (!steal) { |
397 | // Check first that my mask is not already owned | ||
398 | 76 | for (int cpuid = mu_get_first_cpu(mask); | |
399 |
2/2✓ Branch 0 taken 327 times.
✓ Branch 1 taken 73 times.
|
400 | cpuid >= 0; |
400 | 324 | cpuid = mu_get_next_cpu(mask, cpuid)) { | |
401 | |||
402 | 327 | pid_t owner = shdata->node_info[cpuid].owner; | |
403 | |||
404 |
6/6✓ Branch 0 taken 25 times.
✓ Branch 1 taken 302 times.
✓ Branch 2 taken 15 times.
✓ Branch 3 taken 10 times.
✓ Branch 4 taken 3 times.
✓ Branch 5 taken 12 times.
|
327 | if (owner != NOBODY && owner != pid && owner != preinit_pid) { |
405 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3 times.
|
3 | verbose(VB_SHMEM, |
406 | "Error registering CPU %d, already owned by %d", | ||
407 | cpuid, owner); | ||
408 | 3 | return DLB_ERR_PERM; | |
409 | } | ||
410 | } | ||
411 | } | ||
412 | |||
413 | // Register mask | ||
414 | 78 | for (int cpuid = mu_get_first_cpu(mask); | |
415 |
2/2✓ Branch 0 taken 329 times.
✓ Branch 1 taken 78 times.
|
407 | cpuid >= 0; |
416 | 329 | cpuid = mu_get_next_cpu(mask, cpuid)) { | |
417 | |||
418 | 329 | cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
419 | |||
420 |
5/6✓ Branch 0 taken 5 times.
✓ Branch 1 taken 324 times.
✓ Branch 2 taken 1 times.
✓ Branch 3 taken 4 times.
✓ Branch 4 taken 1 times.
✗ Branch 5 not taken.
|
329 | if (steal && cpuinfo->owner != NOBODY && cpuinfo->owner != pid) { |
421 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | verbose(VB_SHMEM, "Acquiring ownership of CPU %d", cpuid); |
422 | } | ||
423 | |||
424 | 329 | register_cpu(cpuinfo, pid, preinit_pid); | |
425 | } | ||
426 | |||
427 | 78 | return DLB_SUCCESS; | |
428 | } | ||
429 | |||
430 | 79 | int shmem_cpuinfo__init(pid_t pid, pid_t preinit_pid, const cpu_set_t *process_mask, | |
431 | const char *shmem_key, int shmem_color) { | ||
432 | 79 | int error = DLB_SUCCESS; | |
433 | |||
434 | // Update post_mortem preference | ||
435 |
4/4✓ Branch 0 taken 42 times.
✓ Branch 1 taken 37 times.
✓ Branch 2 taken 14 times.
✓ Branch 3 taken 28 times.
|
79 | if (thread_spd && thread_spd->options.debug_opts & DBG_LPOSTMORTEM) { |
436 | 14 | cpu_is_public_post_mortem = true; | |
437 | } | ||
438 | |||
439 | // Respect cpuset | ||
440 |
4/4✓ Branch 0 taken 42 times.
✓ Branch 1 taken 37 times.
✓ Branch 2 taken 15 times.
✓ Branch 3 taken 27 times.
|
79 | if (thread_spd && !thread_spd->options.lewi_respect_cpuset) { |
441 | 15 | respect_cpuset = false; | |
442 | 15 | cpu_is_public_post_mortem = true; | |
443 | } | ||
444 | |||
445 | // Shared memory creation | ||
446 | 79 | open_shmem(shmem_key, shmem_color); | |
447 | |||
448 | //cpu_set_t affinity_mask; | ||
449 | //mu_get_nodes_intersecting_with_cpuset(&affinity_mask, process_mask); | ||
450 | |||
451 | //DLB_INSTR( int idle_count = 0; ) | ||
452 | |||
453 | 79 | shmem_lock(shm_handler); | |
454 | { | ||
455 | // Initialize shared memory, if needed | ||
456 | 79 | init_shmem(); | |
457 | |||
458 | // Register process_mask, with stealing = false always in normal Init() | ||
459 | 79 | error = register_process(pid, preinit_pid, process_mask, /* steal */ false); | |
460 | } | ||
461 | 79 | shmem_unlock(shm_handler); | |
462 | |||
463 | // TODO mask info should go in shmem_procinfo. Print something else here? | ||
464 | //verbose( VB_SHMEM, "Process Mask: %s", mu_to_str(process_mask) ); | ||
465 | //verbose( VB_SHMEM, "Process Affinity Mask: %s", mu_to_str(&affinity_mask) ); | ||
466 | |||
467 | //add_event(IDLE_CPUS_EVENT, idle_count); | ||
468 | |||
469 |
2/2✓ Branch 0 taken 3 times.
✓ Branch 1 taken 76 times.
|
79 | if (error == DLB_ERR_PERM) { |
470 | 3 | warn_error(DLB_ERR_PERM); | |
471 | } | ||
472 | |||
473 |
2/2✓ Branch 0 taken 3 times.
✓ Branch 1 taken 76 times.
|
79 | if (error != DLB_SUCCESS) { |
474 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3 times.
|
3 | verbose(VB_SHMEM, |
475 | "Error during shmem_cpuinfo initialization, finalizing shared memory"); | ||
476 | 3 | shmem_cpuinfo__finalize(pid, shmem_key, shmem_color); | |
477 | } | ||
478 | |||
479 | 79 | return error; | |
480 | } | ||
481 | |||
482 | 39 | int shmem_cpuinfo_ext__init(const char *shmem_key, int shmem_color) { | |
483 | 39 | open_shmem(shmem_key, shmem_color); | |
484 | 39 | return DLB_SUCCESS; | |
485 | } | ||
486 | |||
487 | 14 | int shmem_cpuinfo_ext__preinit(pid_t pid, const cpu_set_t *mask, dlb_drom_flags_t flags) { | |
488 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 14 times.
|
14 | if (shm_handler == NULL) return DLB_ERR_NOSHMEM; |
489 | int error; | ||
490 | 14 | shmem_lock(shm_handler); | |
491 | { | ||
492 | // Initialize shared memory, if needed | ||
493 | 14 | init_shmem(); | |
494 | |||
495 | // Register process_mask, with stealing according to user arguments | ||
496 | 14 | error = register_process(pid, /* preinit_pid */ 0, mask, flags & DLB_STEAL_CPUS); | |
497 | } | ||
498 | 14 | shmem_unlock(shm_handler); | |
499 | |||
500 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 14 times.
|
14 | if (error == DLB_ERR_PERM) { |
501 | ✗ | warn_error(DLB_ERR_PERM); | |
502 | } | ||
503 | |||
504 | 14 | return error; | |
505 | } | ||
506 | |||
507 | |||
508 | /*********************************************************************************/ | ||
509 | /* Finalize / Deregister */ | ||
510 | /*********************************************************************************/ | ||
511 | |||
512 | 118 | static void close_shmem(void) { | |
513 | 118 | pthread_mutex_lock(&mutex); | |
514 | { | ||
515 |
2/2✓ Branch 0 taken 75 times.
✓ Branch 1 taken 43 times.
|
118 | if (--subprocesses_attached == 0) { |
516 | 75 | shmem_finalize(shm_handler, is_shmem_empty); | |
517 | 75 | shm_handler = NULL; | |
518 | 75 | shdata = NULL; | |
519 | } | ||
520 | } | ||
521 | 118 | pthread_mutex_unlock(&mutex); | |
522 | 118 | } | |
523 | |||
524 | /* Even though the correct deregistration should be done through shmem_cpuinfo__deregister, | ||
525 | * this function is kept to allow deregistration from outside the LeWI_mask policy */ | ||
526 | 84 | static void deregister_process(pid_t pid) { | |
527 | int cpuid; | ||
528 |
2/2✓ Branch 0 taken 880 times.
✓ Branch 1 taken 84 times.
|
964 | for (cpuid=0; cpuid<node_size; ++cpuid) { |
529 | 880 | cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
530 | 880 | deregister_cpu(cpuinfo, pid); | |
531 | } | ||
532 | |||
533 | // Remove any previous global request | ||
534 |
2/2✓ Branch 0 taken 8 times.
✓ Branch 1 taken 76 times.
|
84 | if (shdata->flags.queues_enabled) { |
535 | 8 | queue_lewi_mask_request_t_remove(&shdata->lewi_mask_requests, pid); | |
536 | } | ||
537 | 84 | } | |
538 | |||
539 | 89 | int shmem_cpuinfo__finalize(pid_t pid, const char *shmem_key, int shmem_color) { | |
540 |
2/2✓ Branch 0 taken 10 times.
✓ Branch 1 taken 79 times.
|
89 | if (shm_handler == NULL) { |
541 | /* cpuinfo_finalize may be called to finalize existing process | ||
542 | * even if the file descriptor is not opened. (DLB_PreInit + forc-exec case) */ | ||
543 |
1/2✗ Branch 1 not taken.
✓ Branch 2 taken 10 times.
|
10 | if (shmem_exists(shmem_name, shmem_key)) { |
544 | ✗ | open_shmem(shmem_key, shmem_color); | |
545 | } else { | ||
546 | 10 | return DLB_ERR_NOSHMEM; | |
547 | } | ||
548 | } | ||
549 | |||
550 | //DLB_INSTR( int idle_count = 0; ) | ||
551 | |||
552 | // Lock the shmem to deregister CPUs | ||
553 | 79 | shmem_lock(shm_handler); | |
554 | { | ||
555 | 79 | deregister_process(pid); | |
556 | //DLB_INSTR( if (is_idle(cpuid)) idle_count++; ) | ||
557 | } | ||
558 | 79 | shmem_unlock(shm_handler); | |
559 | |||
560 | 79 | update_shmem_timestamp(); | |
561 | |||
562 | // Shared memory destruction | ||
563 | 79 | close_shmem(); | |
564 | |||
565 | //add_event(IDLE_CPUS_EVENT, idle_count); | ||
566 | |||
567 | 79 | return DLB_SUCCESS; | |
568 | } | ||
569 | |||
570 | 39 | int shmem_cpuinfo_ext__finalize(void) { | |
571 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 39 times.
|
39 | if (shm_handler == NULL) return DLB_ERR_NOSHMEM; |
572 | |||
573 | // Shared memory destruction | ||
574 | 39 | close_shmem(); | |
575 | |||
576 | 39 | return DLB_SUCCESS; | |
577 | } | ||
578 | |||
579 | 5 | int shmem_cpuinfo_ext__postfinalize(pid_t pid) { | |
580 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 5 times.
|
5 | if (shm_handler == NULL) return DLB_ERR_NOSHMEM; |
581 | |||
582 | 5 | int error = DLB_SUCCESS; | |
583 | 5 | shmem_lock(shm_handler); | |
584 | { | ||
585 | 5 | deregister_process(pid); | |
586 | } | ||
587 | 5 | shmem_unlock(shm_handler); | |
588 | |||
589 | 5 | update_shmem_timestamp(); | |
590 | |||
591 | 5 | return error; | |
592 | } | ||
593 | |||
594 | |||
595 | /*********************************************************************************/ | ||
596 | /* Lend CPU */ | ||
597 | /*********************************************************************************/ | ||
598 | |||
599 | /* Add cpu_mask to the Shared Mask | ||
600 | * If the process originally owns the CPU: State => CPU_LENT | ||
601 | * If the process is currently using the CPU: Guest => NOBODY | ||
602 | */ | ||
603 | 296 | static void lend_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) { | |
604 | 296 | cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
605 | |||
606 |
2/2✓ Branch 0 taken 216 times.
✓ Branch 1 taken 80 times.
|
296 | if (cpuinfo->owner == pid) { |
607 | // If the CPU is owned by the process, just change the state | ||
608 | 216 | cpuinfo->state = CPU_LENT; | |
609 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 80 times.
|
80 | } else if (shdata->flags.queues_enabled) { |
610 | // Otherwise, remove any previous request | ||
611 | ✗ | queue_pid_t_remove(&cpuinfo->requests, pid); | |
612 | } | ||
613 | |||
614 | // If the process is the guest, free it | ||
615 |
2/2✓ Branch 0 taken 279 times.
✓ Branch 1 taken 17 times.
|
296 | if (cpuinfo->guest == pid) { |
616 | 279 | cpuinfo->guest = NOBODY; | |
617 | } | ||
618 | |||
619 | // If the CPU is free, find a new guest | ||
620 |
2/2✓ Branch 0 taken 279 times.
✓ Branch 1 taken 17 times.
|
296 | if (cpuinfo->guest == NOBODY) { |
621 | 279 | pid_t new_guest = find_new_guest(cpuinfo); | |
622 |
2/2✓ Branch 0 taken 27 times.
✓ Branch 1 taken 252 times.
|
279 | if (new_guest != NOBODY) { |
623 | 27 | cpuinfo->guest = new_guest; | |
624 | 27 | array_cpuinfo_task_t_push( | |
625 | tasks, | ||
626 | 27 | (const cpuinfo_task_t) { | |
627 | .action = ENABLE_CPU, | ||
628 | .pid = new_guest, | ||
629 | .cpuid = cpuid, | ||
630 | }); | ||
631 | |||
632 | // If SMT is enabled, this CPU could have been the last lent | ||
633 | // CPU in core, allowing find_new_guest to find new guests for | ||
634 | // the rest of CPUs in the core. Iterate the rest of cpus now: | ||
635 | 27 | const mu_cpuset_t *core_mask = mu_get_core_mask(cpuid); | |
636 | 27 | for (int cpuid_in_core = core_mask->first_cpuid; | |
637 |
2/2✓ Branch 0 taken 27 times.
✓ Branch 1 taken 27 times.
|
54 | cpuid_in_core >= 0; |
638 | 27 | cpuid_in_core = mu_get_next_cpu(core_mask->set, cpuid_in_core)) { | |
639 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 27 times.
|
27 | if (cpuid_in_core != cpuid) { |
640 | ✗ | cpuinfo_t *cpuinfo_in_core = &shdata->node_info[cpuid_in_core]; | |
641 | ✗ | if (cpuinfo_in_core->guest == NOBODY) { | |
642 | ✗ | new_guest = find_new_guest(cpuinfo_in_core); | |
643 | ✗ | if (new_guest != NOBODY) { | |
644 | ✗ | cpuinfo_in_core->guest = new_guest; | |
645 | ✗ | array_cpuinfo_task_t_push( | |
646 | tasks, | ||
647 | ✗ | (const cpuinfo_task_t) { | |
648 | .action = ENABLE_CPU, | ||
649 | .pid = new_guest, | ||
650 | .cpuid = cpuid_in_core, | ||
651 | }); | ||
652 | ✗ | CPU_CLR(cpuid_in_core, &shdata->free_cpus); | |
653 | } | ||
654 | } | ||
655 | } | ||
656 | } | ||
657 | } | ||
658 | } | ||
659 | |||
660 | // Add CPU to the appropriate CPU sets | ||
661 |
2/2✓ Branch 0 taken 252 times.
✓ Branch 1 taken 44 times.
|
296 | if (cpuinfo->guest == NOBODY) { |
662 |
1/2✓ Branch 0 taken 252 times.
✗ Branch 1 not taken.
|
252 | CPU_SET(cpuid, &shdata->free_cpus); |
663 | } | ||
664 | |||
665 | // Add or remove CPUs in core to the occupied cores set | ||
666 | 296 | update_occupied_cores(cpuinfo->owner, cpuinfo->id); | |
667 | 296 | } | |
668 | |||
669 | 88 | int shmem_cpuinfo__lend_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) { | |
670 | |||
671 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 88 times.
|
88 | if (cpuid >= node_size) return DLB_ERR_PERM; |
672 | |||
673 | 88 | int error = DLB_SUCCESS; | |
674 | //DLB_DEBUG( cpu_set_t freed_cpus; ) | ||
675 | //DLB_DEBUG( cpu_set_t idle_cpus; ) | ||
676 | //DLB_DEBUG( CPU_ZERO( &freed_cpus ); ) | ||
677 | //DLB_DEBUG( CPU_ZERO( &idle_cpus ); ) | ||
678 | |||
679 | //DLB_INSTR( int idle_count = 0; ) | ||
680 | |||
681 | 88 | shmem_lock(shm_handler); | |
682 | { | ||
683 | 88 | lend_cpu(pid, cpuid, tasks); | |
684 | |||
685 | //// Look for Idle CPUs, only in DEBUG or INSTRUMENTATION | ||
686 | //int i; | ||
687 | //for (i = 0; i < node_size; i++) { | ||
688 | //if (is_idle(i)) { | ||
689 | //DLB_INSTR( idle_count++; ) | ||
690 | //DLB_DEBUG( CPU_SET(i, &idle_cpus); ) | ||
691 | //} | ||
692 | //} | ||
693 | } | ||
694 | 88 | shmem_unlock(shm_handler); | |
695 | |||
696 | 88 | update_shmem_timestamp(); | |
697 | |||
698 | //DLB_DEBUG( int size = CPU_COUNT(&freed_cpus); ) | ||
699 | //DLB_DEBUG( int post_size = CPU_COUNT(&idle_cpus); ) | ||
700 | //verbose(VB_SHMEM, "Lending %s", mu_to_str(&freed_cpus)); | ||
701 | //verbose(VB_SHMEM, "Increasing %d Idle Threads (%d now)", size, post_size); | ||
702 | //verbose(VB_SHMEM, "Available mask: %s", mu_to_str(&idle_cpus)); | ||
703 | |||
704 | //add_event(IDLE_CPUS_EVENT, idle_count); | ||
705 | 88 | return error; | |
706 | } | ||
707 | |||
708 | 42 | int shmem_cpuinfo__lend_cpu_mask(pid_t pid, const cpu_set_t *restrict mask, | |
709 | array_cpuinfo_task_t *restrict tasks) { | ||
710 | |||
711 | 42 | int error = DLB_SUCCESS; | |
712 | |||
713 | //DLB_DEBUG( cpu_set_t freed_cpus; ) | ||
714 | //DLB_DEBUG( cpu_set_t idle_cpus; ) | ||
715 | //DLB_DEBUG( CPU_ZERO( &freed_cpus ); ) | ||
716 | //DLB_DEBUG( CPU_ZERO( &idle_cpus ); ) | ||
717 | |||
718 | //DLB_INSTR( int idle_count = 0; ) | ||
719 | |||
720 | 42 | shmem_lock(shm_handler); | |
721 | { | ||
722 | 42 | for (int cpuid = mu_get_first_cpu(mask); | |
723 |
2/2✓ Branch 0 taken 139 times.
✓ Branch 1 taken 42 times.
|
181 | cpuid >= 0; |
724 | 139 | cpuid = mu_get_next_cpu(mask, cpuid)) { | |
725 | 139 | lend_cpu(pid, cpuid, tasks); | |
726 | |||
727 | //// Look for Idle CPUs, only in DEBUG or INSTRUMENTATION | ||
728 | //if (is_idle(cpuid)) { | ||
729 | //DLB_INSTR( idle_count++; ) | ||
730 | //DLB_DEBUG( CPU_SET(cpu, &idle_cpus); ) | ||
731 | //} | ||
732 | } | ||
733 | } | ||
734 | 42 | shmem_unlock(shm_handler); | |
735 | |||
736 | 42 | update_shmem_timestamp(); | |
737 | |||
738 | //DLB_DEBUG( int size = CPU_COUNT(&freed_cpus); ) | ||
739 | //DLB_DEBUG( int post_size = CPU_COUNT(&idle_cpus); ) | ||
740 | //verbose(VB_SHMEM, "Lending %s", mu_to_str(&freed_cpus)); | ||
741 | //verbose(VB_SHMEM, "Increasing %d Idle Threads (%d now)", size, post_size); | ||
742 | //verbose(VB_SHMEM, "Available mask: %s", mu_to_str(&idle_cpus)); | ||
743 | |||
744 | //add_event(IDLE_CPUS_EVENT, idle_count); | ||
745 | 42 | return error; | |
746 | } | ||
747 | |||
748 | |||
749 | /*********************************************************************************/ | ||
750 | /* Reclaim CPU */ | ||
751 | /*********************************************************************************/ | ||
752 | |||
753 | /* Recover CPU from the Shared Mask | ||
754 | * CPUs that owner == ME: State => CPU_BUSY | ||
755 | * CPUs that guest == NOBODY Guest => ME | ||
756 | */ | ||
757 | 237 | static int reclaim_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) { | |
758 | int error; | ||
759 | 237 | cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
760 |
2/2✓ Branch 0 taken 234 times.
✓ Branch 1 taken 3 times.
|
237 | if (cpuinfo->owner == pid) { |
761 | 234 | cpuinfo->state = CPU_BUSY; | |
762 |
2/2✓ Branch 0 taken 141 times.
✓ Branch 1 taken 93 times.
|
234 | if (cpuinfo->guest == pid) { |
763 | 141 | error = DLB_NOUPDT; | |
764 | } | ||
765 |
2/2✓ Branch 0 taken 53 times.
✓ Branch 1 taken 40 times.
|
93 | else if (cpuinfo->guest == NOBODY) { |
766 | /* The CPU was idle, acquire it */ | ||
767 | 53 | cpuinfo->guest = pid; | |
768 | 53 | array_cpuinfo_task_t_push( | |
769 | tasks, | ||
770 | 53 | (const cpuinfo_task_t) { | |
771 | .action = ENABLE_CPU, | ||
772 | .pid = pid, | ||
773 | .cpuid = cpuid, | ||
774 | }); | ||
775 |
1/2✓ Branch 0 taken 53 times.
✗ Branch 1 not taken.
|
53 | CPU_CLR(cpuid, &shdata->free_cpus); |
776 | 53 | error = DLB_SUCCESS; | |
777 | } else { | ||
778 | /* The CPU was guested, reclaim it */ | ||
779 | 40 | array_cpuinfo_task_t_push( | |
780 | tasks, | ||
781 | 40 | (const cpuinfo_task_t) { | |
782 | .action = DISABLE_CPU, | ||
783 | 40 | .pid = cpuinfo->guest, | |
784 | .cpuid = cpuid, | ||
785 | }); | ||
786 | 40 | array_cpuinfo_task_t_push( | |
787 | tasks, | ||
788 | 40 | (const cpuinfo_task_t) { | |
789 | .action = ENABLE_CPU, | ||
790 | .pid = pid, | ||
791 | .cpuid = cpuid, | ||
792 | }); | ||
793 | 40 | error = DLB_NOTED; | |
794 | } | ||
795 | } else { | ||
796 | 3 | error = DLB_ERR_PERM; | |
797 | } | ||
798 | |||
799 | 237 | return error; | |
800 | } | ||
801 | |||
802 | 1 | static int reclaim_core(pid_t pid, cpuid_t core_id, | |
803 | array_cpuinfo_task_t *restrict tasks, | ||
804 | unsigned int *num_reclaimed) { | ||
805 | |||
806 | 1 | int error = DLB_NOUPDT; | |
807 | 1 | *num_reclaimed = 0; | |
808 | |||
809 | 1 | const mu_cpuset_t *core_mask = mu_get_core_mask_by_coreid(core_id); | |
810 | 1 | for (int cpuid_in_core = core_mask->first_cpuid; | |
811 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 1 times.
|
2 | cpuid_in_core >= 0; |
812 | 1 | cpuid_in_core = mu_get_next_cpu(core_mask->set, cpuid_in_core)) { | |
813 | 1 | int local_error = reclaim_cpu(pid, cpuid_in_core, tasks); | |
814 |
1/4✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
|
1 | if (local_error == DLB_SUCCESS || local_error == DLB_NOTED) { |
815 | 1 | ++(*num_reclaimed); | |
816 |
1/2✓ Branch 0 taken 1 times.
✗ Branch 1 not taken.
|
1 | if (error != DLB_NOTED) { |
817 | 1 | error = local_error; | |
818 | } | ||
819 | ✗ | } else if (shdata->node_info[cpuid_in_core].guest == pid) { | |
820 | /* already guested, continue */ | ||
821 | } else { | ||
822 | /* could not be borrowed for other reason, stop iterating */ | ||
823 | ✗ | break; | |
824 | } | ||
825 | } | ||
826 | |||
827 | 1 | return error; | |
828 | } | ||
829 | |||
830 | 11 | int shmem_cpuinfo__reclaim_all(pid_t pid, array_cpuinfo_task_t *restrict tasks) { | |
831 | 11 | int error = DLB_NOUPDT; | |
832 | 11 | shmem_lock(shm_handler); | |
833 | { | ||
834 | cpu_set_t cpus_to_reclaim; | ||
835 |
2/2✓ Branch 0 taken 176 times.
✓ Branch 1 taken 11 times.
|
187 | CPU_OR(&cpus_to_reclaim, &shdata->free_cpus, &shdata->occupied_cores); |
836 | |||
837 | 11 | for (int cpuid = mu_get_first_cpu(&cpus_to_reclaim); | |
838 |
2/2✓ Branch 0 taken 30 times.
✓ Branch 1 taken 11 times.
|
41 | cpuid >= 0; |
839 | 30 | cpuid = mu_get_next_cpu(&cpus_to_reclaim, cpuid)) { | |
840 |
1/2✓ Branch 0 taken 30 times.
✗ Branch 1 not taken.
|
30 | if (shdata->node_info[cpuid].owner == pid |
841 |
1/2✓ Branch 0 taken 30 times.
✗ Branch 1 not taken.
|
30 | && shdata->node_info[cpuid].guest != pid) { |
842 | 30 | int local_error = reclaim_cpu(pid, cpuid, tasks); | |
843 |
2/5✓ Branch 0 taken 15 times.
✓ Branch 1 taken 15 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
✗ Branch 4 not taken.
|
30 | switch(local_error) { |
844 | 15 | case DLB_NOTED: | |
845 | // max priority, always overwrite | ||
846 | 15 | error = DLB_NOTED; | |
847 | 15 | break; | |
848 | 15 | case DLB_SUCCESS: | |
849 | // medium priority, only update if error is in lowest priority | ||
850 | 15 | error = (error == DLB_NOTED) ? DLB_NOTED : DLB_SUCCESS; | |
851 | 15 | break; | |
852 | ✗ | case DLB_NOUPDT: | |
853 | // lowest priority, default value | ||
854 | ✗ | break; | |
855 | ✗ | case DLB_ERR_PERM: | |
856 | // ignore | ||
857 | ✗ | break; | |
858 | } | ||
859 | } | ||
860 | } | ||
861 | } | ||
862 | 11 | shmem_unlock(shm_handler); | |
863 | 11 | return error; | |
864 | } | ||
865 | |||
866 | 12 | int shmem_cpuinfo__reclaim_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) { | |
867 | |||
868 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 11 times.
|
12 | if (cpuid >= node_size) return DLB_ERR_PERM; |
869 | |||
870 | int error; | ||
871 | //DLB_DEBUG( cpu_set_t recovered_cpus; ) | ||
872 | //DLB_DEBUG( cpu_set_t idle_cpus; ) | ||
873 | //DLB_DEBUG( CPU_ZERO(&recovered_cpus); ) | ||
874 | //DLB_DEBUG( CPU_ZERO(&idle_cpus); ) | ||
875 | |||
876 | //DLB_INSTR( int idle_count = 0; ) | ||
877 | |||
878 | 11 | shmem_lock(shm_handler); | |
879 | { | ||
880 | 11 | error = reclaim_cpu(pid, cpuid, tasks); | |
881 | |||
882 | /* If the CPU was actually reclaimed and SMT is enabled, the rest of | ||
883 | * the CPUs in the core need to be disabled. | ||
884 | * Note that we are not changing the CPU state to BUSY because the owner | ||
885 | * still has not reclaim it. */ | ||
886 |
2/2✓ Branch 0 taken 6 times.
✓ Branch 1 taken 5 times.
|
11 | if (error == DLB_NOTED |
887 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 5 times.
|
6 | && shdata->flags.hw_has_smt) { |
888 | 1 | const mu_cpuset_t *core_mask = mu_get_core_mask(cpuid); | |
889 | 1 | for (int cpuid_in_core = core_mask->first_cpuid; | |
890 |
2/2✓ Branch 0 taken 4 times.
✓ Branch 1 taken 1 times.
|
5 | cpuid_in_core >= 0; |
891 | 4 | cpuid_in_core = mu_get_next_cpu(core_mask->set, cpuid_in_core)) { | |
892 |
2/2✓ Branch 0 taken 3 times.
✓ Branch 1 taken 1 times.
|
4 | if (cpuid_in_core != cpuid) { |
893 | 3 | const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid_in_core]; | |
894 |
1/2✓ Branch 0 taken 3 times.
✗ Branch 1 not taken.
|
3 | if (cpuinfo->guest != pid) { |
895 | 3 | array_cpuinfo_task_t_push( | |
896 | tasks, | ||
897 | 3 | (const cpuinfo_task_t) { | |
898 | .action = DISABLE_CPU, | ||
899 | 3 | .pid = cpuinfo->guest, | |
900 | .cpuid = cpuid_in_core, | ||
901 | }); | ||
902 | } | ||
903 | } | ||
904 | } | ||
905 | } | ||
906 | |||
907 | // if (!error) //DLB_DEBUG( CPU_SET(cpu, &recovered_cpus); ) | ||
908 | |||
909 | // Look for Idle CPUs, only in DEBUG or INSTRUMENTATION | ||
910 | //if (is_idle(cpu)) { | ||
911 | //DLB_INSTR( idle_count++; ) | ||
912 | //DLB_DEBUG( CPU_SET(cpu, &idle_cpus); ) | ||
913 | //} | ||
914 | } | ||
915 | 11 | shmem_unlock(shm_handler); | |
916 | |||
917 | //DLB_DEBUG( int recovered = CPU_COUNT(&recovered_cpus); ) | ||
918 | //DLB_DEBUG( int post_size = CPU_COUNT(&idle_cpus); ) | ||
919 | //verbose(VB_SHMEM, "Decreasing %d Idle Threads (%d now)", recovered, post_size); | ||
920 | //verbose(VB_SHMEM, "Available mask: %s", mu_to_str(&idle_cpus)); | ||
921 | |||
922 | //add_event(IDLE_CPUS_EVENT, idle_count); | ||
923 | 11 | return error; | |
924 | } | ||
925 | |||
926 | 1 | int shmem_cpuinfo__reclaim_cpus(pid_t pid, int ncpus, array_cpuinfo_task_t *restrict tasks) { | |
927 | 1 | int error = DLB_NOUPDT; | |
928 | //DLB_DEBUG( cpu_set_t idle_cpus; ) | ||
929 | //DLB_DEBUG( CPU_ZERO(&idle_cpus); ) | ||
930 | |||
931 | //DLB_INSTR( int idle_count = 0; ) | ||
932 | |||
933 | //cpu_set_t recovered_cpus; | ||
934 | //CPU_ZERO(&recovered_cpus); | ||
935 | |||
936 | 1 | shmem_lock(shm_handler); | |
937 | { | ||
938 | 1 | int num_cores = mu_get_num_cores(); | |
939 |
3/4✓ Branch 0 taken 2 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 1 times.
✓ Branch 3 taken 1 times.
|
2 | for (int core_id = 0; core_id < num_cores && ncpus>0; ++core_id) { |
940 | unsigned int num_reclaimed; | ||
941 | 1 | int local_error = reclaim_core(pid, core_id, tasks, &num_reclaimed); | |
942 |
1/5✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
✗ Branch 4 not taken.
|
1 | switch(local_error) { |
943 | ✗ | case DLB_NOTED: | |
944 | // max priority, always overwrite | ||
945 | ✗ | error = DLB_NOTED; | |
946 | ✗ | ncpus -= num_reclaimed; | |
947 | ✗ | break; | |
948 | 1 | case DLB_SUCCESS: | |
949 | // medium priority, only update if error is in lowest priority | ||
950 | 1 | error = (error == DLB_NOTED) ? DLB_NOTED : DLB_SUCCESS; | |
951 | 1 | ncpus -= num_reclaimed; | |
952 | 1 | break; | |
953 | ✗ | case DLB_NOUPDT: | |
954 | // lowest priority, default value | ||
955 | ✗ | break; | |
956 | ✗ | case DLB_ERR_PERM: | |
957 | // ignore | ||
958 | ✗ | break; | |
959 | } | ||
960 | // Look for Idle CPUs, only in DEBUG or INSTRUMENTATION | ||
961 | //if (is_idle(cpu)) { | ||
962 | //DLB_INSTR( idle_count++; ) | ||
963 | //DLB_DEBUG( CPU_SET(cpu, &idle_cpus); ) | ||
964 | //} | ||
965 | } | ||
966 | } | ||
967 | 1 | shmem_unlock(shm_handler); | |
968 | |||
969 | //DLB_DEBUG( int recovered = CPU_COUNT(&recovered_cpus); ) | ||
970 | //DLB_DEBUG( int post_size = CPU_COUNT(&idle_cpus); ) | ||
971 | //verbose(VB_SHMEM, "Decreasing %d Idle Threads (%d now)", recovered, post_size); | ||
972 | //verbose(VB_SHMEM, "Available mask: %s", mu_to_str(&idle_cpus)); | ||
973 | |||
974 | //add_event(IDLE_CPUS_EVENT, idle_count); | ||
975 | 1 | return error; | |
976 | } | ||
977 | |||
978 | 18 | int shmem_cpuinfo__reclaim_cpu_mask(pid_t pid, const cpu_set_t *restrict mask, | |
979 | array_cpuinfo_task_t *restrict tasks) { | ||
980 | 18 | int error = DLB_NOUPDT; | |
981 | 18 | shmem_lock(shm_handler); | |
982 | { | ||
983 | cpu_set_t cpus_to_reclaim; | ||
984 |
2/2✓ Branch 0 taken 288 times.
✓ Branch 1 taken 18 times.
|
306 | CPU_OR(&cpus_to_reclaim, &shdata->free_cpus, &shdata->occupied_cores); |
985 |
2/2✓ Branch 0 taken 288 times.
✓ Branch 1 taken 18 times.
|
306 | CPU_AND(&cpus_to_reclaim, &cpus_to_reclaim, mask); |
986 | |||
987 | 18 | for (int cpuid = mu_get_first_cpu(&cpus_to_reclaim); | |
988 |
3/4✓ Branch 0 taken 51 times.
✓ Branch 1 taken 18 times.
✓ Branch 2 taken 51 times.
✗ Branch 3 not taken.
|
69 | cpuid >= 0 && cpuid < node_size; |
989 | 51 | cpuid = mu_get_next_cpu(&cpus_to_reclaim, cpuid)) { | |
990 | 51 | int local_error = reclaim_cpu(pid, cpuid, tasks); | |
991 |
3/5✓ Branch 0 taken 2 times.
✓ Branch 1 taken 18 times.
✓ Branch 2 taken 31 times.
✗ Branch 3 not taken.
✗ Branch 4 not taken.
|
51 | switch(local_error) { |
992 | 2 | case DLB_ERR_PERM: | |
993 | // max priority, always overwrite | ||
994 | 2 | error = DLB_ERR_PERM; | |
995 | 2 | break; | |
996 | 18 | case DLB_NOTED: | |
997 | // max priority unless there was a previous error | ||
998 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 18 times.
|
18 | error = error < 0 ? error : DLB_NOTED; |
999 | 18 | break; | |
1000 | 31 | case DLB_SUCCESS: | |
1001 | // medium priority, only update if error is in lowest priority | ||
1002 |
2/2✓ Branch 0 taken 23 times.
✓ Branch 1 taken 8 times.
|
31 | error = (error == DLB_NOUPDT) ? DLB_SUCCESS : error; |
1003 | 31 | break; | |
1004 | ✗ | case DLB_NOUPDT: | |
1005 | // lowest priority, default value | ||
1006 | ✗ | break; | |
1007 | } | ||
1008 | } | ||
1009 | } | ||
1010 | 18 | shmem_unlock(shm_handler); | |
1011 | 18 | return error; | |
1012 | } | ||
1013 | |||
1014 | |||
1015 | /*********************************************************************************/ | ||
1016 | /* Acquire CPU */ | ||
1017 | /*********************************************************************************/ | ||
1018 | |||
1019 | /* Acquire CPU | ||
1020 | * If successful: Guest => ME | ||
1021 | */ | ||
1022 | 122 | static int acquire_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) { | |
1023 | int error; | ||
1024 | 122 | cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
1025 | |||
1026 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 121 times.
|
122 | if (cpuinfo->guest == pid) { |
1027 | // CPU already guested | ||
1028 | 1 | error = DLB_NOUPDT; | |
1029 |
2/2✓ Branch 0 taken 88 times.
✓ Branch 1 taken 33 times.
|
121 | } else if (cpuinfo->owner == pid) { |
1030 | // CPU is owned by the process | ||
1031 | 88 | cpuinfo->state = CPU_BUSY; | |
1032 |
2/2✓ Branch 0 taken 76 times.
✓ Branch 1 taken 12 times.
|
88 | if (cpuinfo->guest == NOBODY) { |
1033 | // CPU empty | ||
1034 | 76 | cpuinfo->guest = pid; | |
1035 | 76 | array_cpuinfo_task_t_push( | |
1036 | tasks, | ||
1037 | 76 | (const cpuinfo_task_t) { | |
1038 | .action = ENABLE_CPU, | ||
1039 | .pid = pid, | ||
1040 | .cpuid = cpuid, | ||
1041 | }); | ||
1042 |
1/2✓ Branch 0 taken 76 times.
✗ Branch 1 not taken.
|
76 | CPU_CLR(cpuid, &shdata->free_cpus); |
1043 | 76 | error = DLB_SUCCESS; | |
1044 | } else { | ||
1045 | // CPU needs to be reclaimed | ||
1046 | 12 | array_cpuinfo_task_t_push( | |
1047 | tasks, | ||
1048 | 12 | (const cpuinfo_task_t) { | |
1049 | .action = DISABLE_CPU, | ||
1050 | 12 | .pid = cpuinfo->guest, | |
1051 | .cpuid = cpuid, | ||
1052 | }); | ||
1053 | 12 | array_cpuinfo_task_t_push( | |
1054 | tasks, | ||
1055 | 12 | (const cpuinfo_task_t) { | |
1056 | .action = ENABLE_CPU, | ||
1057 | .pid = pid, | ||
1058 | .cpuid = cpuid, | ||
1059 | }); | ||
1060 | |||
1061 |
3/6✓ Branch 0 taken 12 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 12 times.
✗ Branch 3 not taken.
✗ Branch 4 not taken.
✓ Branch 5 taken 12 times.
|
12 | if (!CPU_ISSET(cpuid, &shdata->occupied_cores)) { |
1062 | ✗ | update_occupied_cores(cpuinfo->owner, cpuinfo->id); | |
1063 | } | ||
1064 | |||
1065 | 12 | error = DLB_NOTED; | |
1066 | } | ||
1067 |
2/2✓ Branch 0 taken 25 times.
✓ Branch 1 taken 8 times.
|
33 | } else if (cpuinfo->guest == NOBODY |
1068 |
2/2✓ Branch 0 taken 23 times.
✓ Branch 1 taken 2 times.
|
25 | && cpuinfo->state == CPU_LENT |
1069 |
1/2✓ Branch 1 taken 23 times.
✗ Branch 2 not taken.
|
23 | && core_is_eligible(pid, cpuid)) { |
1070 | // CPU is available | ||
1071 | 23 | cpuinfo->guest = pid; | |
1072 | 23 | array_cpuinfo_task_t_push( | |
1073 | tasks, | ||
1074 | 23 | (const cpuinfo_task_t) { | |
1075 | .action = ENABLE_CPU, | ||
1076 | .pid = pid, | ||
1077 | .cpuid = cpuid, | ||
1078 | }); | ||
1079 | |||
1080 |
2/2✓ Branch 0 taken 16 times.
✓ Branch 1 taken 7 times.
|
23 | if (cpuinfo->owner != NOBODY |
1081 |
3/6✓ Branch 0 taken 16 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 16 times.
✓ Branch 4 taken 16 times.
✗ Branch 5 not taken.
|
16 | && !CPU_ISSET(cpuid, &shdata->occupied_cores)) { |
1082 | 16 | update_occupied_cores(cpuinfo->owner, cpuinfo->id); | |
1083 | } | ||
1084 | |||
1085 |
1/2✓ Branch 0 taken 23 times.
✗ Branch 1 not taken.
|
23 | CPU_CLR(cpuid, &shdata->free_cpus); |
1086 | |||
1087 | 23 | error = DLB_SUCCESS; | |
1088 |
2/2✓ Branch 0 taken 8 times.
✓ Branch 1 taken 2 times.
|
10 | } else if (cpuinfo->state != CPU_DISABLED) { |
1089 | // CPU is busy, or lent to another process | ||
1090 |
2/2✓ Branch 0 taken 2 times.
✓ Branch 1 taken 6 times.
|
8 | if (shdata->flags.queues_enabled) { |
1091 | /* Queue petition */ | ||
1092 |
1/2✓ Branch 1 taken 2 times.
✗ Branch 2 not taken.
|
2 | if (queue_pid_t_enqueue(&cpuinfo->requests, pid) == 0) { |
1093 | 2 | error = DLB_NOTED; | |
1094 | } else { | ||
1095 | ✗ | error = DLB_ERR_REQST; | |
1096 | } | ||
1097 | } else { | ||
1098 | 6 | error = DLB_NOUPDT; | |
1099 | } | ||
1100 | } else { | ||
1101 | // CPU is disabled | ||
1102 | 2 | error = DLB_ERR_PERM; | |
1103 | } | ||
1104 | |||
1105 | 122 | return error; | |
1106 | } | ||
1107 | |||
1108 | ✗ | static int acquire_core(pid_t pid, cpuid_t core_id, | |
1109 | array_cpuinfo_task_t *restrict tasks, | ||
1110 | unsigned int *num_acquired) { | ||
1111 | |||
1112 | ✗ | int error = DLB_NOUPDT; | |
1113 | ✗ | *num_acquired = 0; | |
1114 | |||
1115 | ✗ | const mu_cpuset_t *core_mask = mu_get_core_mask_by_coreid(core_id); | |
1116 | ✗ | for (int cpuid_in_core = core_mask->first_cpuid; | |
1117 | ✗ | cpuid_in_core >= 0; | |
1118 | ✗ | cpuid_in_core = mu_get_next_cpu(core_mask->set, cpuid_in_core)) { | |
1119 | ✗ | int local_error = acquire_cpu(pid, cpuid_in_core, tasks); | |
1120 | ✗ | if (local_error == DLB_SUCCESS || local_error == DLB_NOTED) { | |
1121 | ✗ | ++(*num_acquired); | |
1122 | ✗ | if (error != DLB_NOTED) { | |
1123 | ✗ | error = local_error; | |
1124 | } | ||
1125 | ✗ | } else if (shdata->node_info[cpuid_in_core].guest == pid) { | |
1126 | /* already guested, continue */ | ||
1127 | } else { | ||
1128 | /* could not be borrowed for other reason, stop iterating */ | ||
1129 | ✗ | break; | |
1130 | } | ||
1131 | } | ||
1132 | |||
1133 | ✗ | return error; | |
1134 | } | ||
1135 | |||
1136 | 174 | static int acquire_cpus_in_array_cpuid_t(pid_t pid, | |
1137 | const array_cpuid_t *restrict array_cpuid, | ||
1138 | int *restrict ncpus, array_cpuinfo_task_t *restrict tasks) { | ||
1139 | |||
1140 | 174 | int error = DLB_NOUPDT; | |
1141 |
2/2✓ Branch 0 taken 172 times.
✓ Branch 1 taken 2 times.
|
174 | int _ncpus = ncpus != NULL ? *ncpus : INT_MAX; |
1142 | 174 | const bool hw_has_smt = shdata->flags.hw_has_smt; | |
1143 | |||
1144 | /* Acquire all CPUs in core if possible | ||
1145 | * (there is a high chance that consecutive CPUs belong to the same core, | ||
1146 | * try to skip those ones) */ | ||
1147 | 174 | int prev_core_id = -1; | |
1148 | 174 | for (unsigned int i = 0; | |
1149 |
4/4✓ Branch 0 taken 226 times.
✓ Branch 1 taken 19 times.
✓ Branch 2 taken 71 times.
✓ Branch 3 taken 155 times.
|
245 | _ncpus > 0 && i < array_cpuid->count; |
1150 | 71 | ++i) { | |
1151 | |||
1152 | 71 | cpuid_t cpuid = array_cpuid->items[i]; | |
1153 | int local_error; | ||
1154 | |||
1155 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 71 times.
|
71 | if (hw_has_smt) { |
1156 | ✗ | cpuid_t core_id = shdata->node_info[cpuid].core_id; | |
1157 | ✗ | if (prev_core_id != core_id) { | |
1158 | unsigned int num_acquired; | ||
1159 | ✗ | local_error = acquire_core(pid, core_id, tasks, &num_acquired); | |
1160 | ✗ | if (local_error == DLB_SUCCESS || local_error == DLB_NOTED) { | |
1161 | ✗ | _ncpus -= num_acquired; | |
1162 | ✗ | if (error != DLB_NOTED) error = local_error; | |
1163 | } | ||
1164 | ✗ | prev_core_id = core_id; | |
1165 | } | ||
1166 | } else { | ||
1167 | 71 | local_error = acquire_cpu(pid, cpuid, tasks); | |
1168 |
3/4✓ Branch 0 taken 8 times.
✓ Branch 1 taken 63 times.
✓ Branch 2 taken 8 times.
✗ Branch 3 not taken.
|
71 | if (local_error == DLB_SUCCESS || local_error == DLB_NOTED) { |
1169 | 71 | --_ncpus; | |
1170 |
2/2✓ Branch 0 taken 69 times.
✓ Branch 1 taken 2 times.
|
71 | if (error != DLB_NOTED) error = local_error; |
1171 | } | ||
1172 | } | ||
1173 | } | ||
1174 | |||
1175 |
2/2✓ Branch 0 taken 172 times.
✓ Branch 1 taken 2 times.
|
174 | if (ncpus != NULL) { |
1176 | 172 | *ncpus = _ncpus; | |
1177 | } | ||
1178 | |||
1179 | 174 | return error; | |
1180 | } | ||
1181 | |||
1182 | 52 | int shmem_cpuinfo__acquire_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) { | |
1183 | |||
1184 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 51 times.
|
52 | if (cpuid >= node_size) return DLB_ERR_PERM; |
1185 | |||
1186 | int error; | ||
1187 | 51 | shmem_lock(shm_handler); | |
1188 | { | ||
1189 | 51 | error = acquire_cpu(pid, cpuid, tasks); | |
1190 | } | ||
1191 | 51 | shmem_unlock(shm_handler); | |
1192 | 51 | return error; | |
1193 | } | ||
1194 | |||
1195 | /* Simplification of shmem_cpuinfo__acquire_ncpus_from_cpu_subset when we just | ||
1196 | * want to iterate all CPUs in set. | ||
1197 | * This function is intended to be called when leaving a blocking call and the | ||
1198 | * process knows that the provided CPUs were previously lent and need to be | ||
1199 | * reclaimed. | ||
1200 | * | ||
1201 | * PRE: all CPUS are owned */ | ||
1202 | 2 | int shmem_cpuinfo__acquire_from_cpu_subset( | |
1203 | pid_t pid, | ||
1204 | const array_cpuid_t *restrict array_cpuid, | ||
1205 | array_cpuinfo_task_t *restrict tasks) { | ||
1206 | |||
1207 | int error; | ||
1208 | 2 | shmem_lock(shm_handler); | |
1209 | { | ||
1210 | 2 | error = acquire_cpus_in_array_cpuid_t(pid, array_cpuid, NULL, tasks); | |
1211 | } | ||
1212 | 2 | shmem_unlock(shm_handler); | |
1213 | 2 | return error; | |
1214 | } | ||
1215 | |||
1216 | static int borrow_cpus_in_array_cpuid_t(pid_t pid, | ||
1217 | const array_cpuid_t *restrict array_cpuid, | ||
1218 | int *restrict ncpus, array_cpuinfo_task_t *restrict tasks); | ||
1219 | |||
1220 | 94 | int shmem_cpuinfo__acquire_ncpus_from_cpu_subset( | |
1221 | pid_t pid, int *restrict requested_ncpus, | ||
1222 | const array_cpuid_t *restrict cpus_priority_array, | ||
1223 | lewi_affinity_t lewi_affinity, int max_parallelism, | ||
1224 | int64_t *restrict last_borrow, array_cpuinfo_task_t *restrict tasks) { | ||
1225 | |||
1226 | /* Return immediately if requested_ncpus is present and not greater than zero */ | ||
1227 |
3/4✓ Branch 0 taken 63 times.
✓ Branch 1 taken 31 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 63 times.
|
94 | if (requested_ncpus && *requested_ncpus <= 0) { |
1228 | ✗ | return DLB_NOUPDT; | |
1229 | } | ||
1230 | |||
1231 | /* Return immediately if there is nothing left to acquire */ | ||
1232 | /* 1) If the timestamp of the last unsuccessful borrow is newer than the last CPU lent */ | ||
1233 |
4/4✓ Branch 0 taken 93 times.
✓ Branch 1 taken 1 times.
✓ Branch 2 taken 8 times.
✓ Branch 3 taken 85 times.
|
94 | if (last_borrow && *last_borrow > DLB_ATOMIC_LD_ACQ(&shdata->timestamp_cpu_lent)) { |
1234 | /* 2) Unless there's an owned CPUs not guested, in that case we will acquire anyway */ | ||
1235 | 8 | bool all_owned_cpus_are_guested = true; | |
1236 |
1/2✓ Branch 0 taken 32 times.
✗ Branch 1 not taken.
|
32 | for (unsigned int i=0; i<cpus_priority_array->count; ++i) { |
1237 | 32 | cpuid_t cpuid = cpus_priority_array->items[i]; | |
1238 | 32 | const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
1239 | /* Iterate until the first not owned CPU */ | ||
1240 |
2/2✓ Branch 0 taken 8 times.
✓ Branch 1 taken 24 times.
|
32 | if (cpuinfo->owner != pid) break; |
1241 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 24 times.
|
24 | if (cpuinfo->guest != pid) { |
1242 | /* This CPU is owned and not guested, no need to iterate anymore */ | ||
1243 | ✗ | all_owned_cpus_are_guested = false; | |
1244 | ✗ | break; | |
1245 | } | ||
1246 | } | ||
1247 |
1/2✓ Branch 0 taken 8 times.
✗ Branch 1 not taken.
|
8 | if (all_owned_cpus_are_guested) { |
1248 | 8 | return DLB_NOUPDT; | |
1249 | } | ||
1250 | } | ||
1251 | |||
1252 | /* Return immediately if the process has reached the max_parallelism */ | ||
1253 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 86 times.
|
86 | if (max_parallelism != 0) { |
1254 | ✗ | for (unsigned int i=0; i<cpus_priority_array->count; ++i) { | |
1255 | ✗ | cpuid_t cpuid = cpus_priority_array->items[i]; | |
1256 | ✗ | const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
1257 | ✗ | if (cpuinfo->guest == pid) { | |
1258 | ✗ | --max_parallelism; | |
1259 | } | ||
1260 | } | ||
1261 | ✗ | if (max_parallelism <= 0) { | |
1262 | ✗ | return DLB_NOUPDT; | |
1263 | } | ||
1264 | } | ||
1265 | |||
1266 | /* Compute the max number of CPUs to acquire */ | ||
1267 |
2/2✓ Branch 0 taken 55 times.
✓ Branch 1 taken 31 times.
|
86 | int ncpus = requested_ncpus ? *requested_ncpus : node_size; |
1268 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 86 times.
|
86 | if (max_parallelism > 0) { |
1269 | ✗ | ncpus = min_int(ncpus, max_parallelism); | |
1270 | } | ||
1271 | |||
1272 | /* Arrays for temporary CPU priority (lazy initialized and always used within the lock) */ | ||
1273 | static array_cpuid_t owned_idle = {}; | ||
1274 | static array_cpuid_t owned_non_idle = {}; | ||
1275 | static array_cpuid_t non_owned = {}; | ||
1276 | |||
1277 | 86 | int error = DLB_NOUPDT; | |
1278 | 86 | shmem_lock(shm_handler); | |
1279 | { | ||
1280 | /* Lazy init first time, clear afterwards */ | ||
1281 |
2/2✓ Branch 0 taken 75 times.
✓ Branch 1 taken 11 times.
|
86 | if (likely(owned_idle.items != NULL)) { |
1282 | 75 | array_cpuid_t_clear(&owned_idle); | |
1283 | 75 | array_cpuid_t_clear(&owned_non_idle); | |
1284 | 75 | array_cpuid_t_clear(&non_owned); | |
1285 | } else { | ||
1286 | 11 | array_cpuid_t_init(&owned_idle, node_size); | |
1287 | 11 | array_cpuid_t_init(&owned_non_idle, node_size); | |
1288 | 11 | array_cpuid_t_init(&non_owned, node_size); | |
1289 | } | ||
1290 | |||
1291 | /* Iterate cpus_priority_array and construct all sub-arrays */ | ||
1292 |
2/2✓ Branch 0 taken 562 times.
✓ Branch 1 taken 86 times.
|
648 | for (unsigned int i = 0; i < cpus_priority_array->count; ++i) { |
1293 | 562 | cpuid_t cpuid = cpus_priority_array->items[i]; | |
1294 | 562 | const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
1295 |
2/2✓ Branch 0 taken 187 times.
✓ Branch 1 taken 375 times.
|
562 | if (cpuinfo->owner == pid) { |
1296 |
2/2✓ Branch 0 taken 62 times.
✓ Branch 1 taken 125 times.
|
187 | if (cpuinfo->guest == NOBODY) { |
1297 | 62 | array_cpuid_t_push(&owned_idle, cpuid); | |
1298 |
2/2✓ Branch 0 taken 8 times.
✓ Branch 1 taken 117 times.
|
125 | } else if (cpuinfo->guest != pid) { |
1299 | 8 | array_cpuid_t_push(&owned_non_idle, cpuid); | |
1300 | } | ||
1301 |
2/2✓ Branch 0 taken 271 times.
✓ Branch 1 taken 104 times.
|
375 | } else if (cpuinfo->guest == NOBODY) { |
1302 | 271 | array_cpuid_t_push(&non_owned, cpuid); | |
1303 | } | ||
1304 | } | ||
1305 | |||
1306 | /* Acquire first owned CPUs that are IDLE */ | ||
1307 | 86 | int local_error = acquire_cpus_in_array_cpuid_t(pid, &owned_idle, &ncpus, tasks); | |
1308 |
3/4✓ Branch 0 taken 57 times.
✓ Branch 1 taken 29 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 57 times.
|
86 | if (local_error == DLB_SUCCESS || local_error == DLB_NOTED) { |
1309 | /* Update error code if needed */ | ||
1310 |
1/2✓ Branch 0 taken 29 times.
✗ Branch 1 not taken.
|
29 | if (error != DLB_NOTED) error = local_error; |
1311 | } | ||
1312 | |||
1313 | /* Acquire the rest of owned CPUs */ | ||
1314 | 86 | local_error = acquire_cpus_in_array_cpuid_t(pid, &owned_non_idle, &ncpus, tasks); | |
1315 |
3/4✓ Branch 0 taken 86 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 5 times.
✓ Branch 3 taken 81 times.
|
86 | if (local_error == DLB_SUCCESS || local_error == DLB_NOTED) { |
1316 | /* Update error code if needed */ | ||
1317 |
1/2✓ Branch 0 taken 5 times.
✗ Branch 1 not taken.
|
5 | if (error != DLB_NOTED) error = local_error; |
1318 | } | ||
1319 | |||
1320 | /* Borrow non-owned CPUs */ | ||
1321 | 86 | local_error = borrow_cpus_in_array_cpuid_t(pid, &non_owned, &ncpus, tasks); | |
1322 |
2/2✓ Branch 0 taken 39 times.
✓ Branch 1 taken 47 times.
|
86 | if (local_error == DLB_SUCCESS) { |
1323 | /* Update error code if needed */ | ||
1324 |
1/2✓ Branch 0 taken 39 times.
✗ Branch 1 not taken.
|
39 | if (error != DLB_NOTED) error = local_error; |
1325 | } | ||
1326 | |||
1327 | /* Add global petition for remaining CPUs if needed */ | ||
1328 |
2/2✓ Branch 0 taken 2 times.
✓ Branch 1 taken 84 times.
|
86 | if (shdata->flags.queues_enabled |
1329 |
1/2✓ Branch 0 taken 2 times.
✗ Branch 1 not taken.
|
2 | && requested_ncpus |
1330 |
1/2✓ Branch 0 taken 2 times.
✗ Branch 1 not taken.
|
2 | && ncpus > 0) { |
1331 | /* Construct a mask of allowed CPUs */ | ||
1332 | 2 | lewi_mask_request_t request = { | |
1333 | .pid = pid, | ||
1334 | .howmany = ncpus, | ||
1335 | }; | ||
1336 | 2 | CPU_ZERO(&request.allowed); | |
1337 |
2/2✓ Branch 0 taken 16 times.
✓ Branch 1 taken 2 times.
|
18 | for (unsigned int i=0; i<cpus_priority_array->count; ++i) { |
1338 | 16 | cpuid_t cpuid = cpus_priority_array->items[i]; | |
1339 |
1/2✓ Branch 0 taken 16 times.
✗ Branch 1 not taken.
|
16 | CPU_SET(cpuid, &request.allowed); |
1340 | } | ||
1341 | |||
1342 | /* Enqueue request */ | ||
1343 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 2 times.
|
2 | verbose(VB_SHMEM, "Requesting %d CPUs more after acquiring", ncpus); |
1344 | lewi_mask_request_t *it; | ||
1345 | 2 | for (it = queue_lewi_mask_request_t_front(&shdata->lewi_mask_requests); | |
1346 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 2 times.
|
3 | it != NULL; |
1347 | 1 | it = queue_lewi_mask_request_t_next(&shdata->lewi_mask_requests, it)) { | |
1348 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | if (it->pid == pid |
1349 | ✗ | && CPU_EQUAL(&request.allowed, &it->allowed)) { | |
1350 | /* update entry */ | ||
1351 | ✗ | it->howmany += request.howmany; | |
1352 | ✗ | error = DLB_NOTED; | |
1353 | ✗ | break; | |
1354 | } | ||
1355 | } | ||
1356 |
1/2✓ Branch 0 taken 2 times.
✗ Branch 1 not taken.
|
2 | if (it == NULL) { |
1357 | /* or add new entry */ | ||
1358 |
1/2✓ Branch 0 taken 2 times.
✗ Branch 1 not taken.
|
2 | if (queue_lewi_mask_request_t_enqueue( |
1359 | 2 | &shdata->lewi_mask_requests, request) == 0) { | |
1360 | 2 | error = DLB_NOTED; | |
1361 | } else { | ||
1362 | ✗ | error = DLB_ERR_REQST; | |
1363 | } | ||
1364 | } | ||
1365 | } | ||
1366 | |||
1367 | /* Update timestamp if borrow did not succeed */ | ||
1368 |
6/6✓ Branch 0 taken 85 times.
✓ Branch 1 taken 1 times.
✓ Branch 2 taken 25 times.
✓ Branch 3 taken 60 times.
✓ Branch 4 taken 18 times.
✓ Branch 5 taken 7 times.
|
86 | if (last_borrow != NULL && error != DLB_SUCCESS && error != DLB_NOTED) { |
1369 | 18 | *last_borrow = get_time_in_ns(); | |
1370 | } | ||
1371 | } | ||
1372 | 86 | shmem_unlock(shm_handler); | |
1373 | 86 | return error; | |
1374 | } | ||
1375 | |||
1376 | |||
1377 | /*********************************************************************************/ | ||
1378 | /* Borrow CPU */ | ||
1379 | /*********************************************************************************/ | ||
1380 | |||
1381 | 314 | static int borrow_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) { | |
1382 | |||
1383 | 314 | int error = DLB_NOUPDT; | |
1384 | 314 | cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
1385 | |||
1386 |
2/2✓ Branch 0 taken 250 times.
✓ Branch 1 taken 64 times.
|
314 | if (cpuinfo->guest == NOBODY |
1387 |
1/2✓ Branch 1 taken 250 times.
✗ Branch 2 not taken.
|
250 | && core_is_eligible(pid, cpuid)) { |
1388 |
2/2✓ Branch 0 taken 14 times.
✓ Branch 1 taken 236 times.
|
250 | if (cpuinfo->owner == pid) { |
1389 | // CPU is owned by the process | ||
1390 | 14 | cpuinfo->state = CPU_BUSY; | |
1391 | 14 | cpuinfo->guest = pid; | |
1392 | 14 | array_cpuinfo_task_t_push( | |
1393 | tasks, | ||
1394 | 14 | (const cpuinfo_task_t) { | |
1395 | .action = ENABLE_CPU, | ||
1396 | .pid = pid, | ||
1397 | .cpuid = cpuid, | ||
1398 | }); | ||
1399 | 14 | error = DLB_SUCCESS; | |
1400 |
1/2✓ Branch 0 taken 14 times.
✗ Branch 1 not taken.
|
14 | CPU_CLR(cpuid, &shdata->free_cpus); |
1401 |
2/2✓ Branch 0 taken 84 times.
✓ Branch 1 taken 152 times.
|
236 | } else if (cpuinfo->state == CPU_LENT) { |
1402 | // CPU is available | ||
1403 | 84 | cpuinfo->guest = pid; | |
1404 | 84 | array_cpuinfo_task_t_push( | |
1405 | tasks, | ||
1406 | 84 | (const cpuinfo_task_t) { | |
1407 | .action = ENABLE_CPU, | ||
1408 | .pid = pid, | ||
1409 | .cpuid = cpuid, | ||
1410 | }); | ||
1411 | 84 | error = DLB_SUCCESS; | |
1412 |
1/2✓ Branch 0 taken 84 times.
✗ Branch 1 not taken.
|
84 | CPU_CLR(cpuid, &shdata->free_cpus); |
1413 |
2/2✓ Branch 0 taken 73 times.
✓ Branch 1 taken 11 times.
|
84 | if (cpuinfo->owner != NOBODY |
1414 |
5/6✓ Branch 0 taken 73 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 6 times.
✓ Branch 3 taken 67 times.
✓ Branch 4 taken 67 times.
✓ Branch 5 taken 6 times.
|
73 | && !CPU_ISSET(cpuid, &shdata->occupied_cores)) { |
1415 | 67 | update_occupied_cores(cpuinfo->owner, cpuinfo->id); | |
1416 | } | ||
1417 | } | ||
1418 | } | ||
1419 | |||
1420 | 314 | return error; | |
1421 | } | ||
1422 | |||
1423 | 16 | static int borrow_core(pid_t pid, cpuid_t core_id, array_cpuinfo_task_t *restrict tasks, | |
1424 | unsigned int *num_borrowed) { | ||
1425 | |||
1426 | 16 | int error = DLB_NOUPDT; | |
1427 | 16 | *num_borrowed = 0; | |
1428 | |||
1429 | 16 | const mu_cpuset_t *core_mask = mu_get_core_mask_by_coreid(core_id); | |
1430 | 16 | for (int cpuid_in_core = core_mask->first_cpuid; | |
1431 |
2/2✓ Branch 0 taken 46 times.
✓ Branch 1 taken 10 times.
|
56 | cpuid_in_core >= 0; |
1432 | 40 | cpuid_in_core = mu_get_next_cpu(core_mask->set, cpuid_in_core)) { | |
1433 |
2/2✓ Branch 1 taken 8 times.
✓ Branch 2 taken 38 times.
|
46 | if (borrow_cpu(pid, cpuid_in_core, tasks) == DLB_SUCCESS) { |
1434 | /* successfully borrowed, continue */ | ||
1435 | 8 | ++(*num_borrowed); | |
1436 | 8 | error = DLB_SUCCESS; | |
1437 |
2/2✓ Branch 0 taken 6 times.
✓ Branch 1 taken 32 times.
|
38 | } else if (shdata->node_info[cpuid_in_core].guest == pid) { |
1438 | /* already guested, continue */ | ||
1439 | } else { | ||
1440 | /* could not be borrowed for other reason, stop iterating */ | ||
1441 | 6 | break; | |
1442 | } | ||
1443 | } | ||
1444 | |||
1445 | 16 | return error; | |
1446 | } | ||
1447 | |||
1448 | /* Iterate array_cpuid_t and borrow all possible CPUs */ | ||
1449 | 108 | static int borrow_cpus_in_array_cpuid_t(pid_t pid, | |
1450 | const array_cpuid_t *restrict array_cpuid, | ||
1451 | int *restrict ncpus, array_cpuinfo_task_t *restrict tasks) { | ||
1452 | |||
1453 | 108 | int error = DLB_NOUPDT; | |
1454 |
2/2✓ Branch 0 taken 106 times.
✓ Branch 1 taken 2 times.
|
108 | int _ncpus = ncpus != NULL ? *ncpus : INT_MAX; |
1455 | 108 | const bool hw_has_smt = shdata->flags.hw_has_smt; | |
1456 | |||
1457 | /* Borrow all CPUs in core if possible | ||
1458 | * (there is a high chance that consecutive CPUs belong to the same core, | ||
1459 | * try to skip those ones) */ | ||
1460 | 108 | int prev_core_id = -1; | |
1461 | 108 | for (unsigned int i = 0; | |
1462 |
4/4✓ Branch 0 taken 392 times.
✓ Branch 1 taken 36 times.
✓ Branch 2 taken 320 times.
✓ Branch 3 taken 72 times.
|
428 | _ncpus > 0 && i < array_cpuid->count; |
1463 | 320 | ++i) { | |
1464 | |||
1465 | 320 | cpuid_t cpuid = array_cpuid->items[i]; | |
1466 | |||
1467 |
2/2✓ Branch 0 taken 58 times.
✓ Branch 1 taken 262 times.
|
320 | if (hw_has_smt) { |
1468 | 58 | cpuid_t core_id = shdata->node_info[cpuid].core_id; | |
1469 |
2/2✓ Branch 0 taken 16 times.
✓ Branch 1 taken 42 times.
|
58 | if (prev_core_id != core_id) { |
1470 | unsigned int num_borrowed; | ||
1471 |
2/2✓ Branch 1 taken 2 times.
✓ Branch 2 taken 14 times.
|
16 | if (borrow_core(pid, core_id, tasks, &num_borrowed) == DLB_SUCCESS) { |
1472 | 2 | _ncpus -= num_borrowed; | |
1473 | 2 | error = DLB_SUCCESS; | |
1474 | } | ||
1475 | 16 | prev_core_id = core_id; | |
1476 | } | ||
1477 | } else { | ||
1478 |
2/2✓ Branch 1 taken 84 times.
✓ Branch 2 taken 178 times.
|
262 | if (borrow_cpu(pid, cpuid, tasks) == DLB_SUCCESS) { |
1479 | 84 | --_ncpus; | |
1480 | 84 | error = DLB_SUCCESS; | |
1481 | } | ||
1482 | } | ||
1483 | } | ||
1484 | |||
1485 |
2/2✓ Branch 0 taken 106 times.
✓ Branch 1 taken 2 times.
|
108 | if (ncpus != NULL) { |
1486 | 106 | *ncpus = _ncpus; | |
1487 | } | ||
1488 | |||
1489 | 108 | return error; | |
1490 | } | ||
1491 | |||
1492 | /* Iterate cpu_set_t and borrow all possible CPUs */ | ||
1493 | ✗ | static int borrow_cpus_in_cpu_set_t(pid_t pid, | |
1494 | const cpu_set_t *restrict cpu_set, | ||
1495 | int *restrict ncpus, array_cpuinfo_task_t *restrict tasks) { | ||
1496 | |||
1497 | ✗ | int error = DLB_NOUPDT; | |
1498 | ✗ | int _ncpus = ncpus != NULL ? *ncpus : INT_MAX; | |
1499 | ✗ | const bool hw_has_smt = shdata->flags.hw_has_smt; | |
1500 | |||
1501 | /* Borrow all CPUs in core if possible | ||
1502 | * (there is a high chance that consecutive CPUs belong to the same core, | ||
1503 | * try to skip those ones) */ | ||
1504 | ✗ | int prev_core_id = -1; | |
1505 | ✗ | for (int cpuid = mu_get_first_cpu(cpu_set); | |
1506 | ✗ | cpuid >= 0 && _ncpus > 0; | |
1507 | ✗ | cpuid = mu_get_next_cpu(cpu_set, cpuid)) { | |
1508 | |||
1509 | ✗ | if (hw_has_smt) { | |
1510 | ✗ | cpuid_t core_id = shdata->node_info[cpuid].core_id; | |
1511 | ✗ | if (prev_core_id != core_id) { | |
1512 | unsigned int num_borrowed; | ||
1513 | ✗ | if (borrow_core(pid, core_id, tasks, &num_borrowed) == DLB_SUCCESS) { | |
1514 | ✗ | _ncpus -= num_borrowed; | |
1515 | ✗ | error = DLB_SUCCESS; | |
1516 | } | ||
1517 | ✗ | prev_core_id = core_id; | |
1518 | } | ||
1519 | } else { | ||
1520 | ✗ | if (borrow_cpu(pid, cpuid, tasks) == DLB_SUCCESS) { | |
1521 | ✗ | --_ncpus; | |
1522 | ✗ | error = DLB_SUCCESS; | |
1523 | } | ||
1524 | } | ||
1525 | } | ||
1526 | |||
1527 | ✗ | if (ncpus != NULL) { | |
1528 | ✗ | *ncpus = _ncpus; | |
1529 | } | ||
1530 | |||
1531 | ✗ | return error; | |
1532 | } | ||
1533 | |||
1534 | |||
1535 | 6 | int shmem_cpuinfo__borrow_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) { | |
1536 | |||
1537 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
|
6 | if (cpuid >= node_size) return DLB_ERR_PERM; |
1538 | |||
1539 | int error; | ||
1540 | 6 | shmem_lock(shm_handler); | |
1541 | { | ||
1542 | 6 | error = borrow_cpu(pid, cpuid, tasks); | |
1543 | } | ||
1544 | 6 | shmem_unlock(shm_handler); | |
1545 | 6 | return error; | |
1546 | } | ||
1547 | |||
1548 | /* Simplification of shmem_cpuinfo__borrow_ncpus_from_cpu_subset when we just | ||
1549 | * want to iterate all CPUs in set. | ||
1550 | * This function is intended to be called when leaving a blocking call and the | ||
1551 | * process knows that the provided CPUs were previously lent and may try to | ||
1552 | * borrow again. | ||
1553 | */ | ||
1554 | 2 | int shmem_cpuinfo__borrow_from_cpu_subset( | |
1555 | pid_t pid, | ||
1556 | const array_cpuid_t *restrict array_cpuid, | ||
1557 | array_cpuinfo_task_t *restrict tasks) { | ||
1558 | |||
1559 | int error; | ||
1560 | 2 | shmem_lock(shm_handler); | |
1561 | { | ||
1562 | 2 | error = borrow_cpus_in_array_cpuid_t(pid, array_cpuid, NULL, tasks); | |
1563 | } | ||
1564 | 2 | shmem_unlock(shm_handler); | |
1565 | 2 | return error; | |
1566 | } | ||
1567 | |||
1568 | 22 | int shmem_cpuinfo__borrow_ncpus_from_cpu_subset( | |
1569 | pid_t pid, int *restrict requested_ncpus, | ||
1570 | const array_cpuid_t *restrict cpus_priority_array, lewi_affinity_t lewi_affinity, | ||
1571 | int max_parallelism, int64_t *restrict last_borrow, | ||
1572 | array_cpuinfo_task_t *restrict tasks) { | ||
1573 | |||
1574 | /* Return immediately if requested_ncpus is present and not greater than zero */ | ||
1575 |
3/4✓ Branch 0 taken 14 times.
✓ Branch 1 taken 8 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 14 times.
|
22 | if (requested_ncpus && *requested_ncpus <= 0) { |
1576 | ✗ | return DLB_NOUPDT; | |
1577 | } | ||
1578 | |||
1579 | /* Return immediately if the timestamp of the last unsuccessful borrow is | ||
1580 | * newer than the last CPU lent */ | ||
1581 |
3/4✓ Branch 0 taken 21 times.
✓ Branch 1 taken 1 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 21 times.
|
22 | if (last_borrow && *last_borrow > DLB_ATOMIC_LD_ACQ(&shdata->timestamp_cpu_lent)) { |
1582 | ✗ | return DLB_NOUPDT; | |
1583 | } | ||
1584 | |||
1585 | /* Return immediately if the process has reached the max_parallelism */ | ||
1586 |
2/2✓ Branch 0 taken 2 times.
✓ Branch 1 taken 20 times.
|
22 | if (max_parallelism != 0) { |
1587 |
2/2✓ Branch 0 taken 8 times.
✓ Branch 1 taken 2 times.
|
10 | for (unsigned int i=0; i<cpus_priority_array->count; ++i) { |
1588 | 8 | cpuid_t cpuid = cpus_priority_array->items[i]; | |
1589 | 8 | const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
1590 |
2/2✓ Branch 0 taken 4 times.
✓ Branch 1 taken 4 times.
|
8 | if (cpuinfo->guest == pid) { |
1591 | 4 | --max_parallelism; | |
1592 | } | ||
1593 | } | ||
1594 |
1/2✓ Branch 0 taken 2 times.
✗ Branch 1 not taken.
|
2 | if (max_parallelism <= 0) { |
1595 | 2 | return DLB_NOUPDT; | |
1596 | } | ||
1597 | } | ||
1598 | |||
1599 | /* Compute the max number of CPUs to borrow */ | ||
1600 |
2/2✓ Branch 0 taken 14 times.
✓ Branch 1 taken 6 times.
|
20 | int ncpus = requested_ncpus ? *requested_ncpus : node_size; |
1601 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 20 times.
|
20 | if (max_parallelism > 0) { |
1602 | ✗ | ncpus = min_int(ncpus, max_parallelism); | |
1603 | } | ||
1604 | |||
1605 | 20 | int error = DLB_NOUPDT; | |
1606 | 20 | shmem_lock(shm_handler); | |
1607 | { | ||
1608 | /* Skip borrow if no CPUs in the free_cpus mask */ | ||
1609 |
2/2✓ Branch 1 taken 1 times.
✓ Branch 2 taken 19 times.
|
20 | if (CPU_COUNT(&shdata->free_cpus) == 0) { |
1610 | 1 | ncpus = 0; | |
1611 | } | ||
1612 | |||
1613 | /* Borrow CPUs in the cpus_priority_array */ | ||
1614 |
2/2✓ Branch 1 taken 16 times.
✓ Branch 2 taken 4 times.
|
20 | if (borrow_cpus_in_array_cpuid_t(pid, cpus_priority_array, &ncpus, tasks) == DLB_SUCCESS) { |
1615 | 16 | error = DLB_SUCCESS; | |
1616 | } | ||
1617 | |||
1618 | /* Only if --priority=spread-ifempty, borrow CPUs if there are free NUMA nodes */ | ||
1619 |
1/4✗ Branch 0 not taken.
✓ Branch 1 taken 20 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
|
20 | if (lewi_affinity == LEWI_AFFINITY_SPREAD_IFEMPTY && ncpus > 0) { |
1620 | cpu_set_t free_nodes; | ||
1621 | ✗ | mu_get_nodes_subset_of_cpuset(&free_nodes, &shdata->free_cpus); | |
1622 | ✗ | if (borrow_cpus_in_cpu_set_t(pid, &free_nodes, &ncpus, tasks) == DLB_SUCCESS) { | |
1623 | ✗ | error = DLB_SUCCESS; | |
1624 | } | ||
1625 | } | ||
1626 | } | ||
1627 | 20 | shmem_unlock(shm_handler); | |
1628 | |||
1629 | /* Update timestamp if borrow did not succeed */ | ||
1630 |
4/4✓ Branch 0 taken 19 times.
✓ Branch 1 taken 1 times.
✓ Branch 2 taken 4 times.
✓ Branch 3 taken 15 times.
|
20 | if (last_borrow != NULL && error != DLB_SUCCESS) { |
1631 | 4 | *last_borrow = get_time_in_ns(); | |
1632 | } | ||
1633 | |||
1634 | 20 | return error; | |
1635 | } | ||
1636 | |||
1637 | |||
1638 | /*********************************************************************************/ | ||
1639 | /* Return CPU */ | ||
1640 | /*********************************************************************************/ | ||
1641 | |||
1642 | /* Return CPU | ||
1643 | * Abandon CPU given that state == BUSY, owner != pid, guest == pid | ||
1644 | */ | ||
1645 | 33 | static int return_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) { | |
1646 | |||
1647 | 33 | cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
1648 | |||
1649 |
2/2✓ Branch 0 taken 32 times.
✓ Branch 1 taken 1 times.
|
33 | if (cpuinfo->owner == pid |
1650 |
1/2✓ Branch 0 taken 32 times.
✗ Branch 1 not taken.
|
32 | || cpuinfo->guest != pid |
1651 |
2/2✓ Branch 0 taken 5 times.
✓ Branch 1 taken 27 times.
|
32 | || (cpuinfo->state == CPU_LENT |
1652 |
2/2✓ Branch 1 taken 2 times.
✓ Branch 2 taken 3 times.
|
5 | && core_is_eligible(pid, cpuid))) { |
1653 | 3 | return DLB_NOUPDT; | |
1654 | } | ||
1655 | |||
1656 | // Return CPU | ||
1657 |
2/2✓ Branch 0 taken 26 times.
✓ Branch 1 taken 4 times.
|
30 | if (cpuinfo->state == CPU_BUSY) { |
1658 | 26 | cpuinfo->guest = cpuinfo->owner; | |
1659 | } else { | ||
1660 | /* state is disabled or the core is not eligible */ | ||
1661 | 4 | cpuinfo->guest = NOBODY; | |
1662 |
1/2✓ Branch 0 taken 4 times.
✗ Branch 1 not taken.
|
4 | CPU_SET(cpuid, &shdata->free_cpus); |
1663 | } | ||
1664 | |||
1665 | // Possibly clear CPU from occupies cores set | ||
1666 | 30 | update_occupied_cores(cpuinfo->owner, cpuinfo->id); | |
1667 | |||
1668 | // current subprocess to disable cpu | ||
1669 | 30 | array_cpuinfo_task_t_push( | |
1670 | tasks, | ||
1671 | 30 | (const cpuinfo_task_t) { | |
1672 | .action = DISABLE_CPU, | ||
1673 | .pid = pid, | ||
1674 | .cpuid = cpuid, | ||
1675 | }); | ||
1676 | |||
1677 | 30 | return DLB_SUCCESS; | |
1678 | } | ||
1679 | |||
1680 | 6 | int shmem_cpuinfo__return_all(pid_t pid, array_cpuinfo_task_t *restrict tasks) { | |
1681 | |||
1682 | 6 | int error = DLB_NOUPDT; | |
1683 | 6 | shmem_lock(shm_handler); | |
1684 | { | ||
1685 | 6 | for (int cpuid = mu_get_first_cpu(&shdata->occupied_cores); | |
1686 |
2/2✓ Branch 0 taken 8 times.
✓ Branch 1 taken 6 times.
|
14 | cpuid >= 0; |
1687 | 8 | cpuid = mu_get_next_cpu(&shdata->occupied_cores, cpuid)) { | |
1688 | 8 | int local_error = return_cpu(pid, cpuid, tasks); | |
1689 |
1/4✗ Branch 0 not taken.
✓ Branch 1 taken 8 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
|
8 | switch(local_error) { |
1690 | ✗ | case DLB_ERR_REQST: | |
1691 | // max priority, always overwrite | ||
1692 | ✗ | error = DLB_ERR_REQST; | |
1693 | ✗ | break; | |
1694 | 8 | case DLB_SUCCESS: | |
1695 | // medium priority, only update if error is in lowest priority | ||
1696 |
2/2✓ Branch 0 taken 5 times.
✓ Branch 1 taken 3 times.
|
8 | error = (error == DLB_NOUPDT) ? DLB_SUCCESS : error; |
1697 | 8 | break; | |
1698 | ✗ | case DLB_NOUPDT: | |
1699 | // lowest priority, default value | ||
1700 | ✗ | break; | |
1701 | } | ||
1702 | } | ||
1703 | } | ||
1704 | 6 | shmem_unlock(shm_handler); | |
1705 | 6 | return error; | |
1706 | } | ||
1707 | |||
1708 | 22 | int shmem_cpuinfo__return_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) { | |
1709 | |||
1710 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 22 times.
|
22 | if (cpuid >= node_size) return DLB_ERR_PERM; |
1711 | |||
1712 | int error; | ||
1713 | 22 | shmem_lock(shm_handler); | |
1714 | { | ||
1715 |
2/2✓ Branch 0 taken 2 times.
✓ Branch 1 taken 20 times.
|
22 | if (unlikely(shdata->node_info[cpuid].guest != pid)) { |
1716 | 2 | error = DLB_ERR_PERM; | |
1717 | } else { | ||
1718 | 20 | error = return_cpu(pid, cpuid, tasks); | |
1719 | } | ||
1720 | } | ||
1721 | 22 | shmem_unlock(shm_handler); | |
1722 | 22 | return error; | |
1723 | } | ||
1724 | |||
1725 | 5 | int shmem_cpuinfo__return_cpu_mask(pid_t pid, const cpu_set_t *mask, | |
1726 | array_cpuinfo_task_t *restrict tasks) { | ||
1727 | |||
1728 | 5 | int error = DLB_NOUPDT; | |
1729 | 5 | shmem_lock(shm_handler); | |
1730 | { | ||
1731 | cpu_set_t cpus_to_return; | ||
1732 |
2/2✓ Branch 0 taken 80 times.
✓ Branch 1 taken 5 times.
|
85 | CPU_AND(&cpus_to_return, mask, &shdata->occupied_cores); |
1733 | |||
1734 | 5 | for (int cpuid = mu_get_first_cpu(&cpus_to_return); | |
1735 |
2/2✓ Branch 0 taken 4 times.
✓ Branch 1 taken 5 times.
|
9 | cpuid >= 0; |
1736 | 4 | cpuid = mu_get_next_cpu(&cpus_to_return, cpuid)) { | |
1737 | 4 | int local_error = return_cpu(pid, cpuid, tasks); | |
1738 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 4 times.
|
4 | error = (error < 0) ? error : local_error; |
1739 | } | ||
1740 | } | ||
1741 | 5 | shmem_unlock(shm_handler); | |
1742 | 5 | return error; | |
1743 | } | ||
1744 | |||
1745 | 1 | static inline void shmem_cpuinfo__return_async(pid_t pid, cpuid_t cpuid) { | |
1746 | 1 | cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
1747 | |||
1748 |
2/4✓ Branch 0 taken 1 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 1 times.
✗ Branch 3 not taken.
|
1 | ensure(cpuinfo->owner != pid |
1749 | && cpuinfo->guest == pid, "cpuinfo inconsistency in %s", __func__); | ||
1750 | |||
1751 | /* 'cpuid' should only be a guested non-owned CPU */ | ||
1752 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | if (cpuinfo->state == CPU_BUSY) { |
1753 | ✗ | cpuinfo->guest = cpuinfo->owner; | |
1754 | } else { | ||
1755 | 1 | cpuinfo->guest = NOBODY; | |
1756 |
1/2✓ Branch 0 taken 1 times.
✗ Branch 1 not taken.
|
1 | CPU_SET(cpuid, &shdata->free_cpus); |
1757 | } | ||
1758 | |||
1759 | // Possibly clear CPU from occupies cores set | ||
1760 | 1 | update_occupied_cores(cpuinfo->owner, cpuinfo->id); | |
1761 | |||
1762 | /* Add another CPU request */ | ||
1763 | 1 | queue_pid_t_enqueue(&cpuinfo->requests, pid); | |
1764 | 1 | } | |
1765 | |||
1766 | /* Only for asynchronous mode. This function is intended to be called after | ||
1767 | * a disable_cpu callback. | ||
1768 | * This function resolves returned CPUs, fixes guest and add a new request */ | ||
1769 | 1 | void shmem_cpuinfo__return_async_cpu(pid_t pid, cpuid_t cpuid) { | |
1770 | |||
1771 | 1 | shmem_lock(shm_handler); | |
1772 | { | ||
1773 | 1 | shmem_cpuinfo__return_async(pid, cpuid); | |
1774 | } | ||
1775 | 1 | shmem_unlock(shm_handler); | |
1776 | 1 | } | |
1777 | |||
1778 | /* Only for asynchronous mode. This is function is intended to be called after | ||
1779 | * a disable_cpu callback. | ||
1780 | * This function resolves returned CPUs, fixes guest and add a new request */ | ||
1781 | ✗ | void shmem_cpuinfo__return_async_cpu_mask(pid_t pid, const cpu_set_t *mask) { | |
1782 | |||
1783 | ✗ | shmem_lock(shm_handler); | |
1784 | { | ||
1785 | ✗ | for (int cpuid = mu_get_first_cpu(mask); | |
1786 | ✗ | cpuid >= 0; | |
1787 | ✗ | cpuid = mu_get_next_cpu(mask, cpuid)) { | |
1788 | |||
1789 | ✗ | shmem_cpuinfo__return_async(pid, cpuid); | |
1790 | } | ||
1791 | } | ||
1792 | ✗ | shmem_unlock(shm_handler); | |
1793 | } | ||
1794 | |||
1795 | |||
1796 | /*********************************************************************************/ | ||
1797 | /* */ | ||
1798 | /*********************************************************************************/ | ||
1799 | |||
1800 | /* Called when lewi_mask_finalize. | ||
1801 | * This function deregisters pid, disabling or lending CPUs as needed */ | ||
1802 | 30 | int shmem_cpuinfo__deregister(pid_t pid, array_cpuinfo_task_t *restrict tasks) { | |
1803 | 30 | int error = DLB_SUCCESS; | |
1804 | 30 | shmem_lock(shm_handler); | |
1805 | { | ||
1806 | // Remove any request before acquiring and lending | ||
1807 |
2/2✓ Branch 0 taken 5 times.
✓ Branch 1 taken 25 times.
|
30 | if (shdata->flags.queues_enabled) { |
1808 | 5 | queue_lewi_mask_request_t_remove(&shdata->lewi_mask_requests, pid); | |
1809 |
2/2✓ Branch 0 taken 24 times.
✓ Branch 1 taken 5 times.
|
29 | for (int cpuid=0; cpuid<node_size; ++cpuid) { |
1810 | 24 | cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
1811 |
2/2✓ Branch 0 taken 8 times.
✓ Branch 1 taken 16 times.
|
24 | if (cpuinfo->owner != pid) { |
1812 | 8 | queue_pid_t_remove(&cpuinfo->requests, pid); | |
1813 | } | ||
1814 | } | ||
1815 | } | ||
1816 | |||
1817 | // Iterate again to properly treat each CPU | ||
1818 |
2/2✓ Branch 0 taken 400 times.
✓ Branch 1 taken 30 times.
|
430 | for (int cpuid=0; cpuid<node_size; ++cpuid) { |
1819 | 400 | cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
1820 |
2/2✓ Branch 0 taken 154 times.
✓ Branch 1 taken 246 times.
|
400 | if (cpuinfo->owner == pid) { |
1821 |
3/4✓ Branch 0 taken 140 times.
✓ Branch 1 taken 14 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 140 times.
|
154 | if (cpu_is_public_post_mortem || !respect_cpuset) { |
1822 | /* Lend if public */ | ||
1823 | 14 | lend_cpu(pid, cpuid, tasks); | |
1824 | } else { | ||
1825 | /* If CPU won't be public, it must be reclaimed beforehand */ | ||
1826 | 140 | reclaim_cpu(pid, cpuid, tasks); | |
1827 |
2/2✓ Branch 0 taken 139 times.
✓ Branch 1 taken 1 times.
|
140 | if (cpuinfo->guest == pid) { |
1828 | 139 | cpuinfo->guest = NOBODY; | |
1829 | } | ||
1830 | 140 | cpuinfo->state = CPU_DISABLED; | |
1831 |
1/2✓ Branch 0 taken 140 times.
✗ Branch 1 not taken.
|
140 | CPU_CLR(cpuid, &shdata->free_cpus); |
1832 | } | ||
1833 | 154 | cpuinfo->owner = NOBODY; | |
1834 | |||
1835 | /* It will be consistent as long as one core belongs to one process only */ | ||
1836 |
1/2✓ Branch 0 taken 154 times.
✗ Branch 1 not taken.
|
154 | CPU_CLR(cpuid, &shdata->occupied_cores); |
1837 | } else { | ||
1838 | // Free external CPUs that I might be using | ||
1839 |
2/2✓ Branch 0 taken 10 times.
✓ Branch 1 taken 236 times.
|
246 | if (cpuinfo->guest == pid) { |
1840 | 10 | lend_cpu(pid, cpuid, tasks); | |
1841 | } | ||
1842 | } | ||
1843 | } | ||
1844 | } | ||
1845 | 30 | shmem_unlock(shm_handler); | |
1846 | |||
1847 | 30 | update_shmem_timestamp(); | |
1848 | |||
1849 | 30 | return error; | |
1850 | } | ||
1851 | |||
1852 | /* Called when DLB_disable. | ||
1853 | * This function resets the initial status of pid: acquire owned, lend guested */ | ||
1854 | 2 | int shmem_cpuinfo__reset(pid_t pid, array_cpuinfo_task_t *restrict tasks) { | |
1855 | 2 | int error = DLB_SUCCESS; | |
1856 | 2 | shmem_lock(shm_handler); | |
1857 | { | ||
1858 | // Remove any request before acquiring and lending | ||
1859 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 2 times.
|
2 | if (shdata->flags.queues_enabled) { |
1860 | ✗ | queue_lewi_mask_request_t_remove(&shdata->lewi_mask_requests, pid); | |
1861 | ✗ | for (int cpuid=0; cpuid<node_size; ++cpuid) { | |
1862 | ✗ | cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
1863 | ✗ | if (cpuinfo->owner != pid) { | |
1864 | ✗ | queue_pid_t_remove(&cpuinfo->requests, pid); | |
1865 | } | ||
1866 | } | ||
1867 | } | ||
1868 | |||
1869 | // Iterate again to properly reset each CPU | ||
1870 |
2/2✓ Branch 0 taken 8 times.
✓ Branch 1 taken 2 times.
|
10 | for (int cpuid=0; cpuid<node_size; ++cpuid) { |
1871 | 8 | cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
1872 |
2/2✓ Branch 0 taken 4 times.
✓ Branch 1 taken 4 times.
|
8 | if (cpuinfo->owner == pid) { |
1873 | 4 | reclaim_cpu(pid, cpuid, tasks); | |
1874 |
2/2✓ Branch 0 taken 2 times.
✓ Branch 1 taken 2 times.
|
4 | } else if (cpuinfo->guest == pid) { |
1875 | 2 | lend_cpu(pid, cpuid, tasks); | |
1876 | 2 | array_cpuinfo_task_t_push( | |
1877 | tasks, | ||
1878 | 2 | (const cpuinfo_task_t) { | |
1879 | .action = DISABLE_CPU, | ||
1880 | .pid = pid, | ||
1881 | .cpuid = cpuid, | ||
1882 | }); | ||
1883 | } | ||
1884 | } | ||
1885 | } | ||
1886 | 2 | shmem_unlock(shm_handler); | |
1887 | |||
1888 | 2 | update_shmem_timestamp(); | |
1889 | |||
1890 | 2 | return error; | |
1891 | } | ||
1892 | |||
1893 | /* Lend as many CPUs as needed to only guest as much as 'max' CPUs */ | ||
1894 | 17 | int shmem_cpuinfo__update_max_parallelism(pid_t pid, unsigned int max, | |
1895 | array_cpuinfo_task_t *restrict tasks) { | ||
1896 | 17 | int error = DLB_SUCCESS; | |
1897 | 17 | unsigned int owned_count = 0; | |
1898 | 17 | unsigned int guested_count = 0; | |
1899 | 17 | SMALL_ARRAY(cpuid_t, guested_cpus, node_size); | |
1900 | 17 | shmem_lock(shm_handler); | |
1901 | { | ||
1902 |
2/2✓ Branch 0 taken 96 times.
✓ Branch 1 taken 17 times.
|
113 | for (cpuid_t cpuid=0; cpuid<node_size; ++cpuid) { |
1903 | 96 | const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
1904 |
2/2✓ Branch 0 taken 76 times.
✓ Branch 1 taken 20 times.
|
96 | if (cpuinfo->owner == pid) { |
1905 | 76 | ++owned_count; | |
1906 |
2/2✓ Branch 0 taken 31 times.
✓ Branch 1 taken 45 times.
|
76 | if (max < owned_count) { |
1907 | // Lend owned CPUs if the number of owned is greater than max | ||
1908 | 31 | lend_cpu(pid, cpuid, tasks); | |
1909 | 31 | array_cpuinfo_task_t_push( | |
1910 | tasks, | ||
1911 | 31 | (const cpuinfo_task_t) { | |
1912 | .action = DISABLE_CPU, | ||
1913 | .pid = pid, | ||
1914 | .cpuid = cpuid, | ||
1915 | }); | ||
1916 | } | ||
1917 |
2/2✓ Branch 0 taken 14 times.
✓ Branch 1 taken 6 times.
|
20 | } else if (cpuinfo->guest == pid) { |
1918 | // Since owned_count is still unknown, just save our guested CPUs | ||
1919 | 14 | guested_cpus[guested_count++] = cpuid; | |
1920 | } | ||
1921 | } | ||
1922 | |||
1923 | // Iterate guested CPUs to lend them if needed | ||
1924 |
2/2✓ Branch 0 taken 14 times.
✓ Branch 1 taken 17 times.
|
31 | for (unsigned int i=0; i<guested_count; ++i) { |
1925 |
2/2✓ Branch 0 taken 12 times.
✓ Branch 1 taken 2 times.
|
14 | if (max < owned_count + i + 1) { |
1926 | 12 | cpuid_t cpuid = guested_cpus[i]; | |
1927 | 12 | lend_cpu(pid, cpuid, tasks); | |
1928 | 12 | array_cpuinfo_task_t_push( | |
1929 | tasks, | ||
1930 | 12 | (const cpuinfo_task_t) { | |
1931 | .action = DISABLE_CPU, | ||
1932 | .pid = pid, | ||
1933 | .cpuid = cpuid, | ||
1934 | }); | ||
1935 | } | ||
1936 | } | ||
1937 | } | ||
1938 | 17 | shmem_unlock(shm_handler); | |
1939 | |||
1940 | 17 | update_shmem_timestamp(); | |
1941 | |||
1942 | 17 | return error; | |
1943 | } | ||
1944 | |||
1945 | /* Update CPU ownership according to the new process mask. | ||
1946 | * To avoid collisions, we only release the ownership if we still own it | ||
1947 | */ | ||
1948 | 20 | void shmem_cpuinfo__update_ownership(pid_t pid, const cpu_set_t *restrict process_mask, | |
1949 | array_cpuinfo_task_t *restrict tasks) { | ||
1950 | |||
1951 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 20 times.
|
20 | verbose(VB_SHMEM, "Updating ownership: %s", mu_to_str(process_mask)); |
1952 | |||
1953 | 20 | shmem_lock(shm_handler); | |
1954 | |||
1955 | int cpuid; | ||
1956 |
2/2✓ Branch 0 taken 92 times.
✓ Branch 1 taken 20 times.
|
112 | for (cpuid=0; cpuid<node_size; ++cpuid) { |
1957 | 92 | cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
1958 |
5/6✓ Branch 0 taken 92 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 58 times.
✓ Branch 3 taken 34 times.
✓ Branch 4 taken 58 times.
✓ Branch 5 taken 34 times.
|
92 | if (CPU_ISSET(cpuid, process_mask)) { |
1959 | // The CPU should be mine | ||
1960 |
2/2✓ Branch 0 taken 19 times.
✓ Branch 1 taken 39 times.
|
58 | if (cpuinfo->owner != pid) { |
1961 | // Not owned: Steal CPU | ||
1962 | 19 | cpuinfo->owner = pid; | |
1963 | 19 | cpuinfo->state = CPU_BUSY; | |
1964 |
2/2✓ Branch 0 taken 18 times.
✓ Branch 1 taken 1 times.
|
19 | if (cpuinfo->guest == NOBODY) { |
1965 | 18 | cpuinfo->guest = pid; | |
1966 |
1/2✓ Branch 0 taken 18 times.
✗ Branch 1 not taken.
|
18 | CPU_CLR(cpuid, &shdata->free_cpus); |
1967 |
1/2✓ Branch 0 taken 18 times.
✗ Branch 1 not taken.
|
18 | CPU_CLR(cpuid, &shdata->occupied_cores); |
1968 | } | ||
1969 |
2/2✓ Branch 0 taken 4 times.
✓ Branch 1 taken 15 times.
|
19 | if (tasks) { |
1970 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 3 times.
|
4 | if (cpuinfo->guest != pid) { |
1971 | 1 | array_cpuinfo_task_t_push( | |
1972 | tasks, | ||
1973 | 1 | (const cpuinfo_task_t) { | |
1974 | .action = DISABLE_CPU, | ||
1975 | 1 | .pid = cpuinfo->guest, | |
1976 | .cpuid = cpuid, | ||
1977 | }); | ||
1978 | } | ||
1979 | 4 | array_cpuinfo_task_t_push( | |
1980 | tasks, | ||
1981 | 4 | (const cpuinfo_task_t) { | |
1982 | .action = ENABLE_CPU, | ||
1983 | .pid = pid, | ||
1984 | .cpuid = cpuid, | ||
1985 | }); | ||
1986 | } | ||
1987 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 19 times.
|
19 | verbose(VB_SHMEM, "Acquiring ownership of CPU %d", cpuid); |
1988 | } else { | ||
1989 | // The CPU was already owned, no update needed | ||
1990 | } | ||
1991 | |||
1992 | } else { | ||
1993 | // The CPU should not be mine | ||
1994 |
2/2✓ Branch 0 taken 12 times.
✓ Branch 1 taken 22 times.
|
34 | if (cpuinfo->owner == pid) { |
1995 | // Previusly owned: Release CPU ownership | ||
1996 | 12 | cpuinfo->owner = NOBODY; | |
1997 | 12 | cpuinfo->state = CPU_DISABLED; | |
1998 |
2/2✓ Branch 0 taken 11 times.
✓ Branch 1 taken 1 times.
|
12 | if (cpuinfo->guest == pid ) { |
1999 | 11 | cpuinfo->guest = NOBODY; | |
2000 |
2/2✓ Branch 0 taken 2 times.
✓ Branch 1 taken 9 times.
|
11 | if (tasks) { |
2001 | 2 | array_cpuinfo_task_t_push( | |
2002 | tasks, | ||
2003 | 2 | (const cpuinfo_task_t) { | |
2004 | .action = DISABLE_CPU, | ||
2005 | .pid = pid, | ||
2006 | .cpuid = cpuid, | ||
2007 | }); | ||
2008 | } | ||
2009 |
1/2✓ Branch 0 taken 11 times.
✗ Branch 1 not taken.
|
11 | CPU_CLR(cpuid, &shdata->free_cpus); |
2010 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 11 times.
|
11 | verbose(VB_SHMEM, "Releasing ownership of CPU %d", cpuid); |
2011 | } | ||
2012 | } else { | ||
2013 |
2/2✓ Branch 0 taken 2 times.
✓ Branch 1 taken 20 times.
|
22 | if (cpuinfo->guest == pid |
2014 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 1 times.
|
2 | && cpuinfo->state == CPU_BUSY) { |
2015 | /* 'tasks' may be NULL if LeWI is disabled, but if the process | ||
2016 | * is guesting an external CPU, LeWI should be enabled */ | ||
2017 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
|
1 | if (unlikely(tasks == NULL)) { |
2018 | ✗ | shmem_unlock(shm_handler); | |
2019 | ✗ | fatal("tasks pointer is NULL in %s. Please report bug at %s", | |
2020 | __func__, PACKAGE_BUGREPORT); | ||
2021 | } | ||
2022 | // The CPU has been either stolen or reclaimed, | ||
2023 | // return it anyway | ||
2024 | 1 | return_cpu(pid, cpuid, tasks); | |
2025 | } | ||
2026 | } | ||
2027 | } | ||
2028 | } | ||
2029 | |||
2030 | 20 | shmem_unlock(shm_handler); | |
2031 | 20 | } | |
2032 | |||
2033 | 58 | int shmem_cpuinfo__get_thread_binding(pid_t pid, int thread_num) { | |
2034 |
2/4✓ Branch 0 taken 58 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 58 times.
|
58 | if (unlikely(shm_handler == NULL || thread_num < 0)) return -1; |
2035 | |||
2036 | 116 | SMALL_ARRAY(cpuid_t, guested_cpus, node_size); | |
2037 | 58 | int owned_count = 0; | |
2038 | 58 | int guested_count = 0; | |
2039 | |||
2040 | int cpuid; | ||
2041 |
2/2✓ Branch 0 taken 178 times.
✓ Branch 1 taken 17 times.
|
195 | for (cpuid=0; cpuid<node_size; ++cpuid) { |
2042 | 178 | const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
2043 |
2/2✓ Branch 0 taken 98 times.
✓ Branch 1 taken 80 times.
|
178 | if (cpuinfo->owner == pid |
2044 |
2/2✓ Branch 0 taken 89 times.
✓ Branch 1 taken 9 times.
|
98 | && cpuinfo->state == CPU_BUSY) { |
2045 | 89 | ++owned_count; | |
2046 |
2/2✓ Branch 0 taken 41 times.
✓ Branch 1 taken 48 times.
|
89 | if (thread_num < owned_count) { |
2047 | 41 | return cpuid; | |
2048 | } | ||
2049 |
2/2✓ Branch 0 taken 10 times.
✓ Branch 1 taken 79 times.
|
89 | } else if (cpuinfo->guest == pid) { |
2050 | 10 | guested_cpus[guested_count++] = cpuid; | |
2051 | } | ||
2052 | } | ||
2053 | |||
2054 | int binding; | ||
2055 |
2/2✓ Branch 0 taken 5 times.
✓ Branch 1 taken 12 times.
|
17 | if (thread_num - owned_count < guested_count) { |
2056 | 5 | binding = guested_cpus[thread_num-owned_count]; | |
2057 | } else { | ||
2058 | 12 | binding = -1; | |
2059 | } | ||
2060 | 17 | return binding; | |
2061 | } | ||
2062 | |||
2063 | /* Find the nth non owned CPU for a given PID. | ||
2064 | * The count always starts from the first owned CPU. | ||
2065 | * ex: process has mask [4,7] in a system mask [0-7], | ||
2066 | * 1st CPU (id=0) is 0 | ||
2067 | * 4th CPU (id=3) is 3 | ||
2068 | * id > 3 -> -1 | ||
2069 | */ | ||
2070 | 20 | int shmem_cpuinfo__get_nth_non_owned_cpu(pid_t pid, int nth_cpu) { | |
2071 | 20 | int idx = 0; | |
2072 | 20 | int owned_cpus = 0; | |
2073 | 20 | int non_owned_cpus = 0; | |
2074 | 40 | SMALL_ARRAY(cpuid_t, non_owned_cpu_list, node_size); | |
2075 | |||
2076 | /* Construct non owned CPU list */ | ||
2077 | int cpuid; | ||
2078 |
2/2✓ Branch 0 taken 160 times.
✓ Branch 1 taken 20 times.
|
180 | for (cpuid=0; cpuid<node_size; ++cpuid) { |
2079 | 160 | const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
2080 |
2/2✓ Branch 0 taken 80 times.
✓ Branch 1 taken 80 times.
|
160 | if (cpuinfo->owner == pid) { |
2081 |
2/2✓ Branch 0 taken 20 times.
✓ Branch 1 taken 60 times.
|
80 | if (owned_cpus++ == 0) { |
2082 | 20 | idx = non_owned_cpus; | |
2083 | } | ||
2084 |
1/2✓ Branch 0 taken 80 times.
✗ Branch 1 not taken.
|
80 | } else if (cpuinfo->state != CPU_DISABLED) { |
2085 | 80 | non_owned_cpu_list[non_owned_cpus++] = cpuid; | |
2086 | } | ||
2087 | } | ||
2088 | |||
2089 | /* Find the nth element starting from the first owned CPU */ | ||
2090 |
1/2✓ Branch 0 taken 20 times.
✗ Branch 1 not taken.
|
20 | if (nth_cpu < non_owned_cpus) { |
2091 | 20 | idx = (idx + nth_cpu) % non_owned_cpus; | |
2092 | 20 | return non_owned_cpu_list[idx]; | |
2093 | } else { | ||
2094 | ✗ | return -1; | |
2095 | } | ||
2096 | } | ||
2097 | |||
2098 | /* Return the number of registered CPUs not owned by the given PID */ | ||
2099 | 33 | int shmem_cpuinfo__get_number_of_non_owned_cpus(pid_t pid) { | |
2100 | 33 | int num_non_owned_cpus = 0; | |
2101 | int cpuid; | ||
2102 |
2/2✓ Branch 0 taken 264 times.
✓ Branch 1 taken 33 times.
|
297 | for (cpuid=0; cpuid<node_size; ++cpuid) { |
2103 | 264 | const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
2104 |
2/2✓ Branch 0 taken 132 times.
✓ Branch 1 taken 132 times.
|
264 | if (cpuinfo->owner != pid |
2105 |
1/2✓ Branch 0 taken 132 times.
✗ Branch 1 not taken.
|
132 | && cpuinfo->state != CPU_DISABLED) { |
2106 | 132 | ++num_non_owned_cpus; | |
2107 | } | ||
2108 | } | ||
2109 | 33 | return num_non_owned_cpus; | |
2110 | } | ||
2111 | |||
2112 | 50 | int shmem_cpuinfo__check_cpu_availability(pid_t pid, int cpuid) { | |
2113 | 50 | int error = DLB_NOTED; | |
2114 | 50 | cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
2115 | |||
2116 |
2/2✓ Branch 0 taken 17 times.
✓ Branch 1 taken 33 times.
|
50 | if (cpuinfo->owner != pid |
2117 |
4/4✓ Branch 0 taken 9 times.
✓ Branch 1 taken 8 times.
✓ Branch 2 taken 4 times.
✓ Branch 3 taken 5 times.
|
17 | && (cpuinfo->state == CPU_BUSY || cpuinfo->state == CPU_DISABLED) ) { |
2118 | /* The CPU is reclaimed or disabled */ | ||
2119 | 12 | error = DLB_ERR_PERM; | |
2120 |
2/2✓ Branch 0 taken 33 times.
✓ Branch 1 taken 5 times.
|
38 | } else if (cpuinfo->guest == pid) { |
2121 | /* The CPU is already guested by the process */ | ||
2122 | 33 | error = DLB_SUCCESS; | |
2123 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 5 times.
|
5 | } else if (cpuinfo->guest == NOBODY ) { |
2124 | /* Assign new guest if the CPU is empty */ | ||
2125 | ✗ | shmem_lock(shm_handler); | |
2126 | { | ||
2127 | ✗ | if (cpuinfo->guest == NOBODY) { | |
2128 | ✗ | cpuinfo->guest = pid; | |
2129 | ✗ | CPU_CLR(cpuid, &shdata->free_cpus); | |
2130 | ✗ | error = DLB_SUCCESS; | |
2131 | } | ||
2132 | } | ||
2133 | ✗ | shmem_unlock(shm_handler); | |
2134 |
1/2✓ Branch 0 taken 5 times.
✗ Branch 1 not taken.
|
5 | } else if (cpuinfo->owner == pid |
2135 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 5 times.
|
5 | && cpuinfo->state == CPU_LENT) { |
2136 | /* The owner is asking for a CPU not reclaimed yet */ | ||
2137 | ✗ | error = DLB_NOUPDT; | |
2138 | } | ||
2139 | |||
2140 | 50 | return error; | |
2141 | } | ||
2142 | |||
2143 | ✗ | bool shmem_cpuinfo__exists(void) { | |
2144 | ✗ | return shm_handler != NULL; | |
2145 | } | ||
2146 | |||
2147 | 6 | void shmem_cpuinfo__enable_request_queues(void) { | |
2148 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
|
6 | if (shm_handler == NULL) return; |
2149 | |||
2150 | /* Enable asynchronous request queues */ | ||
2151 | 6 | shdata->flags.queues_enabled = true; | |
2152 | } | ||
2153 | |||
2154 | 4 | void shmem_cpuinfo__remove_requests(pid_t pid) { | |
2155 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 4 times.
|
4 | if (shm_handler == NULL) return; |
2156 | 4 | shmem_lock(shm_handler); | |
2157 | { | ||
2158 | /* Remove any previous request for the specific pid */ | ||
2159 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 4 times.
|
4 | if (shdata->flags.queues_enabled) { |
2160 | /* Remove global requests (pair <pid,howmany>) */ | ||
2161 | ✗ | queue_lewi_mask_request_t_remove(&shdata->lewi_mask_requests, pid); | |
2162 | |||
2163 | /* Remove specific CPU requests */ | ||
2164 | int cpuid; | ||
2165 | ✗ | for (cpuid=0; cpuid<node_size; ++cpuid) { | |
2166 | ✗ | cpuinfo_t *cpuinfo = &shdata->node_info[cpuid]; | |
2167 | ✗ | queue_pid_t_remove(&cpuinfo->requests, pid); | |
2168 | } | ||
2169 | } | ||
2170 | } | ||
2171 | 4 | shmem_unlock(shm_handler); | |
2172 | } | ||
2173 | |||
2174 | 1 | int shmem_cpuinfo__version(void) { | |
2175 | 1 | return SHMEM_CPUINFO_VERSION; | |
2176 | } | ||
2177 | |||
2178 | 76 | size_t shmem_cpuinfo__size(void) { | |
2179 | 76 | return sizeof(shdata_t) + sizeof(cpuinfo_t)*mu_get_system_size(); | |
2180 | } | ||
2181 | |||
2182 | 8 | void shmem_cpuinfo__print_info(const char *shmem_key, int shmem_color, int columns, | |
2183 | dlb_printshmem_flags_t print_flags) { | ||
2184 | |||
2185 | /* If the shmem is not opened, obtain a temporary fd */ | ||
2186 | 8 | bool temporary_shmem = shm_handler == NULL; | |
2187 |
2/2✓ Branch 0 taken 4 times.
✓ Branch 1 taken 4 times.
|
8 | if (temporary_shmem) { |
2188 | 4 | shmem_cpuinfo_ext__init(shmem_key, shmem_color); | |
2189 | } | ||
2190 | |||
2191 | /* Make a full copy of the shared memory */ | ||
2192 | 8 | shdata_t *shdata_copy = malloc(sizeof(shdata_t) + sizeof(cpuinfo_t)*node_size); | |
2193 | 8 | shmem_lock(shm_handler); | |
2194 | { | ||
2195 | 8 | memcpy(shdata_copy, shdata, sizeof(shdata_t) + sizeof(cpuinfo_t)*node_size); | |
2196 | } | ||
2197 | 8 | shmem_unlock(shm_handler); | |
2198 | |||
2199 | /* Close shmem if needed */ | ||
2200 |
2/2✓ Branch 0 taken 4 times.
✓ Branch 1 taken 4 times.
|
8 | if (temporary_shmem) { |
2201 | 4 | shmem_cpuinfo_ext__finalize(); | |
2202 | } | ||
2203 | |||
2204 | /* Find the largest pid registered in the shared memory */ | ||
2205 | 8 | pid_t max_pid = 0; | |
2206 | int cpuid; | ||
2207 |
2/2✓ Branch 0 taken 176 times.
✓ Branch 1 taken 8 times.
|
184 | for (cpuid=0; cpuid<node_size; ++cpuid) { |
2208 | 176 | pid_t pid = shdata_copy->node_info[cpuid].owner; | |
2209 | 176 | max_pid = pid > max_pid ? pid : max_pid; | |
2210 | } | ||
2211 | 8 | int max_digits = snprintf(NULL, 0, "%d", max_pid); | |
2212 | |||
2213 | /* Do not print shared memory if nobody is registered */ | ||
2214 |
2/2✓ Branch 0 taken 5 times.
✓ Branch 1 taken 3 times.
|
8 | if (max_pid == 0) { |
2215 | 5 | free(shdata_copy); | |
2216 | 5 | return; | |
2217 | } | ||
2218 | |||
2219 | /* Set up color */ | ||
2220 | 3 | bool is_tty = isatty(STDOUT_FILENO); | |
2221 | 6 | bool color = print_flags & DLB_COLOR_ALWAYS | |
2222 |
3/6✓ Branch 0 taken 3 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 3 times.
✗ Branch 3 not taken.
✗ Branch 4 not taken.
✓ Branch 5 taken 3 times.
|
3 | || (print_flags & DLB_COLOR_AUTO && is_tty); |
2223 | |||
2224 | /* Set up number of columns */ | ||
2225 |
2/2✓ Branch 0 taken 2 times.
✓ Branch 1 taken 1 times.
|
3 | if (columns <= 0) { |
2226 | unsigned short width; | ||
2227 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 2 times.
|
2 | if (is_tty) { |
2228 | struct winsize w; | ||
2229 | ✗ | ioctl(STDOUT_FILENO, TIOCGWINSZ, &w); | |
2230 | ✗ | width = w.ws_col ? w.ws_col : 80; | |
2231 | } else { | ||
2232 | 2 | width = 80; | |
2233 | } | ||
2234 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 2 times.
|
2 | if (color) { |
2235 | ✗ | columns = width / (13+max_digits*2); | |
2236 | } else { | ||
2237 | 2 | columns = width / (20+max_digits*2); | |
2238 | } | ||
2239 | } | ||
2240 | |||
2241 | /* Initialize buffer */ | ||
2242 | print_buffer_t buffer; | ||
2243 | 3 | printbuffer_init(&buffer); | |
2244 | |||
2245 | /* Set up line buffer */ | ||
2246 | enum { MAX_LINE_LEN = 512 }; | ||
2247 | char line[MAX_LINE_LEN]; | ||
2248 | char *l; | ||
2249 | |||
2250 | /* Calculate number of rows and cpus per column (same) */ | ||
2251 | 3 | int rows = (node_size+columns-1) / columns; | |
2252 | 3 | int cpus_per_column = rows; | |
2253 | |||
2254 | /* Update flag here in case this is an external process */ | ||
2255 |
3/4✓ Branch 0 taken 2 times.
✓ Branch 1 taken 1 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 2 times.
|
3 | if (thread_spd && !thread_spd->options.lewi_respect_cpuset) { |
2256 | ✗ | respect_cpuset = false; | |
2257 | } | ||
2258 | |||
2259 | int row; | ||
2260 |
2/2✓ Branch 0 taken 66 times.
✓ Branch 1 taken 3 times.
|
69 | for (row=0; row<rows; ++row) { |
2261 | /* Init line */ | ||
2262 | 66 | line[0] = '\0'; | |
2263 | 66 | l = line; | |
2264 | |||
2265 | /* Iterate columns */ | ||
2266 | int column; | ||
2267 |
2/2✓ Branch 0 taken 136 times.
✓ Branch 1 taken 66 times.
|
202 | for (column=0; column<columns; ++column) { |
2268 | 136 | cpuid = row + column*cpus_per_column; | |
2269 |
1/2✓ Branch 0 taken 136 times.
✗ Branch 1 not taken.
|
136 | if (cpuid < node_size) { |
2270 | 136 | const cpuinfo_t *cpuinfo = &shdata_copy->node_info[cpuid]; | |
2271 | 136 | pid_t owner = cpuinfo->owner; | |
2272 | 136 | pid_t guest = cpuinfo->guest; | |
2273 | 136 | cpu_state_t state = cpuinfo->state; | |
2274 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 136 times.
|
136 | if (color) { |
2275 | ✗ | const char *code_color = | |
2276 | ✗ | state == CPU_DISABLED && respect_cpuset ? ANSI_COLOR_RESET : | |
2277 | ✗ | state == CPU_BUSY && guest == owner ? ANSI_COLOR_RED : | |
2278 | state == CPU_BUSY ? ANSI_COLOR_YELLOW : | ||
2279 | guest == NOBODY ? ANSI_COLOR_GREEN : | ||
2280 | ANSI_COLOR_BLUE; | ||
2281 | ✗ | l += snprintf(l, MAX_LINE_LEN-strlen(line), | |
2282 | " %4d %s[ %*d / %*d ]" ANSI_COLOR_RESET, | ||
2283 | cpuid, | ||
2284 | code_color, | ||
2285 | max_digits, owner, | ||
2286 | max_digits, guest); | ||
2287 | } else { | ||
2288 |
8/8✓ Branch 0 taken 125 times.
✓ Branch 1 taken 11 times.
✓ Branch 2 taken 115 times.
✓ Branch 3 taken 10 times.
✓ Branch 4 taken 10 times.
✓ Branch 5 taken 4 times.
✓ Branch 6 taken 6 times.
✓ Branch 7 taken 4 times.
|
251 | const char *state_desc = |
2289 | state == CPU_DISABLED ? " off" : | ||
2290 |
2/2✓ Branch 0 taken 4 times.
✓ Branch 1 taken 111 times.
|
115 | state == CPU_BUSY && guest == owner ? "busy" : |
2291 | state == CPU_BUSY ? "recl" : | ||
2292 | guest == NOBODY ? "idle" : | ||
2293 | "lent"; | ||
2294 | 136 | l += snprintf(l, MAX_LINE_LEN-strlen(line), | |
2295 | " %4d [ %*d / %*d / %s ]", | ||
2296 | cpuid, | ||
2297 | max_digits, owner, | ||
2298 | max_digits, guest, | ||
2299 | state_desc); | ||
2300 | } | ||
2301 | } | ||
2302 | } | ||
2303 | 66 | printbuffer_append(&buffer, line); | |
2304 | } | ||
2305 | |||
2306 | /* Print format */ | ||
2307 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3 times.
|
3 | snprintf(line, MAX_LINE_LEN, |
2308 | " Format: <cpuid> [ <owner> / <guest> %s]", color ? "" : "/ <state> "); | ||
2309 | 3 | printbuffer_append(&buffer, line); | |
2310 | |||
2311 | /* Print color legend */ | ||
2312 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 3 times.
|
3 | if (color) { |
2313 | ✗ | snprintf(line, MAX_LINE_LEN, | |
2314 | " Status: Disabled, " | ||
2315 | ANSI_COLOR_RED "Owned" ANSI_COLOR_RESET ", " | ||
2316 | ANSI_COLOR_YELLOW "Reclaimed" ANSI_COLOR_RESET ", " | ||
2317 | ANSI_COLOR_GREEN "Idle" ANSI_COLOR_RESET ", " | ||
2318 | ANSI_COLOR_BLUE "Lent" ANSI_COLOR_RESET); | ||
2319 | ✗ | printbuffer_append(&buffer, line); | |
2320 | } | ||
2321 | |||
2322 | /* Cpu requests */ | ||
2323 | 3 | bool any_cpu_request = false; | |
2324 |
4/4✓ Branch 0 taken 93 times.
✓ Branch 1 taken 2 times.
✓ Branch 2 taken 92 times.
✓ Branch 3 taken 1 times.
|
95 | for (cpuid=0; cpuid<node_size && !any_cpu_request; ++cpuid) { |
2325 | 92 | any_cpu_request = queue_pid_t_size(&shdata_copy->node_info[cpuid].requests) > 0; | |
2326 | } | ||
2327 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 2 times.
|
3 | if (any_cpu_request) { |
2328 | 1 | snprintf(line, MAX_LINE_LEN, "\n Cpu requests (<cpuid>: <spids>):"); | |
2329 | 1 | printbuffer_append(&buffer, line); | |
2330 |
2/2✓ Branch 0 taken 64 times.
✓ Branch 1 taken 1 times.
|
65 | for (cpuid=0; cpuid<node_size; ++cpuid) { |
2331 | 64 | queue_pid_t *requests = &shdata_copy->node_info[cpuid].requests; | |
2332 |
2/2✓ Branch 1 taken 1 times.
✓ Branch 2 taken 63 times.
|
64 | if (queue_pid_t_size(requests) > 0) { |
2333 | /* Set up line */ | ||
2334 | 1 | line[0] = '\0'; | |
2335 | 1 | l = line; | |
2336 | 1 | l += snprintf(l, MAX_LINE_LEN-strlen(line), " %4d: ", cpuid); | |
2337 | /* Iterate requests */ | ||
2338 | 1 | for (pid_t *it = queue_pid_t_front(requests); | |
2339 |
2/2✓ Branch 0 taken 1 times.
✓ Branch 1 taken 1 times.
|
2 | it != NULL; |
2340 | 1 | it = queue_pid_t_next(requests, it)) { | |
2341 | 1 | l += snprintf(l, MAX_LINE_LEN-strlen(line), " %u,", *it); | |
2342 | } | ||
2343 | /* Remove trailing comma and append line */ | ||
2344 | 1 | *(l-1) = '\0'; | |
2345 | 1 | printbuffer_append(&buffer, line); | |
2346 | } | ||
2347 | } | ||
2348 | } | ||
2349 | |||
2350 | /* Proc requests */ | ||
2351 |
2/2✓ Branch 1 taken 1 times.
✓ Branch 2 taken 2 times.
|
3 | if (queue_lewi_mask_request_t_size(&shdata_copy->lewi_mask_requests) > 0) { |
2352 | 1 | snprintf(line, MAX_LINE_LEN, | |
2353 | "\n Process requests (<spids>: <howmany>, <allowed_cpus>):"); | ||
2354 | 1 | printbuffer_append(&buffer, line); | |
2355 | } | ||
2356 | 3 | for (lewi_mask_request_t *it = | |
2357 | 3 | queue_lewi_mask_request_t_front(&shdata_copy->lewi_mask_requests); | |
2358 |
2/2✓ Branch 0 taken 2 times.
✓ Branch 1 taken 3 times.
|
5 | it != NULL; |
2359 | 2 | it = queue_lewi_mask_request_t_next(&shdata_copy->lewi_mask_requests, it)) { | |
2360 | 4 | snprintf(line, MAX_LINE_LEN, | |
2361 | " %*d: %d, %s", | ||
2362 | 2 | max_digits, it->pid, it->howmany, mu_to_str(&it->allowed)); | |
2363 | 2 | printbuffer_append(&buffer, line); | |
2364 | } | ||
2365 | |||
2366 | 3 | info0("=== CPU States ===\n%s", buffer.addr); | |
2367 | 3 | printbuffer_destroy(&buffer); | |
2368 | 3 | free(shdata_copy); | |
2369 | } | ||
2370 | |||
2371 | 7 | int shmem_cpuinfo_testing__get_num_proc_requests(void) { | |
2372 | 7 | return shdata->flags.queues_enabled ? | |
2373 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 7 times.
|
7 | queue_lewi_mask_request_t_size(&shdata->lewi_mask_requests) : 0; |
2374 | } | ||
2375 | |||
2376 | 28 | int shmem_cpuinfo_testing__get_num_cpu_requests(int cpuid) { | |
2377 | 28 | return shdata->flags.queues_enabled ? | |
2378 |
1/2✗ Branch 0 not taken.
✓ Branch 1 taken 28 times.
|
28 | queue_pid_t_size(&shdata->node_info[cpuid].requests) : 0; |
2379 | } | ||
2380 | |||
2381 | 7 | const cpu_set_t* shmem_cpuinfo_testing__get_free_cpu_set(void) { | |
2382 | 7 | return &shdata->free_cpus; | |
2383 | } | ||
2384 | |||
2385 | 7 | const cpu_set_t* shmem_cpuinfo_testing__get_occupied_core_set(void) { | |
2386 | 7 | return &shdata->occupied_cores; | |
2387 | } | ||
2388 | |||
2389 | /*** Helper functions, the shm lock must have been acquired beforehand ***/ | ||
2390 | static inline bool is_idle(int cpu) { | ||
2391 | return shdata->node_info[cpu].state == CPU_LENT && shdata->node_info[cpu].guest == NOBODY; | ||
2392 | } | ||
2393 | |||
2394 | static inline bool is_borrowed(pid_t pid, int cpu) { | ||
2395 | return shdata->node_info[cpu].state == CPU_BUSY && shdata->node_info[cpu].owner == pid; | ||
2396 | } | ||
2397 | |||
2398 | 75 | static inline bool is_shmem_empty(void) { | |
2399 | int cpuid; | ||
2400 |
2/2✓ Branch 0 taken 643 times.
✓ Branch 1 taken 60 times.
|
703 | for (cpuid=0; cpuid<node_size; ++cpuid) { |
2401 |
2/2✓ Branch 0 taken 15 times.
✓ Branch 1 taken 628 times.
|
643 | if (shdata->node_info[cpuid].owner != NOBODY) { |
2402 | 15 | return false; | |
2403 | } | ||
2404 | } | ||
2405 | 60 | return true; | |
2406 | } | ||
2407 | |||
2408 | /*** End of helper functions ***/ | ||
2409 |