GCC Code Coverage Report


Directory: src/
File: src/LB_comm/shmem_cpuinfo.c
Date: 2025-11-21 10:34:40
Exec Total Coverage
Lines: 977 1071 91.2%
Functions: 67 69 97.1%
Branches: 651 859 75.8%

Line Branch Exec Source
1 /*********************************************************************************/
2 /* Copyright 2009-2024 Barcelona Supercomputing Center */
3 /* */
4 /* This file is part of the DLB library. */
5 /* */
6 /* DLB is free software: you can redistribute it and/or modify */
7 /* it under the terms of the GNU Lesser General Public License as published by */
8 /* the Free Software Foundation, either version 3 of the License, or */
9 /* (at your option) any later version. */
10 /* */
11 /* DLB is distributed in the hope that it will be useful, */
12 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
13 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
14 /* GNU Lesser General Public License for more details. */
15 /* */
16 /* You should have received a copy of the GNU Lesser General Public License */
17 /* along with DLB. If not, see <https://www.gnu.org/licenses/>. */
18 /*********************************************************************************/
19
20 #include "LB_comm/shmem_cpuinfo.h"
21
22 #include "LB_comm/shmem.h"
23 #include "LB_core/spd.h"
24 #include "apis/dlb_errors.h"
25 #include "apis/dlb_types.h"
26 #include "support/debug.h"
27 #include "support/types.h"
28 #include "support/mytime.h"
29 #include "support/tracing.h"
30 #include "support/options.h"
31 #include "support/mask_utils.h"
32 #include "support/queues.h"
33 #include "support/small_array.h"
34 #include "support/atomic.h"
35
36 #include <limits.h>
37 #include <sched.h>
38 #include <unistd.h>
39 #include <string.h>
40 #include <sys/types.h>
41 #include <sys/ioctl.h>
42
43 /* array_cpuid_t */
44 #define ARRAY_T cpuid_t
45 #include "support/array_template.h"
46
47 /* array_cpuinfo_task_t */
48 #define ARRAY_T cpuinfo_task_t
49 #define ARRAY_KEY_T pid_t
50 #include "support/array_template.h"
51
52 /* queue_pid_t */
53 #define QUEUE_T pid_t
54 #define QUEUE_SIZE 8
55 #include "support/queue_template.h"
56
57 /* queue_lewi_mask_request_t */
58 typedef struct {
59 pid_t pid;
60 unsigned int howmany;
61 cpu_set_t allowed;
62 } lewi_mask_request_t;
63 #define QUEUE_T lewi_mask_request_t
64 #define QUEUE_KEY_T pid_t
65 #define QUEUE_SIZE 1024
66 #include "support/queue_template.h"
67
68
69 /* NOTE on default values:
70 * The shared memory will be initialized to 0 when created,
71 * thus all default values should represent 0
72 * A CPU, by default, is DISABLED and owned by NOBODY
73 */
74
75 enum { NOBODY = 0 };
76
77 typedef enum {
78 CPU_DISABLED = 0, // Not owned by any process nor part of the DLB mask
79 CPU_BUSY,
80 CPU_LENT
81 } cpu_state_t;
82
83 typedef struct {
84 cpuid_t id; // logical ID, or hwthread ID
85 cpuid_t core_id; // core ID
86 pid_t owner; // Current owner
87 pid_t guest; // Current user of the CPU
88 cpu_state_t state; // owner's POV state (busy or lent)
89 queue_pid_t requests; // List of PIDs requesting the CPU
90 } cpuinfo_t;
91
92 typedef struct cpuinfo_flags {
93 bool initialized:1;
94 bool queues_enabled:1;
95 bool hw_has_smt:1;
96 } cpuinfo_flags_t;
97
98 typedef struct {
99 cpuinfo_flags_t flags;
100 struct timespec initial_time;
101 atomic_int_least64_t timestamp_cpu_lent;
102 queue_lewi_mask_request_t lewi_mask_requests;
103 cpu_set_t free_cpus; /* redundant info for speeding up queries:
104 lent, non-guested CPUs (idle) */
105 cpu_set_t occupied_cores; /* redundant info for speeding up queries:
106 lent or busy cores and guested by other
107 than the owner (lent or reclaimed) */
108 cpuinfo_t node_info[];
109 } shdata_t;
110
111 enum { SHMEM_CPUINFO_VERSION = 6 };
112
113 static shmem_handler_t *shm_handler = NULL;
114 static shdata_t *shdata = NULL;
115 static int node_size;
116 static bool cpu_is_public_post_mortem = false;
117 static bool respect_cpuset = true;
118 static const char *shmem_name = "cpuinfo";
119 static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
120 static int subprocesses_attached = 0;
121
122 static inline bool is_idle(int cpu) __attribute__((unused));
123 static inline bool is_borrowed(pid_t pid, int cpu) __attribute__((unused));
124 static inline bool is_shmem_empty(void);
125
126
127 467 static void update_shmem_timestamp(void) {
128 467 DLB_ATOMIC_ST_REL(&shdata->timestamp_cpu_lent, get_time_in_ns());
129 467 }
130
131 /* A core is eligible if all the CPUs in the core are not guested, or guested
132 * by the process, and none of them are reclaimed */
133 254 static bool core_is_eligible(pid_t pid, int cpuid) {
134
135
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 254 times.
254 ensure(cpuid < node_size, "cpuid %d greater than node_size %d in %s",
136 cpuid, node_size, __func__);
137
138
2/2
✓ Branch 0 taken 33 times.
✓ Branch 1 taken 221 times.
254 if (shdata->flags.hw_has_smt) {
139 33 const mu_cpuset_t *core_mask = mu_get_core_mask(cpuid);
140 33 for (int cpuid_in_core = core_mask->first_cpuid;
141
3/4
✓ Branch 0 taken 123 times.
✓ Branch 1 taken 24 times.
✓ Branch 2 taken 123 times.
✗ Branch 3 not taken.
147 cpuid_in_core >= 0 && cpuid_in_core != DLB_CPUID_INVALID;
142 114 cpuid_in_core = mu_get_next_cpu(core_mask->set, cpuid_in_core)) {
143 123 const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid_in_core];
144
4/4
✓ Branch 0 taken 82 times.
✓ Branch 1 taken 41 times.
✓ Branch 2 taken 75 times.
✓ Branch 3 taken 7 times.
123 if ((cpuinfo->guest != pid && cpuinfo->guest != NOBODY)
145
3/4
✓ Branch 0 taken 2 times.
✓ Branch 1 taken 114 times.
✓ Branch 2 taken 2 times.
✗ Branch 3 not taken.
116 || (cpuinfo->state == CPU_BUSY && cpuinfo->owner != pid)) {
146 9 return false;
147 }
148 }
149 24 return true;
150 } else {
151 221 const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
152 221 return cpuinfo->owner == pid
153
2/2
✓ Branch 0 taken 192 times.
✓ Branch 1 taken 2 times.
194 || cpuinfo->guest == pid
154
3/4
✓ Branch 0 taken 194 times.
✓ Branch 1 taken 27 times.
✓ Branch 2 taken 192 times.
✗ Branch 3 not taken.
415 || cpuinfo->guest == NOBODY;
155 }
156 }
157
158 /* A core is occupied if any of the CPUs in the core are guested by some
159 * process that is not the owner. The owner is provided as parameter since
160 * this function may be called during the core registration */
161 24 static bool core_is_occupied(pid_t owner, int cpuid) {
162
163
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 24 times.
24 ensure(cpuid < node_size, "cpuid %d greater than node_size %d in %s",
164 cpuid, node_size, __func__);
165
166
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 24 times.
24 if (owner == NOBODY) return false;
167
168
1/2
✓ Branch 0 taken 24 times.
✗ Branch 1 not taken.
24 if (shdata->flags.hw_has_smt) {
169 24 const mu_cpuset_t *core_mask = mu_get_core_mask(cpuid);
170 24 for (int cpuid_in_core = core_mask->first_cpuid;
171
3/4
✓ Branch 0 taken 78 times.
✓ Branch 1 taken 6 times.
✓ Branch 2 taken 78 times.
✗ Branch 3 not taken.
84 cpuid_in_core >= 0 && cpuid_in_core != DLB_CPUID_INVALID;
172 60 cpuid_in_core = mu_get_next_cpu(core_mask->set, cpuid_in_core)) {
173 78 pid_t guest = shdata->node_info[cpuid_in_core].guest;
174
2/2
✓ Branch 0 taken 42 times.
✓ Branch 1 taken 36 times.
78 if (guest != NOBODY
175
2/2
✓ Branch 0 taken 18 times.
✓ Branch 1 taken 24 times.
42 && guest != owner) {
176 18 return true;
177 }
178 }
179 6 return false;
180 } else {
181 pid_t guest = shdata->node_info[cpuid].guest;
182 return guest != NOBODY
183 && guest != owner;
184 }
185 }
186
187 /* A CPU is occupied if it's guested by some process that is not the owner. */
188 1153 static bool cpu_is_occupied(pid_t owner, int cpuid) {
189
190
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 1153 times.
1153 ensure(cpuid < node_size, "cpuid %d greater than node_size %d in %s",
191 cpuid, node_size, __func__);
192
193 1153 pid_t guest = shdata->node_info[cpuid].guest;
194 return guest != NOBODY
195
2/2
✓ Branch 0 taken 749 times.
✓ Branch 1 taken 1 times.
750 && owner != NOBODY
196
4/4
✓ Branch 0 taken 750 times.
✓ Branch 1 taken 403 times.
✓ Branch 2 taken 175 times.
✓ Branch 3 taken 574 times.
1903 && guest != owner;
197 }
198
199 /* Assuming that only cpuid has changed its state, update occupied_cores accordingly */
200 1153 static void update_occupied_cores(pid_t owner, int cpuid) {
201
202
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 1153 times.
1153 ensure(cpuid < node_size, "cpuid %d greater than node_size %d in %s",
203 cpuid, node_size, __func__);
204
205
2/2
✓ Branch 0 taken 116 times.
✓ Branch 1 taken 1037 times.
1153 if (shdata->flags.hw_has_smt) {
206
2/2
✓ Branch 1 taken 10 times.
✓ Branch 2 taken 106 times.
116 if (cpu_is_occupied(owner, cpuid)) {
207
5/6
✓ Branch 0 taken 10 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 4 times.
✓ Branch 3 taken 6 times.
✓ Branch 4 taken 6 times.
✓ Branch 5 taken 4 times.
10 if (!CPU_ISSET(cpuid, &shdata->occupied_cores)) {
208 // Core state has changed
209 6 const cpu_set_t *core_mask = mu_get_core_mask(cpuid)->set;
210 6 mu_or(&shdata->occupied_cores, &shdata->occupied_cores, core_mask);
211 } else {
212 // no change
213 }
214 } else {
215
5/6
✓ Branch 0 taken 106 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 24 times.
✓ Branch 3 taken 82 times.
✓ Branch 4 taken 24 times.
✓ Branch 5 taken 82 times.
106 if (!CPU_ISSET(cpuid, &shdata->occupied_cores)) {
216 // no change
217 } else {
218 // need to check all cores
219 24 const cpu_set_t *core_mask = mu_get_core_mask(cpuid)->set;
220
2/2
✓ Branch 1 taken 18 times.
✓ Branch 2 taken 6 times.
24 if (core_is_occupied(owner, cpuid)) {
221 18 mu_or(&shdata->occupied_cores, &shdata->occupied_cores, core_mask);
222 } else {
223 6 mu_subtract(&shdata->occupied_cores, &shdata->occupied_cores, core_mask);
224 }
225 }
226 }
227 } else {
228
2/2
✓ Branch 1 taken 165 times.
✓ Branch 2 taken 872 times.
1037 if (cpu_is_occupied(owner, cpuid)) {
229
1/2
✓ Branch 0 taken 165 times.
✗ Branch 1 not taken.
165 CPU_SET(cpuid, &shdata->occupied_cores);
230 } else {
231
1/2
✓ Branch 0 taken 872 times.
✗ Branch 1 not taken.
872 CPU_CLR(cpuid, &shdata->occupied_cores);
232 }
233 }
234 1153 }
235
236 477 static pid_t find_new_guest(cpuinfo_t *cpuinfo) {
237 477 pid_t new_guest = NOBODY;
238
2/2
✓ Branch 0 taken 28 times.
✓ Branch 1 taken 449 times.
477 if (cpuinfo->state == CPU_BUSY) {
239 /* If CPU is claimed, ignore requests and assign owner */
240 28 new_guest = cpuinfo->owner;
241
2/2
✓ Branch 0 taken 185 times.
✓ Branch 1 taken 264 times.
449 } else if (shdata->flags.queues_enabled) {
242 /* Pop first PID in queue that is eligible for this CPU */
243 185 for (pid_t *it = queue_pid_t_front(&cpuinfo->requests);
244
3/4
✓ Branch 0 taken 22 times.
✓ Branch 1 taken 185 times.
✓ Branch 2 taken 22 times.
✗ Branch 3 not taken.
207 it != NULL && new_guest == NOBODY;
245 22 it = queue_pid_t_next(&cpuinfo->requests, it)) {
246
2/2
✓ Branch 1 taken 19 times.
✓ Branch 2 taken 3 times.
22 if (core_is_eligible(*it, cpuinfo->id)) {
247 19 new_guest = *it;
248 19 queue_pid_t_delete(&cpuinfo->requests, it);
249 }
250 }
251
252 /* If CPU did not have requests, pop global queue */
253
2/2
✓ Branch 0 taken 166 times.
✓ Branch 1 taken 19 times.
185 if (new_guest == NOBODY) {
254 166 for (lewi_mask_request_t *it =
255 166 queue_lewi_mask_request_t_front(&shdata->lewi_mask_requests);
256
4/4
✓ Branch 0 taken 56 times.
✓ Branch 1 taken 154 times.
✓ Branch 2 taken 44 times.
✓ Branch 3 taken 12 times.
210 it != NULL && new_guest == NOBODY;
257 44 it = queue_lewi_mask_request_t_next(&shdata->lewi_mask_requests, it)) {
258
5/6
✓ Branch 0 taken 44 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 39 times.
✓ Branch 3 taken 5 times.
✓ Branch 4 taken 39 times.
✓ Branch 5 taken 5 times.
44 if (CPU_ISSET(cpuinfo->id, &it->allowed)
259
2/2
✓ Branch 1 taken 36 times.
✓ Branch 2 taken 3 times.
39 && core_is_eligible(it->pid, cpuinfo->id)) {
260 36 new_guest = it->pid;
261
2/2
✓ Branch 0 taken 8 times.
✓ Branch 1 taken 28 times.
36 if (--(it->howmany) == 0) {
262 8 queue_lewi_mask_request_t_delete(&shdata->lewi_mask_requests, it);
263 }
264 }
265 }
266 }
267 } else {
268 /* No suitable guest */
269 264 new_guest = NOBODY;
270 }
271 477 return new_guest;
272 }
273
274
275 /*********************************************************************************/
276 /* Register / Deregister CPU */
277 /*********************************************************************************/
278
279 431 static void register_cpu(cpuinfo_t *cpuinfo, int pid, int preinit_pid) {
280
281 /* Set basic fields */
282 431 cpuinfo->owner = pid;
283 431 cpuinfo->state = CPU_BUSY;
284
4/4
✓ Branch 0 taken 23 times.
✓ Branch 1 taken 408 times.
✓ Branch 2 taken 21 times.
✓ Branch 3 taken 2 times.
431 if (cpuinfo->guest == NOBODY || cpuinfo->guest == preinit_pid) {
285 429 cpuinfo->guest = pid;
286 }
287
1/2
✓ Branch 0 taken 431 times.
✗ Branch 1 not taken.
431 CPU_CLR(cpuinfo->id, &shdata->free_cpus);
288
289 /* Add or remove CPUs in core to the occupied cores set */
290 431 update_occupied_cores(pid, cpuinfo->id);
291 431 }
292
293 1220 static void deregister_cpu(cpuinfo_t *cpuinfo, int pid) {
294
295 1220 cpuid_t cpuid = cpuinfo->id;
296
297
2/2
✓ Branch 0 taken 183 times.
✓ Branch 1 taken 1037 times.
1220 if (cpuinfo->owner == pid) {
298 183 cpuinfo->owner = NOBODY;
299
2/2
✓ Branch 0 taken 168 times.
✓ Branch 1 taken 15 times.
183 if (cpuinfo->guest == pid) {
300 168 cpuinfo->guest = NOBODY;
301 }
302
3/4
✓ Branch 0 taken 147 times.
✓ Branch 1 taken 36 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 147 times.
183 if (cpu_is_public_post_mortem || !respect_cpuset) {
303 36 cpuinfo->state = CPU_LENT;
304
1/2
✓ Branch 0 taken 36 times.
✗ Branch 1 not taken.
36 if (cpuinfo->guest == NOBODY) {
305
1/2
✓ Branch 0 taken 36 times.
✗ Branch 1 not taken.
36 CPU_SET(cpuid, &shdata->free_cpus);
306 }
307 } else {
308 147 cpuinfo->state = CPU_DISABLED;
309 147 queue_pid_t_clear(&cpuinfo->requests);
310
1/2
✓ Branch 0 taken 147 times.
✗ Branch 1 not taken.
147 CPU_CLR(cpuid, &shdata->free_cpus);
311 }
312 /* Clear all CPUs in core from the occupied */
313 183 const cpu_set_t *core_mask = mu_get_core_mask(cpuinfo->id)->set;
314 183 mu_subtract(&shdata->occupied_cores, &shdata->occupied_cores, core_mask);
315 } else {
316 // Free external CPUs that I may be using
317
2/2
✓ Branch 0 taken 33 times.
✓ Branch 1 taken 1004 times.
1037 if (cpuinfo->guest == pid) {
318 33 cpuinfo->guest = NOBODY;
319
1/2
✓ Branch 0 taken 33 times.
✗ Branch 1 not taken.
33 CPU_SET(cpuid, &shdata->free_cpus);
320 }
321
322 // Remove any previous CPU request
323
2/2
✓ Branch 0 taken 345 times.
✓ Branch 1 taken 692 times.
1037 if (shdata->flags.queues_enabled) {
324 345 queue_pid_t_remove(&cpuinfo->requests, pid);
325 }
326 }
327 1220 }
328
329
330 /*********************************************************************************/
331 /* Init / Register */
332 /*********************************************************************************/
333
334 1 static void cleanup_shmem(void *shdata_ptr, int pid) {
335 1 shdata_t *shared_data = shdata_ptr;
336 int cpuid;
337
2/2
✓ Branch 0 taken 8 times.
✓ Branch 1 taken 1 times.
9 for (cpuid=0; cpuid<node_size; ++cpuid) {
338 8 cpuinfo_t *cpuinfo = &shared_data->node_info[cpuid];
339 8 deregister_cpu(cpuinfo, pid);
340 }
341 1 }
342
343 165 static void open_shmem(const char *shmem_key, int shmem_color) {
344 165 pthread_mutex_lock(&mutex);
345 {
346
2/2
✓ Branch 0 taken 101 times.
✓ Branch 1 taken 64 times.
165 if (shm_handler == NULL) {
347 101 node_size = mu_get_system_size();
348 202 shm_handler = shmem_init((void**)&shdata,
349 101 &(const shmem_props_t) {
350 101 .size = shmem_cpuinfo__size(),
351 .name = shmem_name,
352 .key = shmem_key,
353 .color = shmem_color,
354 .version = SHMEM_CPUINFO_VERSION,
355 .cleanup_fn = cleanup_shmem,
356 });
357 101 subprocesses_attached = 1;
358 } else {
359 64 ++subprocesses_attached;
360 }
361 }
362 165 pthread_mutex_unlock(&mutex);
363 165 }
364
365 139 static void init_shmem(void) {
366 // Initialize some values if this is the 1st process attached to the shmem
367
2/2
✓ Branch 0 taken 71 times.
✓ Branch 1 taken 68 times.
139 if (!shdata->flags.initialized) {
368 142 shdata->flags = (const cpuinfo_flags_t) {
369 .initialized = true,
370 71 .hw_has_smt = mu_system_has_smt(),
371 };
372 71 get_time(&shdata->initial_time);
373 71 shdata->timestamp_cpu_lent = 0;
374
375 /* Initialize helper cpu sets */
376 71 CPU_ZERO(&shdata->free_cpus);
377 71 CPU_ZERO(&shdata->occupied_cores);
378
379 /* Initialize global requests */
380 71 queue_lewi_mask_request_t_init(&shdata->lewi_mask_requests);
381
382 /* Initialize CPU ids */
383 struct timespec now;
384 71 get_time(&now);
385 int cpuid;
386
2/2
✓ Branch 0 taken 716 times.
✓ Branch 1 taken 71 times.
787 for (cpuid=0; cpuid<node_size; ++cpuid) {
387 1432 shdata->node_info[cpuid] = (const cpuinfo_t) {
388 .id = cpuid,
389 716 .core_id = mu_get_core_id(cpuid),
390 };
391
392 /* Initialize cpuinfo queue */
393 716 queue_pid_t_init(&shdata->node_info[cpuid].requests);
394
395 /* If registered CPU set is not respected, all CPUs start as
396 * available from the beginning */
397
2/2
✓ Branch 0 taken 96 times.
✓ Branch 1 taken 620 times.
716 if (!respect_cpuset) {
398 96 shdata->node_info[cpuid].state = CPU_LENT;
399
1/2
✓ Branch 0 taken 96 times.
✗ Branch 1 not taken.
96 CPU_SET(cpuid, &shdata->free_cpus);
400 }
401 }
402 }
403 139 }
404
405 139 static int register_process(pid_t pid, pid_t preinit_pid, const cpu_set_t *mask, bool steal) {
406
2/2
✓ Branch 1 taken 18 times.
✓ Branch 2 taken 121 times.
139 if (CPU_COUNT(mask) == 0) return DLB_SUCCESS;
407
408
2/2
✓ Branch 0 taken 13 times.
✓ Branch 1 taken 108 times.
121 verbose(VB_SHMEM, "Registering process %d with mask %s", pid, mu_to_str(mask));
409
410
2/2
✓ Branch 0 taken 116 times.
✓ Branch 1 taken 5 times.
121 if (!steal) {
411 // Check first that my mask is not already owned
412 116 for (int cpuid = mu_get_first_cpu(mask);
413
3/4
✓ Branch 0 taken 432 times.
✓ Branch 1 taken 110 times.
✓ Branch 2 taken 432 times.
✗ Branch 3 not taken.
542 cpuid >= 0 && cpuid < node_size;
414 426 cpuid = mu_get_next_cpu(mask, cpuid)) {
415
416 432 pid_t owner = shdata->node_info[cpuid].owner;
417
418
6/6
✓ Branch 0 taken 28 times.
✓ Branch 1 taken 404 times.
✓ Branch 2 taken 18 times.
✓ Branch 3 taken 10 times.
✓ Branch 4 taken 6 times.
✓ Branch 5 taken 12 times.
432 if (owner != NOBODY && owner != pid && owner != preinit_pid) {
419
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 verbose(VB_SHMEM,
420 "Error registering CPU %d, already owned by %d",
421 cpuid, owner);
422 6 return DLB_ERR_PERM;
423 }
424 }
425 }
426
427 // Register mask
428 115 for (int cpuid = mu_get_first_cpu(mask);
429
3/4
✓ Branch 0 taken 431 times.
✓ Branch 1 taken 115 times.
✓ Branch 2 taken 431 times.
✗ Branch 3 not taken.
546 cpuid >= 0 && cpuid < node_size;
430 431 cpuid = mu_get_next_cpu(mask, cpuid)) {
431
432 431 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
433
434
5/6
✓ Branch 0 taken 5 times.
✓ Branch 1 taken 426 times.
✓ Branch 2 taken 1 times.
✓ Branch 3 taken 4 times.
✓ Branch 4 taken 1 times.
✗ Branch 5 not taken.
431 if (steal && cpuinfo->owner != NOBODY && cpuinfo->owner != pid) {
435
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
1 verbose(VB_SHMEM, "Acquiring ownership of CPU %d", cpuid);
436 }
437
438 431 register_cpu(cpuinfo, pid, preinit_pid);
439 }
440
441 115 return DLB_SUCCESS;
442 }
443
444 125 int shmem_cpuinfo__init(pid_t pid, pid_t preinit_pid, const cpu_set_t *process_mask,
445 const char *shmem_key, int shmem_color) {
446 125 int error = DLB_SUCCESS;
447
448 // Determine if CPU should be public in post-mortem mode
449
2/2
✓ Branch 0 taken 71 times.
✓ Branch 1 taken 54 times.
196 cpu_is_public_post_mortem = thread_spd &&
450
2/2
✓ Branch 0 taken 43 times.
✓ Branch 1 taken 28 times.
71 (thread_spd->options.debug_opts & DBG_LPOSTMORTEM
451
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 42 times.
43 || !thread_spd->options.lewi_respect_cpuset);
452
453 // Determine whether to respect the cpuset
454
4/4
✓ Branch 0 taken 71 times.
✓ Branch 1 taken 54 times.
✓ Branch 2 taken 54 times.
✓ Branch 3 taken 17 times.
125 respect_cpuset = !(thread_spd && !thread_spd->options.lewi_respect_cpuset);
455
456 // Shared memory creation
457 125 open_shmem(shmem_key, shmem_color);
458
459 //cpu_set_t affinity_mask;
460 //mu_get_nodes_intersecting_with_cpuset(&affinity_mask, process_mask);
461
462 //DLB_INSTR( int idle_count = 0; )
463
464 125 shmem_lock(shm_handler);
465 {
466 // Initialize shared memory, if needed
467 125 init_shmem();
468
469 // Register process_mask, with stealing = false always in normal Init()
470 125 error = register_process(pid, preinit_pid, process_mask, /* steal */ false);
471 }
472 125 shmem_unlock(shm_handler);
473
474 // TODO mask info should go in shmem_procinfo. Print something else here?
475 //verbose( VB_SHMEM, "Process Mask: %s", mu_to_str(process_mask) );
476 //verbose( VB_SHMEM, "Process Affinity Mask: %s", mu_to_str(&affinity_mask) );
477
478 //add_event(IDLE_CPUS_EVENT, idle_count);
479
480
2/2
✓ Branch 0 taken 6 times.
✓ Branch 1 taken 119 times.
125 if (error == DLB_ERR_PERM) {
481 6 warn_error(DLB_ERR_PERM);
482 }
483
484
2/2
✓ Branch 0 taken 6 times.
✓ Branch 1 taken 119 times.
125 if (error != DLB_SUCCESS) {
485
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 verbose(VB_SHMEM,
486 "Error during shmem_cpuinfo initialization, finalizing shared memory");
487 6 shmem_cpuinfo__finalize(pid, shmem_key, shmem_color);
488 }
489
490 125 return error;
491 }
492
493 40 int shmem_cpuinfo_ext__init(const char *shmem_key, int shmem_color) {
494 40 open_shmem(shmem_key, shmem_color);
495 40 return DLB_SUCCESS;
496 }
497
498 14 int shmem_cpuinfo_ext__preinit(pid_t pid, const cpu_set_t *mask, dlb_drom_flags_t flags) {
499
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 14 times.
14 if (shm_handler == NULL) return DLB_ERR_NOSHMEM;
500 int error;
501 14 shmem_lock(shm_handler);
502 {
503 // Initialize shared memory, if needed
504 14 init_shmem();
505
506 // Register process_mask, with stealing according to user arguments
507 14 error = register_process(pid, /* preinit_pid */ 0, mask, flags & DLB_STEAL_CPUS);
508 }
509 14 shmem_unlock(shm_handler);
510
511
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 14 times.
14 if (error == DLB_ERR_PERM) {
512 warn_error(DLB_ERR_PERM);
513 }
514
515 14 return error;
516 }
517
518
519 /*********************************************************************************/
520 /* Finalize / Deregister */
521 /*********************************************************************************/
522
523 165 static void close_shmem(void) {
524 165 pthread_mutex_lock(&mutex);
525 {
526
2/2
✓ Branch 0 taken 101 times.
✓ Branch 1 taken 64 times.
165 if (--subprocesses_attached == 0) {
527 101 shmem_finalize(shm_handler, is_shmem_empty);
528 101 shm_handler = NULL;
529 101 shdata = NULL;
530 }
531 }
532 165 pthread_mutex_unlock(&mutex);
533 165 }
534
535 /* Even though the correct deregistration should be done through shmem_cpuinfo__deregister,
536 * this function is kept to allow deregistration from outside the LeWI_mask policy */
537 130 static void deregister_process(pid_t pid) {
538 int cpuid;
539
2/2
✓ Branch 0 taken 1212 times.
✓ Branch 1 taken 130 times.
1342 for (cpuid=0; cpuid<node_size; ++cpuid) {
540 1212 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
541 1212 deregister_cpu(cpuinfo, pid);
542 }
543
544 // Remove any previous global request
545
2/2
✓ Branch 0 taken 43 times.
✓ Branch 1 taken 87 times.
130 if (shdata->flags.queues_enabled) {
546 43 queue_lewi_mask_request_t_remove(&shdata->lewi_mask_requests, pid);
547 }
548 130 }
549
550 135 int shmem_cpuinfo__finalize(pid_t pid, const char *shmem_key, int shmem_color) {
551
2/2
✓ Branch 0 taken 10 times.
✓ Branch 1 taken 125 times.
135 if (shm_handler == NULL) {
552 /* cpuinfo_finalize may be called to finalize existing process
553 * even if the file descriptor is not opened. (DLB_PreInit + forc-exec case) */
554
1/2
✗ Branch 1 not taken.
✓ Branch 2 taken 10 times.
10 if (shmem_exists(shmem_name, shmem_key)) {
555 open_shmem(shmem_key, shmem_color);
556 } else {
557 10 return DLB_ERR_NOSHMEM;
558 }
559 }
560
561 //DLB_INSTR( int idle_count = 0; )
562
563 // Lock the shmem to deregister CPUs
564 125 shmem_lock(shm_handler);
565 {
566 125 deregister_process(pid);
567 //DLB_INSTR( if (is_idle(cpuid)) idle_count++; )
568 }
569 125 shmem_unlock(shm_handler);
570
571 125 update_shmem_timestamp();
572
573 // Shared memory destruction
574 125 close_shmem();
575
576 //add_event(IDLE_CPUS_EVENT, idle_count);
577
578 125 return DLB_SUCCESS;
579 }
580
581 40 int shmem_cpuinfo_ext__finalize(void) {
582
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 40 times.
40 if (shm_handler == NULL) return DLB_ERR_NOSHMEM;
583
584 // Shared memory destruction
585 40 close_shmem();
586
587 40 return DLB_SUCCESS;
588 }
589
590 5 int shmem_cpuinfo_ext__postfinalize(pid_t pid) {
591
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 5 times.
5 if (shm_handler == NULL) return DLB_ERR_NOSHMEM;
592
593 5 int error = DLB_SUCCESS;
594 5 shmem_lock(shm_handler);
595 {
596 5 deregister_process(pid);
597 }
598 5 shmem_unlock(shm_handler);
599
600 5 update_shmem_timestamp();
601
602 5 return error;
603 }
604
605
606 /*********************************************************************************/
607 /* Lend CPU */
608 /*********************************************************************************/
609
610 /* Add cpu_mask to the Shared Mask
611 * If the process originally owns the CPU: State => CPU_LENT
612 * If the process is currently using the CPU: Guest => NOBODY
613 */
614 534 static void lend_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) {
615 534 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
616
617
2/2
✓ Branch 0 taken 14 times.
✓ Branch 1 taken 520 times.
534 if (unlikely(cpuinfo->state == CPU_DISABLED)) return;
618
619
2/2
✓ Branch 0 taken 362 times.
✓ Branch 1 taken 158 times.
520 if (cpuinfo->owner == pid) {
620 // If the CPU is owned by the process, just change the state
621 362 cpuinfo->state = CPU_LENT;
622
2/2
✓ Branch 0 taken 76 times.
✓ Branch 1 taken 82 times.
158 } else if (shdata->flags.queues_enabled) {
623 // Otherwise, remove any previous request
624 76 queue_pid_t_remove(&cpuinfo->requests, pid);
625 }
626
627 // If the process is the guest, free it
628
2/2
✓ Branch 0 taken 459 times.
✓ Branch 1 taken 61 times.
520 if (cpuinfo->guest == pid) {
629 459 cpuinfo->guest = NOBODY;
630 }
631
632 // If the CPU is free, find a new guest
633
2/2
✓ Branch 0 taken 471 times.
✓ Branch 1 taken 49 times.
520 if (cpuinfo->guest == NOBODY) {
634 471 pid_t new_guest = find_new_guest(cpuinfo);
635
2/2
✓ Branch 0 taken 77 times.
✓ Branch 1 taken 394 times.
471 if (new_guest != NOBODY) {
636 77 cpuinfo->guest = new_guest;
637 77 array_cpuinfo_task_t_push(
638 tasks,
639 77 (const cpuinfo_task_t) {
640 .action = ENABLE_CPU,
641 .pid = new_guest,
642 .cpuid = cpuid,
643 });
644
645 // If SMT is enabled, this CPU could have been the last lent
646 // CPU in core, allowing find_new_guest to find new guests for
647 // the rest of CPUs in the core. Iterate the rest of cpus now:
648 77 const mu_cpuset_t *core_mask = mu_get_core_mask(cpuid);
649 77 for (int cpuid_in_core = core_mask->first_cpuid;
650
3/4
✓ Branch 0 taken 83 times.
✓ Branch 1 taken 77 times.
✓ Branch 2 taken 83 times.
✗ Branch 3 not taken.
160 cpuid_in_core >= 0 && cpuid_in_core != DLB_CPUID_INVALID;
651 83 cpuid_in_core = mu_get_next_cpu(core_mask->set, cpuid_in_core)) {
652
2/2
✓ Branch 0 taken 6 times.
✓ Branch 1 taken 77 times.
83 if (cpuid_in_core != cpuid) {
653 6 cpuinfo_t *cpuinfo_in_core = &shdata->node_info[cpuid_in_core];
654
1/2
✓ Branch 0 taken 6 times.
✗ Branch 1 not taken.
6 if (cpuinfo_in_core->guest == NOBODY) {
655 6 new_guest = find_new_guest(cpuinfo_in_core);
656
1/2
✓ Branch 0 taken 6 times.
✗ Branch 1 not taken.
6 if (new_guest != NOBODY) {
657 6 cpuinfo_in_core->guest = new_guest;
658 6 array_cpuinfo_task_t_push(
659 tasks,
660 6 (const cpuinfo_task_t) {
661 .action = ENABLE_CPU,
662 .pid = new_guest,
663 .cpuid = cpuid_in_core,
664 });
665
1/2
✓ Branch 0 taken 6 times.
✗ Branch 1 not taken.
6 CPU_CLR(cpuid_in_core, &shdata->free_cpus);
666 }
667 }
668 }
669 }
670 }
671 }
672
673 // Add CPU to the appropriate CPU sets
674
2/2
✓ Branch 0 taken 394 times.
✓ Branch 1 taken 126 times.
520 if (cpuinfo->guest == NOBODY) {
675
1/2
✓ Branch 0 taken 394 times.
✗ Branch 1 not taken.
394 CPU_SET(cpuid, &shdata->free_cpus);
676 }
677
678 // Add or remove CPUs in core to the occupied cores set
679 520 update_occupied_cores(cpuinfo->owner, cpuinfo->id);
680 }
681
682 164 int shmem_cpuinfo__lend_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) {
683
684
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 164 times.
164 if (cpuid >= node_size) return DLB_ERR_PERM;
685
686 164 int error = DLB_SUCCESS;
687 //DLB_DEBUG( cpu_set_t freed_cpus; )
688 //DLB_DEBUG( cpu_set_t idle_cpus; )
689 //DLB_DEBUG( CPU_ZERO( &freed_cpus ); )
690 //DLB_DEBUG( CPU_ZERO( &idle_cpus ); )
691
692 //DLB_INSTR( int idle_count = 0; )
693
694 164 shmem_lock(shm_handler);
695 {
696 164 lend_cpu(pid, cpuid, tasks);
697
698 //// Look for Idle CPUs, only in DEBUG or INSTRUMENTATION
699 //int i;
700 //for (i = 0; i < node_size; i++) {
701 //if (is_idle(i)) {
702 //DLB_INSTR( idle_count++; )
703 //DLB_DEBUG( CPU_SET(i, &idle_cpus); )
704 //}
705 //}
706 }
707 164 shmem_unlock(shm_handler);
708
709 164 update_shmem_timestamp();
710
711 //DLB_DEBUG( int size = CPU_COUNT(&freed_cpus); )
712 //DLB_DEBUG( int post_size = CPU_COUNT(&idle_cpus); )
713 //verbose(VB_SHMEM, "Lending %s", mu_to_str(&freed_cpus));
714 //verbose(VB_SHMEM, "Increasing %d Idle Threads (%d now)", size, post_size);
715 //verbose(VB_SHMEM, "Available mask: %s", mu_to_str(&idle_cpus));
716
717 //add_event(IDLE_CPUS_EVENT, idle_count);
718 164 return error;
719 }
720
721 84 int shmem_cpuinfo__lend_cpu_mask(pid_t pid, const cpu_set_t *restrict mask,
722 array_cpuinfo_task_t *restrict tasks) {
723
724 84 int error = DLB_SUCCESS;
725
726 //DLB_DEBUG( cpu_set_t freed_cpus; )
727 //DLB_DEBUG( cpu_set_t idle_cpus; )
728 //DLB_DEBUG( CPU_ZERO( &freed_cpus ); )
729 //DLB_DEBUG( CPU_ZERO( &idle_cpus ); )
730
731 //DLB_INSTR( int idle_count = 0; )
732
733 84 shmem_lock(shm_handler);
734 {
735 84 for (int cpuid = mu_get_first_cpu(mask);
736
4/4
✓ Branch 0 taken 268 times.
✓ Branch 1 taken 83 times.
✓ Branch 2 taken 267 times.
✓ Branch 3 taken 1 times.
351 cpuid >= 0 && cpuid < node_size;
737 267 cpuid = mu_get_next_cpu(mask, cpuid)) {
738 267 lend_cpu(pid, cpuid, tasks);
739
740 //// Look for Idle CPUs, only in DEBUG or INSTRUMENTATION
741 //if (is_idle(cpuid)) {
742 //DLB_INSTR( idle_count++; )
743 //DLB_DEBUG( CPU_SET(cpu, &idle_cpus); )
744 //}
745 }
746 }
747 84 shmem_unlock(shm_handler);
748
749 84 update_shmem_timestamp();
750
751 //DLB_DEBUG( int size = CPU_COUNT(&freed_cpus); )
752 //DLB_DEBUG( int post_size = CPU_COUNT(&idle_cpus); )
753 //verbose(VB_SHMEM, "Lending %s", mu_to_str(&freed_cpus));
754 //verbose(VB_SHMEM, "Increasing %d Idle Threads (%d now)", size, post_size);
755 //verbose(VB_SHMEM, "Available mask: %s", mu_to_str(&idle_cpus));
756
757 //add_event(IDLE_CPUS_EVENT, idle_count);
758 84 return error;
759 }
760
761
762 /*********************************************************************************/
763 /* Reclaim CPU */
764 /*********************************************************************************/
765
766 /* Recover CPU from the Shared Mask
767 * CPUs that owner == ME: State => CPU_BUSY
768 * CPUs that guest == NOBODY Guest => ME
769 */
770 359 static int reclaim_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) {
771 int error;
772 359 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
773
2/2
✓ Branch 0 taken 353 times.
✓ Branch 1 taken 6 times.
359 if (cpuinfo->owner == pid) {
774 353 cpuinfo->state = CPU_BUSY;
775
2/2
✓ Branch 0 taken 209 times.
✓ Branch 1 taken 144 times.
353 if (cpuinfo->guest == pid) {
776 209 error = DLB_NOUPDT;
777 }
778
2/2
✓ Branch 0 taken 73 times.
✓ Branch 1 taken 71 times.
144 else if (cpuinfo->guest == NOBODY) {
779 /* The CPU was idle, acquire it */
780 73 cpuinfo->guest = pid;
781 73 array_cpuinfo_task_t_push(
782 tasks,
783 73 (const cpuinfo_task_t) {
784 .action = ENABLE_CPU,
785 .pid = pid,
786 .cpuid = cpuid,
787 });
788
1/2
✓ Branch 0 taken 73 times.
✗ Branch 1 not taken.
73 CPU_CLR(cpuid, &shdata->free_cpus);
789 73 error = DLB_SUCCESS;
790 } else {
791 /* The CPU was guested, reclaim it */
792 71 array_cpuinfo_task_t_push(
793 tasks,
794 71 (const cpuinfo_task_t) {
795 .action = DISABLE_CPU,
796 71 .pid = cpuinfo->guest,
797 .cpuid = cpuid,
798 });
799 71 array_cpuinfo_task_t_push(
800 tasks,
801 71 (const cpuinfo_task_t) {
802 .action = ENABLE_CPU,
803 .pid = pid,
804 .cpuid = cpuid,
805 });
806 71 error = DLB_NOTED;
807 }
808 } else {
809 6 error = DLB_ERR_PERM;
810 }
811
812 359 return error;
813 }
814
815 1 static int reclaim_core(pid_t pid, cpuid_t core_id,
816 array_cpuinfo_task_t *restrict tasks,
817 unsigned int *num_reclaimed) {
818
819 1 int error = DLB_NOUPDT;
820 1 *num_reclaimed = 0;
821
822 1 const mu_cpuset_t *core_mask = mu_get_core_mask_by_coreid(core_id);
823 1 for (int cpuid_in_core = core_mask->first_cpuid;
824
3/4
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 1 times.
✓ Branch 2 taken 1 times.
✗ Branch 3 not taken.
2 cpuid_in_core >= 0 && cpuid_in_core != DLB_CPUID_INVALID;
825 1 cpuid_in_core = mu_get_next_cpu(core_mask->set, cpuid_in_core)) {
826 1 int local_error = reclaim_cpu(pid, cpuid_in_core, tasks);
827
1/4
✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
1 if (local_error == DLB_SUCCESS || local_error == DLB_NOTED) {
828 1 ++(*num_reclaimed);
829
1/2
✓ Branch 0 taken 1 times.
✗ Branch 1 not taken.
1 if (error != DLB_NOTED) {
830 1 error = local_error;
831 }
832 } else if (shdata->node_info[cpuid_in_core].guest == pid) {
833 /* already guested, continue */
834 } else {
835 /* could not be borrowed for other reason, stop iterating */
836 break;
837 }
838 }
839
840 1 return error;
841 }
842
843 21 int shmem_cpuinfo__reclaim_all(pid_t pid, array_cpuinfo_task_t *restrict tasks) {
844 21 int error = DLB_NOUPDT;
845 21 shmem_lock(shm_handler);
846 {
847 cpu_set_t cpus_to_reclaim;
848
2/2
✓ Branch 0 taken 336 times.
✓ Branch 1 taken 21 times.
357 CPU_OR(&cpus_to_reclaim, &shdata->free_cpus, &shdata->occupied_cores);
849
850 21 for (int cpuid = mu_get_first_cpu(&cpus_to_reclaim);
851
2/2
✓ Branch 0 taken 46 times.
✓ Branch 1 taken 21 times.
67 cpuid >= 0;
852 46 cpuid = mu_get_next_cpu(&cpus_to_reclaim, cpuid)) {
853
1/2
✓ Branch 0 taken 46 times.
✗ Branch 1 not taken.
46 if (shdata->node_info[cpuid].owner == pid
854
1/2
✓ Branch 0 taken 46 times.
✗ Branch 1 not taken.
46 && shdata->node_info[cpuid].guest != pid) {
855 46 int local_error = reclaim_cpu(pid, cpuid, tasks);
856
2/5
✓ Branch 0 taken 26 times.
✓ Branch 1 taken 20 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
✗ Branch 4 not taken.
46 switch(local_error) {
857 26 case DLB_NOTED:
858 // max priority, always overwrite
859 26 error = DLB_NOTED;
860 26 break;
861 20 case DLB_SUCCESS:
862 // medium priority, only update if error is in lowest priority
863 20 error = (error == DLB_NOTED) ? DLB_NOTED : DLB_SUCCESS;
864 20 break;
865 case DLB_NOUPDT:
866 // lowest priority, default value
867 break;
868 case DLB_ERR_PERM:
869 // ignore
870 break;
871 }
872 }
873 }
874 }
875 21 shmem_unlock(shm_handler);
876 21 return error;
877 }
878
879 20 int shmem_cpuinfo__reclaim_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) {
880
881
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 19 times.
20 if (cpuid >= node_size) return DLB_ERR_PERM;
882
883 int error;
884 //DLB_DEBUG( cpu_set_t recovered_cpus; )
885 //DLB_DEBUG( cpu_set_t idle_cpus; )
886 //DLB_DEBUG( CPU_ZERO(&recovered_cpus); )
887 //DLB_DEBUG( CPU_ZERO(&idle_cpus); )
888
889 //DLB_INSTR( int idle_count = 0; )
890
891 19 shmem_lock(shm_handler);
892 {
893 19 error = reclaim_cpu(pid, cpuid, tasks);
894
895 /* If the CPU was actually reclaimed and SMT is enabled, the rest of
896 * the CPUs in the core need to be disabled.
897 * Note that we are not changing the CPU state to BUSY because the owner
898 * still has not reclaim it. */
899
2/2
✓ Branch 0 taken 12 times.
✓ Branch 1 taken 7 times.
19 if (error == DLB_NOTED
900
2/2
✓ Branch 0 taken 2 times.
✓ Branch 1 taken 10 times.
12 && shdata->flags.hw_has_smt) {
901 2 const mu_cpuset_t *core_mask = mu_get_core_mask(cpuid);
902 2 for (int cpuid_in_core = core_mask->first_cpuid;
903
3/4
✓ Branch 0 taken 8 times.
✓ Branch 1 taken 2 times.
✓ Branch 2 taken 8 times.
✗ Branch 3 not taken.
10 cpuid_in_core >= 0 && cpuid_in_core != DLB_CPUID_INVALID;
904 8 cpuid_in_core = mu_get_next_cpu(core_mask->set, cpuid_in_core)) {
905
2/2
✓ Branch 0 taken 6 times.
✓ Branch 1 taken 2 times.
8 if (cpuid_in_core != cpuid) {
906 6 const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid_in_core];
907
1/2
✓ Branch 0 taken 6 times.
✗ Branch 1 not taken.
6 if (cpuinfo->guest != pid) {
908 6 array_cpuinfo_task_t_push(
909 tasks,
910 6 (const cpuinfo_task_t) {
911 .action = DISABLE_CPU,
912 6 .pid = cpuinfo->guest,
913 .cpuid = cpuid_in_core,
914 });
915 }
916 }
917 }
918 }
919
920 // if (!error) //DLB_DEBUG( CPU_SET(cpu, &recovered_cpus); )
921
922 // Look for Idle CPUs, only in DEBUG or INSTRUMENTATION
923 //if (is_idle(cpu)) {
924 //DLB_INSTR( idle_count++; )
925 //DLB_DEBUG( CPU_SET(cpu, &idle_cpus); )
926 //}
927 }
928 19 shmem_unlock(shm_handler);
929
930 //DLB_DEBUG( int recovered = CPU_COUNT(&recovered_cpus); )
931 //DLB_DEBUG( int post_size = CPU_COUNT(&idle_cpus); )
932 //verbose(VB_SHMEM, "Decreasing %d Idle Threads (%d now)", recovered, post_size);
933 //verbose(VB_SHMEM, "Available mask: %s", mu_to_str(&idle_cpus));
934
935 //add_event(IDLE_CPUS_EVENT, idle_count);
936 19 return error;
937 }
938
939 1 int shmem_cpuinfo__reclaim_cpus(pid_t pid, int ncpus, array_cpuinfo_task_t *restrict tasks) {
940 1 int error = DLB_NOUPDT;
941 //DLB_DEBUG( cpu_set_t idle_cpus; )
942 //DLB_DEBUG( CPU_ZERO(&idle_cpus); )
943
944 //DLB_INSTR( int idle_count = 0; )
945
946 //cpu_set_t recovered_cpus;
947 //CPU_ZERO(&recovered_cpus);
948
949 1 shmem_lock(shm_handler);
950 {
951 1 int num_cores = mu_get_num_cores();
952
3/4
✓ Branch 0 taken 2 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 1 times.
✓ Branch 3 taken 1 times.
2 for (int core_id = 0; core_id < num_cores && ncpus>0; ++core_id) {
953 unsigned int num_reclaimed;
954 1 int local_error = reclaim_core(pid, core_id, tasks, &num_reclaimed);
955
1/5
✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
✗ Branch 4 not taken.
1 switch(local_error) {
956 case DLB_NOTED:
957 // max priority, always overwrite
958 error = DLB_NOTED;
959 ncpus -= num_reclaimed;
960 break;
961 1 case DLB_SUCCESS:
962 // medium priority, only update if error is in lowest priority
963 1 error = (error == DLB_NOTED) ? DLB_NOTED : DLB_SUCCESS;
964 1 ncpus -= num_reclaimed;
965 1 break;
966 case DLB_NOUPDT:
967 // lowest priority, default value
968 break;
969 case DLB_ERR_PERM:
970 // ignore
971 break;
972 }
973 // Look for Idle CPUs, only in DEBUG or INSTRUMENTATION
974 //if (is_idle(cpu)) {
975 //DLB_INSTR( idle_count++; )
976 //DLB_DEBUG( CPU_SET(cpu, &idle_cpus); )
977 //}
978 }
979 }
980 1 shmem_unlock(shm_handler);
981
982 //DLB_DEBUG( int recovered = CPU_COUNT(&recovered_cpus); )
983 //DLB_DEBUG( int post_size = CPU_COUNT(&idle_cpus); )
984 //verbose(VB_SHMEM, "Decreasing %d Idle Threads (%d now)", recovered, post_size);
985 //verbose(VB_SHMEM, "Available mask: %s", mu_to_str(&idle_cpus));
986
987 //add_event(IDLE_CPUS_EVENT, idle_count);
988 1 return error;
989 }
990
991 34 int shmem_cpuinfo__reclaim_cpu_mask(pid_t pid, const cpu_set_t *restrict mask,
992 array_cpuinfo_task_t *restrict tasks) {
993 34 int error = DLB_NOUPDT;
994 34 shmem_lock(shm_handler);
995 {
996 cpu_set_t cpus_to_reclaim;
997
2/2
✓ Branch 0 taken 544 times.
✓ Branch 1 taken 34 times.
578 CPU_OR(&cpus_to_reclaim, &shdata->free_cpus, &shdata->occupied_cores);
998
2/2
✓ Branch 0 taken 544 times.
✓ Branch 1 taken 34 times.
578 CPU_AND(&cpus_to_reclaim, &cpus_to_reclaim, mask);
999
1000 34 for (int cpuid = mu_get_first_cpu(&cpus_to_reclaim);
1001
3/4
✓ Branch 0 taken 79 times.
✓ Branch 1 taken 34 times.
✓ Branch 2 taken 79 times.
✗ Branch 3 not taken.
113 cpuid >= 0 && cpuid < node_size;
1002 79 cpuid = mu_get_next_cpu(&cpus_to_reclaim, cpuid)) {
1003 79 int local_error = reclaim_cpu(pid, cpuid, tasks);
1004
3/5
✓ Branch 0 taken 4 times.
✓ Branch 1 taken 32 times.
✓ Branch 2 taken 43 times.
✗ Branch 3 not taken.
✗ Branch 4 not taken.
79 switch(local_error) {
1005 4 case DLB_ERR_PERM:
1006 // max priority, always overwrite
1007 4 error = DLB_ERR_PERM;
1008 4 break;
1009 32 case DLB_NOTED:
1010 // max priority unless there was a previous error
1011
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 32 times.
32 error = error < 0 ? error : DLB_NOTED;
1012 32 break;
1013 43 case DLB_SUCCESS:
1014 // medium priority, only update if error is in lowest priority
1015
2/2
✓ Branch 0 taken 30 times.
✓ Branch 1 taken 13 times.
43 error = (error == DLB_NOUPDT) ? DLB_SUCCESS : error;
1016 43 break;
1017 case DLB_NOUPDT:
1018 // lowest priority, default value
1019 break;
1020 }
1021 }
1022 }
1023 34 shmem_unlock(shm_handler);
1024 34 return error;
1025 }
1026
1027
1028 /*********************************************************************************/
1029 /* Acquire CPU */
1030 /*********************************************************************************/
1031
1032 /* Acquire CPU
1033 * If successful: Guest => ME
1034 */
1035 197 static int acquire_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) {
1036 int error;
1037 197 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
1038
1039
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 196 times.
197 if (cpuinfo->guest == pid) {
1040 // CPU already guested
1041 1 error = DLB_NOUPDT;
1042
2/2
✓ Branch 0 taken 146 times.
✓ Branch 1 taken 50 times.
196 } else if (cpuinfo->owner == pid) {
1043 // CPU is owned by the process
1044 146 cpuinfo->state = CPU_BUSY;
1045
2/2
✓ Branch 0 taken 119 times.
✓ Branch 1 taken 27 times.
146 if (cpuinfo->guest == NOBODY) {
1046 // CPU empty
1047 119 cpuinfo->guest = pid;
1048 119 array_cpuinfo_task_t_push(
1049 tasks,
1050 119 (const cpuinfo_task_t) {
1051 .action = ENABLE_CPU,
1052 .pid = pid,
1053 .cpuid = cpuid,
1054 });
1055
1/2
✓ Branch 0 taken 119 times.
✗ Branch 1 not taken.
119 CPU_CLR(cpuid, &shdata->free_cpus);
1056 119 error = DLB_SUCCESS;
1057 } else {
1058 // CPU needs to be reclaimed
1059 27 array_cpuinfo_task_t_push(
1060 tasks,
1061 27 (const cpuinfo_task_t) {
1062 .action = DISABLE_CPU,
1063 27 .pid = cpuinfo->guest,
1064 .cpuid = cpuid,
1065 });
1066 27 array_cpuinfo_task_t_push(
1067 tasks,
1068 27 (const cpuinfo_task_t) {
1069 .action = ENABLE_CPU,
1070 .pid = pid,
1071 .cpuid = cpuid,
1072 });
1073
1074
3/6
✓ Branch 0 taken 27 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 27 times.
✗ Branch 3 not taken.
✗ Branch 4 not taken.
✓ Branch 5 taken 27 times.
27 if (!CPU_ISSET(cpuid, &shdata->occupied_cores)) {
1075 update_occupied_cores(cpuinfo->owner, cpuinfo->id);
1076 }
1077
1078 27 error = DLB_NOTED;
1079 }
1080
2/2
✓ Branch 0 taken 36 times.
✓ Branch 1 taken 14 times.
50 } else if (cpuinfo->guest == NOBODY
1081
2/2
✓ Branch 0 taken 33 times.
✓ Branch 1 taken 3 times.
36 && cpuinfo->state == CPU_LENT
1082
1/2
✓ Branch 1 taken 33 times.
✗ Branch 2 not taken.
33 && core_is_eligible(pid, cpuid)) {
1083 // CPU is available
1084 33 cpuinfo->guest = pid;
1085 33 array_cpuinfo_task_t_push(
1086 tasks,
1087 33 (const cpuinfo_task_t) {
1088 .action = ENABLE_CPU,
1089 .pid = pid,
1090 .cpuid = cpuid,
1091 });
1092
1093
2/2
✓ Branch 0 taken 23 times.
✓ Branch 1 taken 10 times.
33 if (cpuinfo->owner != NOBODY
1094
3/6
✓ Branch 0 taken 23 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 23 times.
✓ Branch 4 taken 23 times.
✗ Branch 5 not taken.
23 && !CPU_ISSET(cpuid, &shdata->occupied_cores)) {
1095 23 update_occupied_cores(cpuinfo->owner, cpuinfo->id);
1096 }
1097
1098
1/2
✓ Branch 0 taken 33 times.
✗ Branch 1 not taken.
33 CPU_CLR(cpuid, &shdata->free_cpus);
1099
1100 33 error = DLB_SUCCESS;
1101
2/2
✓ Branch 0 taken 10 times.
✓ Branch 1 taken 7 times.
17 } else if (shdata->flags.queues_enabled) {
1102 // CPU is not available, add request (async mode)
1103
1/2
✓ Branch 1 taken 10 times.
✗ Branch 2 not taken.
10 if (queue_pid_t_enqueue(&cpuinfo->requests, pid) == 0) {
1104 10 error = DLB_NOTED;
1105 } else {
1106 error = DLB_ERR_REQST;
1107 }
1108
2/2
✓ Branch 0 taken 6 times.
✓ Branch 1 taken 1 times.
7 } else if (cpuinfo->state != CPU_DISABLED) {
1109 // CPU is busy, or lent to another process (polling mode)
1110 6 error = DLB_NOUPDT;
1111 } else {
1112 // CPU is disabled (polling mode)
1113 1 error = DLB_ERR_PERM;
1114 }
1115
1116 197 return error;
1117 }
1118
1119 4 static int acquire_core(pid_t pid, cpuid_t core_id,
1120 array_cpuinfo_task_t *restrict tasks,
1121 unsigned int *num_acquired) {
1122
1123 4 int error = DLB_NOUPDT;
1124 4 *num_acquired = 0;
1125
1126 4 const mu_cpuset_t *core_mask = mu_get_core_mask_by_coreid(core_id);
1127 4 for (int cpuid_in_core = core_mask->first_cpuid;
1128
3/4
✓ Branch 0 taken 16 times.
✓ Branch 1 taken 4 times.
✓ Branch 2 taken 16 times.
✗ Branch 3 not taken.
20 cpuid_in_core >= 0 && cpuid_in_core != DLB_CPUID_INVALID;
1129 16 cpuid_in_core = mu_get_next_cpu(core_mask->set, cpuid_in_core)) {
1130 16 int local_error = acquire_cpu(pid, cpuid_in_core, tasks);
1131
3/4
✓ Branch 0 taken 4 times.
✓ Branch 1 taken 12 times.
✓ Branch 2 taken 4 times.
✗ Branch 3 not taken.
16 if (local_error == DLB_SUCCESS || local_error == DLB_NOTED) {
1132 16 ++(*num_acquired);
1133
2/2
✓ Branch 0 taken 13 times.
✓ Branch 1 taken 3 times.
16 if (error != DLB_NOTED) {
1134 13 error = local_error;
1135 }
1136 } else if (shdata->node_info[cpuid_in_core].guest == pid) {
1137 /* already guested, continue */
1138 } else {
1139 /* could not be borrowed for other reason, stop iterating */
1140 break;
1141 }
1142 }
1143
1144 4 return error;
1145 }
1146
1147 63 static int acquire_cpus_in_array_cpuid_t(pid_t pid,
1148 const array_cpuid_t *restrict array_cpuid,
1149 int *restrict ncpus, array_cpuinfo_task_t *restrict tasks) {
1150
1151 63 int error = DLB_NOUPDT;
1152
2/2
✓ Branch 0 taken 57 times.
✓ Branch 1 taken 6 times.
63 int _ncpus = ncpus != NULL ? *ncpus : INT_MAX;
1153 63 const bool hw_has_smt = shdata->flags.hw_has_smt;
1154
1155 /* Acquire all CPUs in core if possible
1156 * (there is a high chance that consecutive CPUs belong to the same core,
1157 * try to skip those ones) */
1158 63 int prev_core_id = -1;
1159 63 for (unsigned int i = 0;
1160
4/4
✓ Branch 0 taken 162 times.
✓ Branch 1 taken 20 times.
✓ Branch 2 taken 119 times.
✓ Branch 3 taken 43 times.
182 _ncpus > 0 && i < array_cpuid->count;
1161 119 ++i) {
1162
1163 119 cpuid_t cpuid = array_cpuid->items[i];
1164 int local_error;
1165
1166
2/2
✓ Branch 0 taken 16 times.
✓ Branch 1 taken 103 times.
119 if (hw_has_smt) {
1167 16 cpuid_t core_id = shdata->node_info[cpuid].core_id;
1168
2/2
✓ Branch 0 taken 4 times.
✓ Branch 1 taken 12 times.
16 if (prev_core_id != core_id) {
1169 unsigned int num_acquired;
1170 4 local_error = acquire_core(pid, core_id, tasks, &num_acquired);
1171
3/4
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 3 times.
✓ Branch 2 taken 1 times.
✗ Branch 3 not taken.
4 if (local_error == DLB_SUCCESS || local_error == DLB_NOTED) {
1172 4 _ncpus -= num_acquired;
1173
2/2
✓ Branch 0 taken 3 times.
✓ Branch 1 taken 1 times.
4 if (error != DLB_NOTED) error = local_error;
1174 }
1175 4 prev_core_id = core_id;
1176 }
1177 } else {
1178 103 local_error = acquire_cpu(pid, cpuid, tasks);
1179
3/4
✓ Branch 0 taken 17 times.
✓ Branch 1 taken 86 times.
✓ Branch 2 taken 17 times.
✗ Branch 3 not taken.
103 if (local_error == DLB_SUCCESS || local_error == DLB_NOTED) {
1180 103 --_ncpus;
1181
2/2
✓ Branch 0 taken 99 times.
✓ Branch 1 taken 4 times.
103 if (error != DLB_NOTED) error = local_error;
1182 }
1183 }
1184 }
1185
1186
2/2
✓ Branch 0 taken 57 times.
✓ Branch 1 taken 6 times.
63 if (ncpus != NULL) {
1187 57 *ncpus = _ncpus;
1188 }
1189
1190 63 return error;
1191 }
1192
1193 79 int shmem_cpuinfo__acquire_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) {
1194
1195
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 78 times.
79 if (cpuid >= node_size) return DLB_ERR_PERM;
1196
1197 int error;
1198 78 shmem_lock(shm_handler);
1199 {
1200 78 error = acquire_cpu(pid, cpuid, tasks);
1201 }
1202 78 shmem_unlock(shm_handler);
1203 78 return error;
1204 }
1205
1206 /* Simplification of shmem_cpuinfo__acquire_ncpus_from_cpu_subset when we just
1207 * want to iterate all CPUs in set.
1208 * This function is intended to be called when leaving a blocking call and the
1209 * process knows that the provided CPUs were previously lent and need to be
1210 * reclaimed.
1211 *
1212 * PRE: all CPUS are owned */
1213 6 int shmem_cpuinfo__acquire_from_cpu_subset(
1214 pid_t pid,
1215 const array_cpuid_t *restrict array_cpuid,
1216 array_cpuinfo_task_t *restrict tasks) {
1217
1218 int error;
1219 6 shmem_lock(shm_handler);
1220 {
1221 6 error = acquire_cpus_in_array_cpuid_t(pid, array_cpuid, NULL, tasks);
1222 }
1223 6 shmem_unlock(shm_handler);
1224 6 return error;
1225 }
1226
1227 static int borrow_cpus_in_array_cpuid_t(pid_t pid,
1228 const array_cpuid_t *restrict array_cpuid,
1229 int *restrict ncpus, array_cpuinfo_task_t *restrict tasks);
1230
1231 151 int shmem_cpuinfo__acquire_ncpus_from_cpu_subset(
1232 pid_t pid, int *restrict requested_ncpus,
1233 const array_cpuid_t *restrict cpus_priority_array,
1234 lewi_affinity_t lewi_affinity, int max_parallelism,
1235 int64_t *restrict last_borrow, array_cpuinfo_task_t *restrict tasks) {
1236
1237 /* Return immediately if requested_ncpus is present and not greater than zero */
1238
3/4
✓ Branch 0 taken 98 times.
✓ Branch 1 taken 53 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 98 times.
151 if (requested_ncpus && *requested_ncpus <= 0) {
1239 return DLB_NOUPDT;
1240 }
1241
1242 /* Return immediately if there is nothing left to acquire */
1243 /* 1) If the timestamp of the last unsuccessful borrow is newer than the last CPU lent */
1244
4/4
✓ Branch 0 taken 105 times.
✓ Branch 1 taken 46 times.
✓ Branch 2 taken 8 times.
✓ Branch 3 taken 97 times.
151 if (last_borrow && *last_borrow > DLB_ATOMIC_LD_ACQ(&shdata->timestamp_cpu_lent)) {
1245 /* 2) Unless there's an owned CPUs not guested, in that case we will acquire anyway */
1246 8 bool all_owned_cpus_are_guested = true;
1247
1/2
✓ Branch 0 taken 32 times.
✗ Branch 1 not taken.
32 for (unsigned int i=0; i<cpus_priority_array->count; ++i) {
1248 32 cpuid_t cpuid = cpus_priority_array->items[i];
1249 32 const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
1250 /* Iterate until the first not owned CPU */
1251
2/2
✓ Branch 0 taken 8 times.
✓ Branch 1 taken 24 times.
32 if (cpuinfo->owner != pid) break;
1252
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 24 times.
24 if (cpuinfo->guest != pid) {
1253 /* This CPU is owned and not guested, no need to iterate anymore */
1254 all_owned_cpus_are_guested = false;
1255 break;
1256 }
1257 }
1258
1/2
✓ Branch 0 taken 8 times.
✗ Branch 1 not taken.
8 if (all_owned_cpus_are_guested) {
1259 8 return DLB_NOUPDT;
1260 }
1261 }
1262
1263 /* Return immediately if the process has reached the max_parallelism */
1264
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 143 times.
143 if (max_parallelism != 0) {
1265 for (unsigned int i=0; i<cpus_priority_array->count; ++i) {
1266 cpuid_t cpuid = cpus_priority_array->items[i];
1267 const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
1268 if (cpuinfo->guest == pid) {
1269 --max_parallelism;
1270 }
1271 }
1272 if (max_parallelism <= 0) {
1273 return DLB_NOUPDT;
1274 }
1275 }
1276
1277 /* Compute the max number of CPUs to acquire */
1278
2/2
✓ Branch 0 taken 90 times.
✓ Branch 1 taken 53 times.
143 int ncpus = requested_ncpus ? *requested_ncpus : node_size;
1279
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 143 times.
143 if (max_parallelism > 0) {
1280 ncpus = min_int(ncpus, max_parallelism);
1281 }
1282
1283 /* Arrays for temporary CPU priority (lazy initialized and always used within the lock) */
1284 static array_cpuid_t owned_idle = {};
1285 static array_cpuid_t owned_non_idle = {};
1286 static array_cpuid_t non_owned = {};
1287
1288 143 int error = DLB_NOUPDT;
1289 143 shmem_lock(shm_handler);
1290 {
1291 /* Lazy init first time, clear afterwards */
1292
2/2
✓ Branch 0 taken 125 times.
✓ Branch 1 taken 18 times.
143 if (likely(owned_idle.items != NULL)) {
1293 125 array_cpuid_t_clear(&owned_idle);
1294 125 array_cpuid_t_clear(&owned_non_idle);
1295 125 array_cpuid_t_clear(&non_owned);
1296 } else {
1297 18 array_cpuid_t_init(&owned_idle, node_size);
1298 18 array_cpuid_t_init(&owned_non_idle, node_size);
1299 18 array_cpuid_t_init(&non_owned, node_size);
1300 }
1301
1302 /* Iterate cpus_priority_array and construct all sub-arrays */
1303
2/2
✓ Branch 0 taken 874 times.
✓ Branch 1 taken 143 times.
1017 for (unsigned int i = 0; i < cpus_priority_array->count; ++i) {
1304 874 cpuid_t cpuid = cpus_priority_array->items[i];
1305 874 const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
1306
1307
2/2
✓ Branch 0 taken 306 times.
✓ Branch 1 taken 568 times.
874 if (cpuinfo->state == CPU_DISABLED) continue;
1308
1309
2/2
✓ Branch 0 taken 290 times.
✓ Branch 1 taken 278 times.
568 if (cpuinfo->owner == pid) {
1310
2/2
✓ Branch 0 taken 84 times.
✓ Branch 1 taken 206 times.
290 if (cpuinfo->guest == NOBODY) {
1311 84 array_cpuid_t_push(&owned_idle, cpuid);
1312
2/2
✓ Branch 0 taken 17 times.
✓ Branch 1 taken 189 times.
206 } else if (cpuinfo->guest != pid) {
1313 17 array_cpuid_t_push(&owned_non_idle, cpuid);
1314 }
1315
2/2
✓ Branch 0 taken 105 times.
✓ Branch 1 taken 173 times.
278 } else if (cpuinfo->guest == NOBODY) {
1316 105 array_cpuid_t_push(&non_owned, cpuid);
1317 }
1318 }
1319
1320 /* Acquire first owned CPUs that are IDLE */
1321
2/2
✓ Branch 1 taken 44 times.
✓ Branch 2 taken 99 times.
143 if (array_cpuid_t_count(&owned_idle) > 0) {
1322 44 int local_error = acquire_cpus_in_array_cpuid_t(pid, &owned_idle, &ncpus, tasks);
1323
1/4
✗ Branch 0 not taken.
✓ Branch 1 taken 44 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
44 if (local_error == DLB_SUCCESS || local_error == DLB_NOTED) {
1324 /* Update error code if needed */
1325
1/2
✓ Branch 0 taken 44 times.
✗ Branch 1 not taken.
44 if (error != DLB_NOTED) error = local_error;
1326 }
1327 }
1328
1329 /* Acquire the rest of owned CPUs */
1330
2/2
✓ Branch 1 taken 13 times.
✓ Branch 2 taken 130 times.
143 if (array_cpuid_t_count(&owned_non_idle) > 0) {
1331 13 int local_error = acquire_cpus_in_array_cpuid_t(pid, &owned_non_idle, &ncpus, tasks);
1332
3/4
✓ Branch 0 taken 13 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 11 times.
✓ Branch 3 taken 2 times.
13 if (local_error == DLB_SUCCESS || local_error == DLB_NOTED) {
1333 /* Update error code if needed */
1334
1/2
✓ Branch 0 taken 11 times.
✗ Branch 1 not taken.
11 if (error != DLB_NOTED) error = local_error;
1335 }
1336 }
1337
1338 /* Borrow non-owned CPUs */
1339
2/2
✓ Branch 1 taken 65 times.
✓ Branch 2 taken 78 times.
143 if (array_cpuid_t_count(&non_owned) > 0) {
1340 65 int local_error = borrow_cpus_in_array_cpuid_t(pid, &non_owned, &ncpus, tasks);
1341
2/2
✓ Branch 0 taken 61 times.
✓ Branch 1 taken 4 times.
65 if (local_error == DLB_SUCCESS) {
1342 /* Update error code if needed */
1343
1/2
✓ Branch 0 taken 61 times.
✗ Branch 1 not taken.
61 if (error != DLB_NOTED) error = local_error;
1344 }
1345 }
1346
1347 /* Add petition if asynchronous mode */
1348
2/2
✓ Branch 0 taken 59 times.
✓ Branch 1 taken 84 times.
143 if (shdata->flags.queues_enabled) {
1349
1350 /* Add a global petition if a number of CPUs was requested and not fully allocated */
1351
2/2
✓ Branch 0 taken 37 times.
✓ Branch 1 taken 22 times.
59 if (requested_ncpus) {
1352
2/2
✓ Branch 0 taken 24 times.
✓ Branch 1 taken 13 times.
37 if (ncpus > 0) {
1353 /* Construct a mask of allowed CPUs */
1354 24 lewi_mask_request_t request = {
1355 .pid = pid,
1356 .howmany = ncpus,
1357 };
1358 24 CPU_ZERO(&request.allowed);
1359
2/2
✓ Branch 0 taken 203 times.
✓ Branch 1 taken 24 times.
227 for (unsigned int i=0; i<cpus_priority_array->count; ++i) {
1360 203 cpuid_t cpuid = cpus_priority_array->items[i];
1361
1/2
✓ Branch 0 taken 203 times.
✗ Branch 1 not taken.
203 CPU_SET(cpuid, &request.allowed);
1362 }
1363
1364 /* Enqueue request */
1365
2/2
✓ Branch 0 taken 3 times.
✓ Branch 1 taken 21 times.
24 verbose(VB_SHMEM, "Requesting %d CPUs more after acquiring", ncpus);
1366 lewi_mask_request_t *it;
1367 24 for (it = queue_lewi_mask_request_t_front(&shdata->lewi_mask_requests);
1368
2/2
✓ Branch 0 taken 13 times.
✓ Branch 1 taken 24 times.
37 it != NULL;
1369 13 it = queue_lewi_mask_request_t_next(&shdata->lewi_mask_requests, it)) {
1370
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 13 times.
13 if (it->pid == pid
1371 && CPU_EQUAL(&request.allowed, &it->allowed)) {
1372 /* update entry */
1373 it->howmany += request.howmany;
1374 error = DLB_NOTED;
1375 break;
1376 }
1377 }
1378
1/2
✓ Branch 0 taken 24 times.
✗ Branch 1 not taken.
24 if (it == NULL) {
1379 /* or add new entry */
1380
1/2
✓ Branch 0 taken 24 times.
✗ Branch 1 not taken.
24 if (queue_lewi_mask_request_t_enqueue(
1381 24 &shdata->lewi_mask_requests, request) == 0) {
1382 24 error = DLB_NOTED;
1383 } else {
1384 error = DLB_ERR_REQST;
1385 }
1386 }
1387 }
1388 }
1389
1390 /* Otherwise, add petitions to all CPUs that are either non-owned or disabled */
1391 else {
1392
2/2
✓ Branch 0 taken 63 times.
✓ Branch 1 taken 22 times.
85 for (unsigned int i = 0; i < cpus_priority_array->count; ++i) {
1393 63 cpuid_t cpuid = cpus_priority_array->items[i];
1394 63 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
1395
1396
2/2
✓ Branch 0 taken 57 times.
✓ Branch 1 taken 6 times.
63 if (cpuinfo->state == CPU_DISABLED
1397
2/2
✓ Branch 0 taken 19 times.
✓ Branch 1 taken 38 times.
57 || (cpuinfo->owner != pid
1398
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 19 times.
19 && cpuinfo->guest != pid)) {
1399
1/2
✓ Branch 1 taken 6 times.
✗ Branch 2 not taken.
6 if (queue_pid_t_search(&cpuinfo->requests, pid) == NULL
1400
1/2
✓ Branch 1 taken 6 times.
✗ Branch 2 not taken.
6 && queue_pid_t_has_space(&cpuinfo->requests)) {
1401 6 queue_pid_t_enqueue(&cpuinfo->requests, pid);
1402 6 error = DLB_NOTED;
1403 }
1404 }
1405 }
1406 }
1407 }
1408
1409 /* Update timestamp if borrow did not succeed */
1410
6/6
✓ Branch 0 taken 97 times.
✓ Branch 1 taken 46 times.
✓ Branch 2 taken 28 times.
✓ Branch 3 taken 69 times.
✓ Branch 4 taken 18 times.
✓ Branch 5 taken 10 times.
143 if (last_borrow != NULL && error != DLB_SUCCESS && error != DLB_NOTED) {
1411 18 *last_borrow = get_time_in_ns();
1412 }
1413 }
1414 143 shmem_unlock(shm_handler);
1415 143 return error;
1416 }
1417
1418
1419 /*********************************************************************************/
1420 /* Borrow CPU */
1421 /*********************************************************************************/
1422
1423 274 static int borrow_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) {
1424
1425 274 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
1426
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 274 times.
274 if (unlikely(cpuinfo->state == CPU_DISABLED)) return DLB_ERR_PERM;
1427
1428 274 int error = DLB_NOUPDT;
1429
1430
2/2
✓ Branch 0 taken 155 times.
✓ Branch 1 taken 119 times.
274 if (cpuinfo->guest == NOBODY
1431
1/2
✓ Branch 1 taken 155 times.
✗ Branch 2 not taken.
155 && core_is_eligible(pid, cpuid)) {
1432
2/2
✓ Branch 0 taken 19 times.
✓ Branch 1 taken 136 times.
155 if (cpuinfo->owner == pid) {
1433 // CPU is owned by the process
1434 19 cpuinfo->state = CPU_BUSY;
1435 19 cpuinfo->guest = pid;
1436 19 array_cpuinfo_task_t_push(
1437 tasks,
1438 19 (const cpuinfo_task_t) {
1439 .action = ENABLE_CPU,
1440 .pid = pid,
1441 .cpuid = cpuid,
1442 });
1443 19 error = DLB_SUCCESS;
1444
1/2
✓ Branch 0 taken 19 times.
✗ Branch 1 not taken.
19 CPU_CLR(cpuid, &shdata->free_cpus);
1445
1/2
✓ Branch 0 taken 136 times.
✗ Branch 1 not taken.
136 } else if (cpuinfo->state == CPU_LENT) {
1446 // CPU is available
1447 136 cpuinfo->guest = pid;
1448 136 array_cpuinfo_task_t_push(
1449 tasks,
1450 136 (const cpuinfo_task_t) {
1451 .action = ENABLE_CPU,
1452 .pid = pid,
1453 .cpuid = cpuid,
1454 });
1455 136 error = DLB_SUCCESS;
1456
1/2
✓ Branch 0 taken 136 times.
✗ Branch 1 not taken.
136 CPU_CLR(cpuid, &shdata->free_cpus);
1457
2/2
✓ Branch 0 taken 118 times.
✓ Branch 1 taken 18 times.
136 if (cpuinfo->owner != NOBODY
1458
5/6
✓ Branch 0 taken 118 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 12 times.
✓ Branch 3 taken 106 times.
✓ Branch 4 taken 106 times.
✓ Branch 5 taken 12 times.
118 && !CPU_ISSET(cpuid, &shdata->occupied_cores)) {
1459 106 update_occupied_cores(cpuinfo->owner, cpuinfo->id);
1460 }
1461 }
1462 }
1463
1464 274 return error;
1465 }
1466
1467 32 static int borrow_core(pid_t pid, cpuid_t core_id, array_cpuinfo_task_t *restrict tasks,
1468 unsigned int *num_borrowed) {
1469
1470 32 int error = DLB_NOUPDT;
1471 32 *num_borrowed = 0;
1472
1473 32 const mu_cpuset_t *core_mask = mu_get_core_mask_by_coreid(core_id);
1474 32 for (int cpuid_in_core = core_mask->first_cpuid;
1475
3/4
✓ Branch 0 taken 92 times.
✓ Branch 1 taken 20 times.
✓ Branch 2 taken 92 times.
✗ Branch 3 not taken.
112 cpuid_in_core >= 0 && cpuid_in_core != DLB_CPUID_INVALID;
1476 80 cpuid_in_core = mu_get_next_cpu(core_mask->set, cpuid_in_core)) {
1477
2/2
✓ Branch 1 taken 16 times.
✓ Branch 2 taken 76 times.
92 if (borrow_cpu(pid, cpuid_in_core, tasks) == DLB_SUCCESS) {
1478 /* successfully borrowed, continue */
1479 16 ++(*num_borrowed);
1480 16 error = DLB_SUCCESS;
1481
2/2
✓ Branch 0 taken 12 times.
✓ Branch 1 taken 64 times.
76 } else if (shdata->node_info[cpuid_in_core].guest == pid) {
1482 /* already guested, continue */
1483 } else {
1484 /* could not be borrowed for other reason, stop iterating */
1485 12 break;
1486 }
1487 }
1488
1489 32 return error;
1490 }
1491
1492 /* Iterate array_cpuid_t and borrow all possible CPUs */
1493 107 static int borrow_cpus_in_array_cpuid_t(pid_t pid,
1494 const array_cpuid_t *restrict array_cpuid,
1495 int *restrict ncpus, array_cpuinfo_task_t *restrict tasks) {
1496
1497 107 int error = DLB_NOUPDT;
1498
2/2
✓ Branch 0 taken 101 times.
✓ Branch 1 taken 6 times.
107 int _ncpus = ncpus != NULL ? *ncpus : INT_MAX;
1499 107 const bool hw_has_smt = shdata->flags.hw_has_smt;
1500
1501 /* Borrow all CPUs in core if possible
1502 * (there is a high chance that consecutive CPUs belong to the same core,
1503 * try to skip those ones) */
1504 107 int prev_core_id = -1;
1505 107 for (unsigned int i = 0;
1506
4/4
✓ Branch 0 taken 349 times.
✓ Branch 1 taken 44 times.
✓ Branch 2 taken 286 times.
✓ Branch 3 taken 63 times.
393 _ncpus > 0 && i < array_cpuid->count;
1507 286 ++i) {
1508
1509 286 cpuid_t cpuid = array_cpuid->items[i];
1510
1511
2/2
✓ Branch 0 taken 116 times.
✓ Branch 1 taken 170 times.
286 if (hw_has_smt) {
1512 116 cpuid_t core_id = shdata->node_info[cpuid].core_id;
1513
2/2
✓ Branch 0 taken 32 times.
✓ Branch 1 taken 84 times.
116 if (prev_core_id != core_id) {
1514 unsigned int num_borrowed;
1515
2/2
✓ Branch 1 taken 4 times.
✓ Branch 2 taken 28 times.
32 if (borrow_core(pid, core_id, tasks, &num_borrowed) == DLB_SUCCESS) {
1516 4 _ncpus -= num_borrowed;
1517 4 error = DLB_SUCCESS;
1518 }
1519 32 prev_core_id = core_id;
1520 }
1521 } else {
1522
2/2
✓ Branch 1 taken 127 times.
✓ Branch 2 taken 43 times.
170 if (borrow_cpu(pid, cpuid, tasks) == DLB_SUCCESS) {
1523 127 --_ncpus;
1524 127 error = DLB_SUCCESS;
1525 }
1526 }
1527 }
1528
1529
2/2
✓ Branch 0 taken 101 times.
✓ Branch 1 taken 6 times.
107 if (ncpus != NULL) {
1530 101 *ncpus = _ncpus;
1531 }
1532
1533 107 return error;
1534 }
1535
1536 /* Iterate cpu_set_t and borrow all possible CPUs */
1537 static int borrow_cpus_in_cpu_set_t(pid_t pid,
1538 const cpu_set_t *restrict cpu_set,
1539 int *restrict ncpus, array_cpuinfo_task_t *restrict tasks) {
1540
1541 int error = DLB_NOUPDT;
1542 int _ncpus = ncpus != NULL ? *ncpus : INT_MAX;
1543 const bool hw_has_smt = shdata->flags.hw_has_smt;
1544
1545 /* Borrow all CPUs in core if possible
1546 * (there is a high chance that consecutive CPUs belong to the same core,
1547 * try to skip those ones) */
1548 int prev_core_id = -1;
1549 for (int cpuid = mu_get_first_cpu(cpu_set);
1550 cpuid >= 0 && cpuid < node_size &&_ncpus > 0;
1551 cpuid = mu_get_next_cpu(cpu_set, cpuid)) {
1552
1553 if (hw_has_smt) {
1554 cpuid_t core_id = shdata->node_info[cpuid].core_id;
1555 if (prev_core_id != core_id) {
1556 unsigned int num_borrowed;
1557 if (borrow_core(pid, core_id, tasks, &num_borrowed) == DLB_SUCCESS) {
1558 _ncpus -= num_borrowed;
1559 error = DLB_SUCCESS;
1560 }
1561 prev_core_id = core_id;
1562 }
1563 } else {
1564 if (borrow_cpu(pid, cpuid, tasks) == DLB_SUCCESS) {
1565 --_ncpus;
1566 error = DLB_SUCCESS;
1567 }
1568 }
1569 }
1570
1571 if (ncpus != NULL) {
1572 *ncpus = _ncpus;
1573 }
1574
1575 return error;
1576 }
1577
1578
1579 12 int shmem_cpuinfo__borrow_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) {
1580
1581
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 12 times.
12 if (cpuid >= node_size) return DLB_ERR_PERM;
1582
1583 int error;
1584 12 shmem_lock(shm_handler);
1585 {
1586 12 error = borrow_cpu(pid, cpuid, tasks);
1587 }
1588 12 shmem_unlock(shm_handler);
1589 12 return error;
1590 }
1591
1592 /* Simplification of shmem_cpuinfo__borrow_ncpus_from_cpu_subset when we just
1593 * want to iterate all CPUs in set.
1594 * This function is intended to be called when leaving a blocking call and the
1595 * process knows that the provided CPUs were previously lent and may try to
1596 * borrow again.
1597 */
1598 6 int shmem_cpuinfo__borrow_from_cpu_subset(
1599 pid_t pid,
1600 const array_cpuid_t *restrict array_cpuid,
1601 array_cpuinfo_task_t *restrict tasks) {
1602
1603 int error;
1604 6 shmem_lock(shm_handler);
1605 {
1606 6 error = borrow_cpus_in_array_cpuid_t(pid, array_cpuid, NULL, tasks);
1607 }
1608 6 shmem_unlock(shm_handler);
1609 6 return error;
1610 }
1611
1612 40 int shmem_cpuinfo__borrow_ncpus_from_cpu_subset(
1613 pid_t pid, int *restrict requested_ncpus,
1614 const array_cpuid_t *restrict cpus_priority_array, lewi_affinity_t lewi_affinity,
1615 int max_parallelism, int64_t *restrict last_borrow,
1616 array_cpuinfo_task_t *restrict tasks) {
1617
1618 /* Return immediately if requested_ncpus is present and not greater than zero */
1619
3/4
✓ Branch 0 taken 27 times.
✓ Branch 1 taken 13 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 27 times.
40 if (requested_ncpus && *requested_ncpus <= 0) {
1620 return DLB_NOUPDT;
1621 }
1622
1623 /* Return immediately if the timestamp of the last unsuccessful borrow is
1624 * newer than the last CPU lent */
1625
3/4
✓ Branch 0 taken 21 times.
✓ Branch 1 taken 19 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 21 times.
40 if (last_borrow && *last_borrow > DLB_ATOMIC_LD_ACQ(&shdata->timestamp_cpu_lent)) {
1626 return DLB_NOUPDT;
1627 }
1628
1629 /* Return immediately if the process has reached the max_parallelism */
1630
2/2
✓ Branch 0 taken 4 times.
✓ Branch 1 taken 36 times.
40 if (max_parallelism != 0) {
1631
2/2
✓ Branch 0 taken 16 times.
✓ Branch 1 taken 4 times.
20 for (unsigned int i=0; i<cpus_priority_array->count; ++i) {
1632 16 cpuid_t cpuid = cpus_priority_array->items[i];
1633 16 const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
1634
2/2
✓ Branch 0 taken 8 times.
✓ Branch 1 taken 8 times.
16 if (cpuinfo->guest == pid) {
1635 8 --max_parallelism;
1636 }
1637 }
1638
1/2
✓ Branch 0 taken 4 times.
✗ Branch 1 not taken.
4 if (max_parallelism <= 0) {
1639 4 return DLB_NOUPDT;
1640 }
1641 }
1642
1643 /* Compute the max number of CPUs to borrow */
1644
2/2
✓ Branch 0 taken 27 times.
✓ Branch 1 taken 9 times.
36 int ncpus = requested_ncpus ? *requested_ncpus : node_size;
1645
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 36 times.
36 if (max_parallelism > 0) {
1646 ncpus = min_int(ncpus, max_parallelism);
1647 }
1648
1649 36 int error = DLB_NOUPDT;
1650 36 shmem_lock(shm_handler);
1651 {
1652 /* Skip borrow if no CPUs in the free_cpus mask */
1653
2/2
✓ Branch 1 taken 2 times.
✓ Branch 2 taken 34 times.
36 if (CPU_COUNT(&shdata->free_cpus) == 0) {
1654 2 ncpus = 0;
1655 }
1656
1657 /* Borrow CPUs in the cpus_priority_array */
1658
2/2
✓ Branch 1 taken 28 times.
✓ Branch 2 taken 8 times.
36 if (borrow_cpus_in_array_cpuid_t(pid, cpus_priority_array, &ncpus, tasks) == DLB_SUCCESS) {
1659 28 error = DLB_SUCCESS;
1660 }
1661
1662 /* Only if --priority=spread-ifempty, borrow CPUs if there are free NUMA nodes */
1663
1/4
✗ Branch 0 not taken.
✓ Branch 1 taken 36 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
36 if (lewi_affinity == LEWI_AFFINITY_SPREAD_IFEMPTY && ncpus > 0) {
1664 cpu_set_t free_nodes;
1665 mu_get_nodes_subset_of_cpuset(&free_nodes, &shdata->free_cpus);
1666 if (borrow_cpus_in_cpu_set_t(pid, &free_nodes, &ncpus, tasks) == DLB_SUCCESS) {
1667 error = DLB_SUCCESS;
1668 }
1669 }
1670 }
1671 36 shmem_unlock(shm_handler);
1672
1673 /* Update timestamp if borrow did not succeed */
1674
4/4
✓ Branch 0 taken 19 times.
✓ Branch 1 taken 17 times.
✓ Branch 2 taken 4 times.
✓ Branch 3 taken 15 times.
36 if (last_borrow != NULL && error != DLB_SUCCESS) {
1675 4 *last_borrow = get_time_in_ns();
1676 }
1677
1678 36 return error;
1679 }
1680
1681
1682 /*********************************************************************************/
1683 /* Return CPU */
1684 /*********************************************************************************/
1685
1686 /* Return CPU
1687 * Abandon CPU given that state == BUSY, owner != pid, guest == pid
1688 */
1689 33 static int return_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) {
1690
1691 33 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
1692
1693
2/2
✓ Branch 0 taken 32 times.
✓ Branch 1 taken 1 times.
33 if (cpuinfo->owner == pid
1694
1/2
✓ Branch 0 taken 32 times.
✗ Branch 1 not taken.
32 || cpuinfo->guest != pid
1695
2/2
✓ Branch 0 taken 5 times.
✓ Branch 1 taken 27 times.
32 || (cpuinfo->state == CPU_LENT
1696
2/2
✓ Branch 1 taken 2 times.
✓ Branch 2 taken 3 times.
5 && core_is_eligible(pid, cpuid))) {
1697 3 return DLB_NOUPDT;
1698 }
1699
1700 // Return CPU
1701
2/2
✓ Branch 0 taken 26 times.
✓ Branch 1 taken 4 times.
30 if (cpuinfo->state == CPU_BUSY) {
1702 26 cpuinfo->guest = cpuinfo->owner;
1703 } else {
1704 /* state is disabled or the core is not eligible */
1705 4 cpuinfo->guest = NOBODY;
1706
1/2
✓ Branch 0 taken 4 times.
✗ Branch 1 not taken.
4 CPU_SET(cpuid, &shdata->free_cpus);
1707 }
1708
1709 // Possibly clear CPU from occupies cores set
1710 30 update_occupied_cores(cpuinfo->owner, cpuinfo->id);
1711
1712 // current subprocess to disable cpu
1713 30 array_cpuinfo_task_t_push(
1714 tasks,
1715 30 (const cpuinfo_task_t) {
1716 .action = DISABLE_CPU,
1717 .pid = pid,
1718 .cpuid = cpuid,
1719 });
1720
1721 30 return DLB_SUCCESS;
1722 }
1723
1724 6 int shmem_cpuinfo__return_all(pid_t pid, array_cpuinfo_task_t *restrict tasks) {
1725
1726 6 int error = DLB_NOUPDT;
1727 6 shmem_lock(shm_handler);
1728 {
1729 6 for (int cpuid = mu_get_first_cpu(&shdata->occupied_cores);
1730
2/2
✓ Branch 0 taken 8 times.
✓ Branch 1 taken 6 times.
14 cpuid >= 0;
1731 8 cpuid = mu_get_next_cpu(&shdata->occupied_cores, cpuid)) {
1732 8 int local_error = return_cpu(pid, cpuid, tasks);
1733
1/4
✗ Branch 0 not taken.
✓ Branch 1 taken 8 times.
✗ Branch 2 not taken.
✗ Branch 3 not taken.
8 switch(local_error) {
1734 case DLB_ERR_REQST:
1735 // max priority, always overwrite
1736 error = DLB_ERR_REQST;
1737 break;
1738 8 case DLB_SUCCESS:
1739 // medium priority, only update if error is in lowest priority
1740
2/2
✓ Branch 0 taken 5 times.
✓ Branch 1 taken 3 times.
8 error = (error == DLB_NOUPDT) ? DLB_SUCCESS : error;
1741 8 break;
1742 case DLB_NOUPDT:
1743 // lowest priority, default value
1744 break;
1745 }
1746 }
1747 }
1748 6 shmem_unlock(shm_handler);
1749 6 return error;
1750 }
1751
1752 23 int shmem_cpuinfo__return_cpu(pid_t pid, int cpuid, array_cpuinfo_task_t *restrict tasks) {
1753
1754
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 23 times.
23 if (cpuid >= node_size) return DLB_ERR_PERM;
1755
1756 int error;
1757 23 shmem_lock(shm_handler);
1758 {
1759
2/2
✓ Branch 0 taken 3 times.
✓ Branch 1 taken 20 times.
23 if (unlikely(shdata->node_info[cpuid].guest != pid)) {
1760 3 error = DLB_ERR_PERM;
1761 } else {
1762 20 error = return_cpu(pid, cpuid, tasks);
1763 }
1764 }
1765 23 shmem_unlock(shm_handler);
1766 23 return error;
1767 }
1768
1769 5 int shmem_cpuinfo__return_cpu_mask(pid_t pid, const cpu_set_t *mask,
1770 array_cpuinfo_task_t *restrict tasks) {
1771
1772 5 int error = DLB_NOUPDT;
1773 5 shmem_lock(shm_handler);
1774 {
1775 cpu_set_t cpus_to_return;
1776
2/2
✓ Branch 0 taken 80 times.
✓ Branch 1 taken 5 times.
85 CPU_AND(&cpus_to_return, mask, &shdata->occupied_cores);
1777
1778 5 for (int cpuid = mu_get_first_cpu(&cpus_to_return);
1779
2/2
✓ Branch 0 taken 4 times.
✓ Branch 1 taken 5 times.
9 cpuid >= 0;
1780 4 cpuid = mu_get_next_cpu(&cpus_to_return, cpuid)) {
1781 4 int local_error = return_cpu(pid, cpuid, tasks);
1782
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 4 times.
4 error = (error < 0) ? error : local_error;
1783 }
1784 }
1785 5 shmem_unlock(shm_handler);
1786 5 return error;
1787 }
1788
1789 43 static inline void shmem_cpuinfo__return_async(pid_t pid, cpuid_t cpuid) {
1790 43 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
1791
1792
2/4
✓ Branch 0 taken 43 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 43 times.
✗ Branch 3 not taken.
43 ensure(cpuinfo->owner != pid
1793 && cpuinfo->guest == pid, "cpuinfo inconsistency in %s", __func__);
1794
1795 /* 'cpuid' should only be a guested non-owned CPU */
1796
2/2
✓ Branch 0 taken 38 times.
✓ Branch 1 taken 5 times.
43 if (cpuinfo->state == CPU_BUSY) {
1797 38 cpuinfo->guest = cpuinfo->owner;
1798 } else {
1799 5 cpuinfo->guest = NOBODY;
1800
1/2
✓ Branch 0 taken 5 times.
✗ Branch 1 not taken.
5 CPU_SET(cpuid, &shdata->free_cpus);
1801 }
1802
1803 // Possibly clear CPU from occupies cores set
1804 43 update_occupied_cores(cpuinfo->owner, cpuinfo->id);
1805
1806 /* Add another CPU request */
1807 43 queue_pid_t_enqueue(&cpuinfo->requests, pid);
1808 43 }
1809
1810 /* Only for asynchronous mode. This function is intended to be called after
1811 * a disable_cpu callback.
1812 * This function resolves returned CPUs, fixes guest and add a new request */
1813 15 void shmem_cpuinfo__return_async_cpu(pid_t pid, cpuid_t cpuid) {
1814
1815 15 shmem_lock(shm_handler);
1816 {
1817 15 shmem_cpuinfo__return_async(pid, cpuid);
1818 }
1819 15 shmem_unlock(shm_handler);
1820 15 }
1821
1822 /* Only for asynchronous mode. This is function is intended to be called after
1823 * a disable_cpu callback.
1824 * This function resolves returned CPUs, fixes guest and add a new request */
1825 11 void shmem_cpuinfo__return_async_cpu_mask(pid_t pid, const cpu_set_t *mask) {
1826
1827 11 shmem_lock(shm_handler);
1828 {
1829 11 for (int cpuid = mu_get_first_cpu(mask);
1830
3/4
✓ Branch 0 taken 28 times.
✓ Branch 1 taken 11 times.
✓ Branch 2 taken 28 times.
✗ Branch 3 not taken.
39 cpuid >= 0 && cpuid < node_size;
1831 28 cpuid = mu_get_next_cpu(mask, cpuid)) {
1832
1833 28 shmem_cpuinfo__return_async(pid, cpuid);
1834 }
1835 }
1836 11 shmem_unlock(shm_handler);
1837 11 }
1838
1839
1840 /*********************************************************************************/
1841 /* */
1842 /*********************************************************************************/
1843
1844 /* Called when lewi_mask_finalize.
1845 * This function deregisters pid, disabling or lending CPUs as needed */
1846 57 int shmem_cpuinfo__deregister(pid_t pid, array_cpuinfo_task_t *restrict tasks) {
1847 57 int error = DLB_SUCCESS;
1848 57 shmem_lock(shm_handler);
1849 {
1850 // Remove any request before acquiring and lending
1851
2/2
✓ Branch 0 taken 26 times.
✓ Branch 1 taken 31 times.
57 if (shdata->flags.queues_enabled) {
1852 26 queue_lewi_mask_request_t_remove(&shdata->lewi_mask_requests, pid);
1853
2/2
✓ Branch 0 taken 180 times.
✓ Branch 1 taken 26 times.
206 for (int cpuid=0; cpuid<node_size; ++cpuid) {
1854 180 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
1855
2/2
✓ Branch 0 taken 120 times.
✓ Branch 1 taken 60 times.
180 if (cpuinfo->owner != pid) {
1856 120 queue_pid_t_remove(&cpuinfo->requests, pid);
1857 }
1858 }
1859 }
1860
1861 // Iterate again to properly treat each CPU
1862
2/2
✓ Branch 0 taken 656 times.
✓ Branch 1 taken 57 times.
713 for (int cpuid=0; cpuid<node_size; ++cpuid) {
1863 656 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
1864
2/2
✓ Branch 0 taken 224 times.
✓ Branch 1 taken 432 times.
656 if (cpuinfo->owner == pid) {
1865
3/4
✓ Branch 0 taken 204 times.
✓ Branch 1 taken 20 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 204 times.
224 if (cpu_is_public_post_mortem || !respect_cpuset) {
1866 /* Lend if public */
1867 20 lend_cpu(pid, cpuid, tasks);
1868 } else {
1869 /* If CPU won't be public, it must be reclaimed beforehand */
1870 204 reclaim_cpu(pid, cpuid, tasks);
1871
2/2
✓ Branch 0 taken 203 times.
✓ Branch 1 taken 1 times.
204 if (cpuinfo->guest == pid) {
1872 203 cpuinfo->guest = NOBODY;
1873 }
1874 204 cpuinfo->state = CPU_DISABLED;
1875
1/2
✓ Branch 0 taken 204 times.
✗ Branch 1 not taken.
204 CPU_CLR(cpuid, &shdata->free_cpus);
1876 }
1877 224 cpuinfo->owner = NOBODY;
1878
1879 /* It will be consistent as long as one core belongs to one process only */
1880
1/2
✓ Branch 0 taken 224 times.
✗ Branch 1 not taken.
224 CPU_CLR(cpuid, &shdata->occupied_cores);
1881 } else {
1882 // Free external CPUs that I might be using
1883
2/2
✓ Branch 0 taken 25 times.
✓ Branch 1 taken 407 times.
432 if (cpuinfo->guest == pid) {
1884 25 lend_cpu(pid, cpuid, tasks);
1885 }
1886 }
1887 }
1888 }
1889 57 shmem_unlock(shm_handler);
1890
1891 57 update_shmem_timestamp();
1892
1893 57 return error;
1894 }
1895
1896 /* Called when DLB_disable.
1897 * This function resets the initial status of pid: acquire owned, lend guested */
1898 5 int shmem_cpuinfo__reset(pid_t pid, array_cpuinfo_task_t *restrict tasks) {
1899 5 int error = DLB_SUCCESS;
1900 5 shmem_lock(shm_handler);
1901 {
1902 // Remove any request before acquiring and lending
1903
2/2
✓ Branch 0 taken 3 times.
✓ Branch 1 taken 2 times.
5 if (shdata->flags.queues_enabled) {
1904 3 queue_lewi_mask_request_t_remove(&shdata->lewi_mask_requests, pid);
1905
2/2
✓ Branch 0 taken 12 times.
✓ Branch 1 taken 3 times.
15 for (int cpuid=0; cpuid<node_size; ++cpuid) {
1906 12 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
1907
2/2
✓ Branch 0 taken 6 times.
✓ Branch 1 taken 6 times.
12 if (cpuinfo->owner != pid) {
1908 6 queue_pid_t_remove(&cpuinfo->requests, pid);
1909 }
1910 }
1911 }
1912
1913 // Iterate again to properly reset each CPU
1914
2/2
✓ Branch 0 taken 20 times.
✓ Branch 1 taken 5 times.
25 for (int cpuid=0; cpuid<node_size; ++cpuid) {
1915 20 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
1916
2/2
✓ Branch 0 taken 10 times.
✓ Branch 1 taken 10 times.
20 if (cpuinfo->owner == pid) {
1917 10 reclaim_cpu(pid, cpuid, tasks);
1918
2/2
✓ Branch 0 taken 4 times.
✓ Branch 1 taken 6 times.
10 } else if (cpuinfo->guest == pid) {
1919 4 lend_cpu(pid, cpuid, tasks);
1920 4 array_cpuinfo_task_t_push(
1921 tasks,
1922 4 (const cpuinfo_task_t) {
1923 .action = DISABLE_CPU,
1924 .pid = pid,
1925 .cpuid = cpuid,
1926 });
1927 }
1928 }
1929 }
1930 5 shmem_unlock(shm_handler);
1931
1932 5 update_shmem_timestamp();
1933
1934 5 return error;
1935 }
1936
1937 /* Lend as many CPUs as needed to only guest as much as 'max' CPUs */
1938 27 int shmem_cpuinfo__update_max_parallelism(pid_t pid, unsigned int max,
1939 array_cpuinfo_task_t *restrict tasks) {
1940 27 int error = DLB_SUCCESS;
1941 27 unsigned int owned_count = 0;
1942 27 unsigned int guested_count = 0;
1943 27 SMALL_ARRAY(cpuid_t, guested_cpus, node_size);
1944 27 shmem_lock(shm_handler);
1945 {
1946
2/2
✓ Branch 0 taken 136 times.
✓ Branch 1 taken 27 times.
163 for (cpuid_t cpuid=0; cpuid<node_size; ++cpuid) {
1947 136 const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
1948
2/2
✓ Branch 0 taken 96 times.
✓ Branch 1 taken 40 times.
136 if (cpuinfo->owner == pid) {
1949 96 ++owned_count;
1950
2/2
✓ Branch 0 taken 34 times.
✓ Branch 1 taken 62 times.
96 if (max < owned_count) {
1951 // Lend owned CPUs if the number of owned is greater than max
1952 34 lend_cpu(pid, cpuid, tasks);
1953 34 array_cpuinfo_task_t_push(
1954 tasks,
1955 34 (const cpuinfo_task_t) {
1956 .action = DISABLE_CPU,
1957 .pid = pid,
1958 .cpuid = cpuid,
1959 });
1960 }
1961
2/2
✓ Branch 0 taken 24 times.
✓ Branch 1 taken 16 times.
40 } else if (cpuinfo->guest == pid) {
1962 // Since owned_count is still unknown, just save our guested CPUs
1963 24 guested_cpus[guested_count++] = cpuid;
1964 }
1965 }
1966
1967 // Iterate guested CPUs to lend them if needed
1968
2/2
✓ Branch 0 taken 24 times.
✓ Branch 1 taken 27 times.
51 for (unsigned int i=0; i<guested_count; ++i) {
1969
2/2
✓ Branch 0 taken 20 times.
✓ Branch 1 taken 4 times.
24 if (max < owned_count + i + 1) {
1970 20 cpuid_t cpuid = guested_cpus[i];
1971 20 lend_cpu(pid, cpuid, tasks);
1972 20 array_cpuinfo_task_t_push(
1973 tasks,
1974 20 (const cpuinfo_task_t) {
1975 .action = DISABLE_CPU,
1976 .pid = pid,
1977 .cpuid = cpuid,
1978 });
1979 }
1980 }
1981 }
1982 27 shmem_unlock(shm_handler);
1983
1984 27 update_shmem_timestamp();
1985
1986 27 return error;
1987 }
1988
1989 /* Update CPU ownership according to the new process mask.
1990 * To avoid collisions, we only release the ownership if we still own it
1991 */
1992 27 void shmem_cpuinfo__update_ownership(pid_t pid, const cpu_set_t *restrict process_mask,
1993 array_cpuinfo_task_t *restrict tasks) {
1994
1995
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 27 times.
27 verbose(VB_SHMEM, "Updating ownership: %s", mu_to_str(process_mask));
1996
1997 27 shmem_lock(shm_handler);
1998
1999 int cpuid;
2000
2/2
✓ Branch 0 taken 120 times.
✓ Branch 1 taken 27 times.
147 for (cpuid=0; cpuid<node_size; ++cpuid) {
2001 120 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
2002
5/6
✓ Branch 0 taken 120 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 71 times.
✓ Branch 3 taken 49 times.
✓ Branch 4 taken 71 times.
✓ Branch 5 taken 49 times.
120 if (CPU_ISSET(cpuid, process_mask)) {
2003 // The CPU should be mine
2004
2/2
✓ Branch 0 taken 23 times.
✓ Branch 1 taken 48 times.
71 if (cpuinfo->owner != pid) {
2005 // Not owned: Steal CPU
2006 23 cpuinfo->owner = pid;
2007 23 cpuinfo->state = CPU_BUSY;
2008
2/2
✓ Branch 0 taken 21 times.
✓ Branch 1 taken 2 times.
23 if (cpuinfo->guest == NOBODY) {
2009 21 cpuinfo->guest = pid;
2010
1/2
✓ Branch 0 taken 21 times.
✗ Branch 1 not taken.
21 CPU_CLR(cpuid, &shdata->free_cpus);
2011
1/2
✓ Branch 0 taken 21 times.
✗ Branch 1 not taken.
21 CPU_CLR(cpuid, &shdata->occupied_cores);
2012 }
2013
2/2
✓ Branch 0 taken 8 times.
✓ Branch 1 taken 15 times.
23 if (tasks) {
2014
2/2
✓ Branch 0 taken 2 times.
✓ Branch 1 taken 6 times.
8 if (cpuinfo->guest != pid) {
2015 2 array_cpuinfo_task_t_push(
2016 tasks,
2017 2 (const cpuinfo_task_t) {
2018 .action = DISABLE_CPU,
2019 2 .pid = cpuinfo->guest,
2020 .cpuid = cpuid,
2021 });
2022 }
2023 8 array_cpuinfo_task_t_push(
2024 tasks,
2025 8 (const cpuinfo_task_t) {
2026 .action = ENABLE_CPU,
2027 .pid = pid,
2028 .cpuid = cpuid,
2029 });
2030 }
2031
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 23 times.
23 verbose(VB_SHMEM, "Acquiring ownership of CPU %d", cpuid);
2032 } else {
2033 // The CPU was already owned, no update needed
2034 }
2035
2036
1/2
✓ Branch 0 taken 49 times.
✗ Branch 1 not taken.
49 } else if (cpuid < node_size) {
2037 // The CPU should not be mine
2038
2/2
✓ Branch 0 taken 15 times.
✓ Branch 1 taken 34 times.
49 if (cpuinfo->owner == pid) {
2039 // Previously owned: Release CPU ownership
2040 15 cpuinfo->owner = NOBODY;
2041 15 cpuinfo->state = CPU_DISABLED;
2042
2/2
✓ Branch 0 taken 13 times.
✓ Branch 1 taken 2 times.
15 if (cpuinfo->guest == pid ) {
2043 13 cpuinfo->guest = NOBODY;
2044
2/2
✓ Branch 0 taken 4 times.
✓ Branch 1 taken 9 times.
13 if (tasks) {
2045 4 array_cpuinfo_task_t_push(
2046 tasks,
2047 4 (const cpuinfo_task_t) {
2048 .action = DISABLE_CPU,
2049 .pid = pid,
2050 .cpuid = cpuid,
2051 });
2052 }
2053
1/2
✓ Branch 0 taken 13 times.
✗ Branch 1 not taken.
13 CPU_CLR(cpuid, &shdata->free_cpus);
2054
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 13 times.
13 verbose(VB_SHMEM, "Releasing ownership of CPU %d", cpuid);
2055 }
2056 } else {
2057
2/2
✓ Branch 0 taken 3 times.
✓ Branch 1 taken 31 times.
34 if (cpuinfo->guest == pid
2058
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 2 times.
3 && cpuinfo->state == CPU_BUSY) {
2059 /* 'tasks' may be NULL if LeWI is disabled, but if the process
2060 * is guesting an external CPU, LeWI should be enabled */
2061
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
1 if (unlikely(tasks == NULL)) {
2062 shmem_unlock(shm_handler);
2063 fatal("tasks pointer is NULL in %s. Please report bug at %s",
2064 __func__, PACKAGE_BUGREPORT);
2065 }
2066 // The CPU has been either stolen or reclaimed,
2067 // return it anyway
2068 1 return_cpu(pid, cpuid, tasks);
2069 }
2070 }
2071 }
2072 }
2073
2074 27 shmem_unlock(shm_handler);
2075 27 }
2076
2077 58 int shmem_cpuinfo__get_thread_binding(pid_t pid, int thread_num) {
2078
2/4
✓ Branch 0 taken 58 times.
✗ Branch 1 not taken.
✗ Branch 2 not taken.
✓ Branch 3 taken 58 times.
58 if (unlikely(shm_handler == NULL || thread_num < 0)) return -1;
2079
2080 116 SMALL_ARRAY(cpuid_t, guested_cpus, node_size);
2081 58 int owned_count = 0;
2082 58 int guested_count = 0;
2083
2084 int cpuid;
2085
2/2
✓ Branch 0 taken 178 times.
✓ Branch 1 taken 17 times.
195 for (cpuid=0; cpuid<node_size; ++cpuid) {
2086 178 const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
2087
2/2
✓ Branch 0 taken 98 times.
✓ Branch 1 taken 80 times.
178 if (cpuinfo->owner == pid
2088
2/2
✓ Branch 0 taken 89 times.
✓ Branch 1 taken 9 times.
98 && cpuinfo->state == CPU_BUSY) {
2089 89 ++owned_count;
2090
2/2
✓ Branch 0 taken 41 times.
✓ Branch 1 taken 48 times.
89 if (thread_num < owned_count) {
2091 41 return cpuid;
2092 }
2093
2/2
✓ Branch 0 taken 10 times.
✓ Branch 1 taken 79 times.
89 } else if (cpuinfo->guest == pid) {
2094 10 guested_cpus[guested_count++] = cpuid;
2095 }
2096 }
2097
2098 int binding;
2099
2/2
✓ Branch 0 taken 5 times.
✓ Branch 1 taken 12 times.
17 if (thread_num - owned_count < guested_count) {
2100 5 binding = guested_cpus[thread_num-owned_count];
2101 } else {
2102 12 binding = -1;
2103 }
2104 17 return binding;
2105 }
2106
2107 /* Find the nth non owned CPU for a given PID.
2108 * The count always starts from the first owned CPU.
2109 * ex: process has mask [4,7] in a system mask [0-7],
2110 * 1st CPU (id=0) is 0
2111 * 4th CPU (id=3) is 3
2112 * id > 3 -> -1
2113 */
2114 20 int shmem_cpuinfo__get_nth_non_owned_cpu(pid_t pid, int nth_cpu) {
2115 20 int idx = 0;
2116 20 int owned_cpus = 0;
2117 20 int non_owned_cpus = 0;
2118 40 SMALL_ARRAY(cpuid_t, non_owned_cpu_list, node_size);
2119
2120 /* Construct non owned CPU list */
2121 int cpuid;
2122
2/2
✓ Branch 0 taken 160 times.
✓ Branch 1 taken 20 times.
180 for (cpuid=0; cpuid<node_size; ++cpuid) {
2123 160 const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
2124
2/2
✓ Branch 0 taken 80 times.
✓ Branch 1 taken 80 times.
160 if (cpuinfo->owner == pid) {
2125
2/2
✓ Branch 0 taken 20 times.
✓ Branch 1 taken 60 times.
80 if (owned_cpus++ == 0) {
2126 20 idx = non_owned_cpus;
2127 }
2128
1/2
✓ Branch 0 taken 80 times.
✗ Branch 1 not taken.
80 } else if (cpuinfo->state != CPU_DISABLED) {
2129 80 non_owned_cpu_list[non_owned_cpus++] = cpuid;
2130 }
2131 }
2132
2133 /* Find the nth element starting from the first owned CPU */
2134
1/2
✓ Branch 0 taken 20 times.
✗ Branch 1 not taken.
20 if (nth_cpu < non_owned_cpus) {
2135 20 idx = (idx + nth_cpu) % non_owned_cpus;
2136 20 return non_owned_cpu_list[idx];
2137 } else {
2138 return -1;
2139 }
2140 }
2141
2142 /* Return the number of registered CPUs not owned by the given PID */
2143 33 int shmem_cpuinfo__get_number_of_non_owned_cpus(pid_t pid) {
2144 33 int num_non_owned_cpus = 0;
2145 int cpuid;
2146
2/2
✓ Branch 0 taken 264 times.
✓ Branch 1 taken 33 times.
297 for (cpuid=0; cpuid<node_size; ++cpuid) {
2147 264 const cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
2148
2/2
✓ Branch 0 taken 132 times.
✓ Branch 1 taken 132 times.
264 if (cpuinfo->owner != pid
2149
1/2
✓ Branch 0 taken 132 times.
✗ Branch 1 not taken.
132 && cpuinfo->state != CPU_DISABLED) {
2150 132 ++num_non_owned_cpus;
2151 }
2152 }
2153 33 return num_non_owned_cpus;
2154 }
2155
2156 61 int shmem_cpuinfo__check_cpu_availability(pid_t pid, int cpuid) {
2157 61 int error = DLB_NOTED;
2158 61 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
2159
2160
2/2
✓ Branch 0 taken 24 times.
✓ Branch 1 taken 37 times.
61 if (cpuinfo->owner != pid
2161
4/4
✓ Branch 0 taken 14 times.
✓ Branch 1 taken 10 times.
✓ Branch 2 taken 8 times.
✓ Branch 3 taken 6 times.
24 && (cpuinfo->state == CPU_BUSY || cpuinfo->state == CPU_DISABLED) ) {
2162 /* The CPU is reclaimed or disabled */
2163 18 error = DLB_ERR_PERM;
2164
2/2
✓ Branch 0 taken 38 times.
✓ Branch 1 taken 5 times.
43 } else if (cpuinfo->guest == pid) {
2165 /* The CPU is already guested by the process */
2166 38 error = DLB_SUCCESS;
2167
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 5 times.
5 } else if (cpuinfo->guest == NOBODY ) {
2168 /* Assign new guest if the CPU is empty */
2169 shmem_lock(shm_handler);
2170 {
2171 if (cpuinfo->guest == NOBODY) {
2172 cpuinfo->guest = pid;
2173 CPU_CLR(cpuid, &shdata->free_cpus);
2174 error = DLB_SUCCESS;
2175 }
2176 }
2177 shmem_unlock(shm_handler);
2178
1/2
✓ Branch 0 taken 5 times.
✗ Branch 1 not taken.
5 } else if (cpuinfo->owner == pid
2179
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 5 times.
5 && cpuinfo->state == CPU_LENT) {
2180 /* The owner is asking for a CPU not reclaimed yet */
2181 error = DLB_NOUPDT;
2182 }
2183
2184 61 return error;
2185 }
2186
2187 6 int shmem_cpuinfo__is_cpu_enabled(int cpuid) {
2188 6 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
2189 6 return cpuinfo->state != CPU_DISABLED;
2190 }
2191
2192 bool shmem_cpuinfo__exists(void) {
2193 return shm_handler != NULL;
2194 }
2195
2196 29 void shmem_cpuinfo__enable_request_queues(void) {
2197
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 29 times.
29 if (shm_handler == NULL) return;
2198
2199 /* Enable asynchronous request queues */
2200 29 shdata->flags.queues_enabled = true;
2201 }
2202
2203 13 void shmem_cpuinfo__remove_requests(pid_t pid) {
2204
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 13 times.
13 if (shm_handler == NULL) return;
2205 13 shmem_lock(shm_handler);
2206 {
2207 /* Remove any previous request for the specific pid */
2208
2/2
✓ Branch 0 taken 9 times.
✓ Branch 1 taken 4 times.
13 if (shdata->flags.queues_enabled) {
2209 /* Remove global requests (pair <pid,howmany>) */
2210 9 queue_lewi_mask_request_t_remove(&shdata->lewi_mask_requests, pid);
2211
2212 /* Remove specific CPU requests */
2213 int cpuid;
2214
2/2
✓ Branch 0 taken 48 times.
✓ Branch 1 taken 9 times.
57 for (cpuid=0; cpuid<node_size; ++cpuid) {
2215 48 cpuinfo_t *cpuinfo = &shdata->node_info[cpuid];
2216 48 queue_pid_t_remove(&cpuinfo->requests, pid);
2217 }
2218 }
2219 }
2220 13 shmem_unlock(shm_handler);
2221 }
2222
2223 1 int shmem_cpuinfo__version(void) {
2224 1 return SHMEM_CPUINFO_VERSION;
2225 }
2226
2227 102 size_t shmem_cpuinfo__size(void) {
2228 102 return sizeof(shdata_t) + sizeof(cpuinfo_t)*mu_get_system_size();
2229 }
2230
2231 8 void shmem_cpuinfo__print_info(const char *shmem_key, int shmem_color, int columns,
2232 dlb_printshmem_flags_t print_flags) {
2233
2234 /* If the shmem is not opened, obtain a temporary fd */
2235 8 bool temporary_shmem = shm_handler == NULL;
2236
2/2
✓ Branch 0 taken 4 times.
✓ Branch 1 taken 4 times.
8 if (temporary_shmem) {
2237 4 shmem_cpuinfo_ext__init(shmem_key, shmem_color);
2238 }
2239
2240 /* Make a full copy of the shared memory */
2241 8 shdata_t *shdata_copy = malloc(sizeof(shdata_t) + sizeof(cpuinfo_t)*node_size);
2242 8 shmem_lock(shm_handler);
2243 {
2244 8 memcpy(shdata_copy, shdata, sizeof(shdata_t) + sizeof(cpuinfo_t)*node_size);
2245 }
2246 8 shmem_unlock(shm_handler);
2247
2248 /* Close shmem if needed */
2249
2/2
✓ Branch 0 taken 4 times.
✓ Branch 1 taken 4 times.
8 if (temporary_shmem) {
2250 4 shmem_cpuinfo_ext__finalize();
2251 }
2252
2253 /* Find the largest pid registered in the shared memory */
2254 8 pid_t max_pid = 0;
2255 int cpuid;
2256
2/2
✓ Branch 0 taken 176 times.
✓ Branch 1 taken 8 times.
184 for (cpuid=0; cpuid<node_size; ++cpuid) {
2257 176 pid_t pid = shdata_copy->node_info[cpuid].owner;
2258 176 max_pid = pid > max_pid ? pid : max_pid;
2259 }
2260 8 int max_digits = snprintf(NULL, 0, "%d", max_pid);
2261
2262 /* Do not print shared memory if nobody is registered */
2263
2/2
✓ Branch 0 taken 5 times.
✓ Branch 1 taken 3 times.
8 if (max_pid == 0) {
2264 5 free(shdata_copy);
2265 5 return;
2266 }
2267
2268 /* Set up color */
2269 3 bool is_tty = isatty(STDOUT_FILENO);
2270 6 bool color = print_flags & DLB_COLOR_ALWAYS
2271
3/6
✓ Branch 0 taken 3 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 3 times.
✗ Branch 3 not taken.
✗ Branch 4 not taken.
✓ Branch 5 taken 3 times.
3 || (print_flags & DLB_COLOR_AUTO && is_tty);
2272
2273 /* Set up number of columns */
2274
2/2
✓ Branch 0 taken 2 times.
✓ Branch 1 taken 1 times.
3 if (columns <= 0) {
2275 unsigned short width;
2276
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 2 times.
2 if (is_tty) {
2277 struct winsize w;
2278 ioctl(STDOUT_FILENO, TIOCGWINSZ, &w);
2279 width = w.ws_col ? w.ws_col : 80;
2280 } else {
2281 2 width = 80;
2282 }
2283
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 2 times.
2 if (color) {
2284 columns = width / (13+max_digits*2);
2285 } else {
2286 2 columns = width / (20+max_digits*2);
2287 }
2288 }
2289
2290 /* Initialize buffer */
2291 print_buffer_t buffer;
2292 3 printbuffer_init(&buffer);
2293
2294 /* Set up line buffer */
2295 enum { MAX_LINE_LEN = 512 };
2296 char line[MAX_LINE_LEN];
2297 char *l;
2298
2299 /* Calculate number of rows and cpus per column (same) */
2300 3 int rows = (node_size+columns-1) / columns;
2301 3 int cpus_per_column = rows;
2302
2303 /* Update flag here in case this is an external process */
2304
3/4
✓ Branch 0 taken 2 times.
✓ Branch 1 taken 1 times.
✗ Branch 2 not taken.
✓ Branch 3 taken 2 times.
3 if (thread_spd && !thread_spd->options.lewi_respect_cpuset) {
2305 respect_cpuset = false;
2306 }
2307
2308 int row;
2309
2/2
✓ Branch 0 taken 66 times.
✓ Branch 1 taken 3 times.
69 for (row=0; row<rows; ++row) {
2310 /* Init line */
2311 66 line[0] = '\0';
2312 66 l = line;
2313
2314 /* Iterate columns */
2315 int column;
2316
2/2
✓ Branch 0 taken 136 times.
✓ Branch 1 taken 66 times.
202 for (column=0; column<columns; ++column) {
2317 136 cpuid = row + column*cpus_per_column;
2318
1/2
✓ Branch 0 taken 136 times.
✗ Branch 1 not taken.
136 if (cpuid < node_size) {
2319 136 const cpuinfo_t *cpuinfo = &shdata_copy->node_info[cpuid];
2320 136 pid_t owner = cpuinfo->owner;
2321 136 pid_t guest = cpuinfo->guest;
2322 136 cpu_state_t state = cpuinfo->state;
2323
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 136 times.
136 if (color) {
2324 const char *code_color =
2325 state == CPU_DISABLED && respect_cpuset ? ANSI_COLOR_RESET :
2326 state == CPU_BUSY && guest == owner ? ANSI_COLOR_RED :
2327 state == CPU_BUSY ? ANSI_COLOR_YELLOW :
2328 guest == NOBODY ? ANSI_COLOR_GREEN :
2329 ANSI_COLOR_BLUE;
2330 l += snprintf(l, MAX_LINE_LEN-strlen(line),
2331 " %4d %s[ %*d / %*d ]" ANSI_COLOR_RESET,
2332 cpuid,
2333 code_color,
2334 max_digits, owner,
2335 max_digits, guest);
2336 } else {
2337
8/8
✓ Branch 0 taken 125 times.
✓ Branch 1 taken 11 times.
✓ Branch 2 taken 115 times.
✓ Branch 3 taken 10 times.
✓ Branch 4 taken 10 times.
✓ Branch 5 taken 4 times.
✓ Branch 6 taken 6 times.
✓ Branch 7 taken 4 times.
251 const char *state_desc =
2338 state == CPU_DISABLED ? " off" :
2339
2/2
✓ Branch 0 taken 4 times.
✓ Branch 1 taken 111 times.
115 state == CPU_BUSY && guest == owner ? "busy" :
2340 state == CPU_BUSY ? "recl" :
2341 guest == NOBODY ? "idle" :
2342 "lent";
2343 136 l += snprintf(l, MAX_LINE_LEN-strlen(line),
2344 " %4d [ %*d / %*d / %s ]",
2345 cpuid,
2346 max_digits, owner,
2347 max_digits, guest,
2348 state_desc);
2349 }
2350 }
2351 }
2352 66 printbuffer_append(&buffer, line);
2353 }
2354
2355 /* Print format */
2356
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 3 times.
3 snprintf(line, MAX_LINE_LEN,
2357 " Format: <cpuid> [ <owner> / <guest> %s]", color ? "" : "/ <state> ");
2358 3 printbuffer_append(&buffer, line);
2359
2360 /* Print color legend */
2361
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 3 times.
3 if (color) {
2362 snprintf(line, MAX_LINE_LEN,
2363 " Status: Disabled, "
2364 ANSI_COLOR_RED "Owned" ANSI_COLOR_RESET ", "
2365 ANSI_COLOR_YELLOW "Reclaimed" ANSI_COLOR_RESET ", "
2366 ANSI_COLOR_GREEN "Idle" ANSI_COLOR_RESET ", "
2367 ANSI_COLOR_BLUE "Lent" ANSI_COLOR_RESET);
2368 printbuffer_append(&buffer, line);
2369 }
2370
2371 /* CPU requests */
2372 3 bool any_cpu_request = false;
2373
4/4
✓ Branch 0 taken 93 times.
✓ Branch 1 taken 2 times.
✓ Branch 2 taken 92 times.
✓ Branch 3 taken 1 times.
95 for (cpuid=0; cpuid<node_size && !any_cpu_request; ++cpuid) {
2374 92 any_cpu_request = queue_pid_t_size(&shdata_copy->node_info[cpuid].requests) > 0;
2375 }
2376
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 2 times.
3 if (any_cpu_request) {
2377 1 snprintf(line, MAX_LINE_LEN, "\n CPU requests (<cpuid>: <spids>):");
2378 1 printbuffer_append(&buffer, line);
2379
2/2
✓ Branch 0 taken 64 times.
✓ Branch 1 taken 1 times.
65 for (cpuid=0; cpuid<node_size; ++cpuid) {
2380 64 queue_pid_t *requests = &shdata_copy->node_info[cpuid].requests;
2381
2/2
✓ Branch 1 taken 1 times.
✓ Branch 2 taken 63 times.
64 if (queue_pid_t_size(requests) > 0) {
2382 /* Set up line */
2383 1 line[0] = '\0';
2384 1 l = line;
2385 1 l += snprintf(l, MAX_LINE_LEN-strlen(line), " %4d: ", cpuid);
2386 /* Iterate requests */
2387 1 for (pid_t *it = queue_pid_t_front(requests);
2388
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 1 times.
2 it != NULL;
2389 1 it = queue_pid_t_next(requests, it)) {
2390 1 l += snprintf(l, MAX_LINE_LEN-strlen(line), " %u,", *it);
2391 }
2392 /* Remove trailing comma and append line */
2393 1 *(l-1) = '\0';
2394 1 printbuffer_append(&buffer, line);
2395 }
2396 }
2397 }
2398
2399 /* Proc requests */
2400
2/2
✓ Branch 1 taken 1 times.
✓ Branch 2 taken 2 times.
3 if (queue_lewi_mask_request_t_size(&shdata_copy->lewi_mask_requests) > 0) {
2401 1 snprintf(line, MAX_LINE_LEN,
2402 "\n Process requests (<spids>: <howmany>, <allowed_cpus>):");
2403 1 printbuffer_append(&buffer, line);
2404 }
2405 3 for (lewi_mask_request_t *it =
2406 3 queue_lewi_mask_request_t_front(&shdata_copy->lewi_mask_requests);
2407
2/2
✓ Branch 0 taken 2 times.
✓ Branch 1 taken 3 times.
5 it != NULL;
2408 2 it = queue_lewi_mask_request_t_next(&shdata_copy->lewi_mask_requests, it)) {
2409 4 snprintf(line, MAX_LINE_LEN,
2410 " %*d: %d, %s",
2411 2 max_digits, it->pid, it->howmany, mu_to_str(&it->allowed));
2412 2 printbuffer_append(&buffer, line);
2413 }
2414
2415 3 info0("=== CPU States ===\n%s", buffer.addr);
2416 3 printbuffer_destroy(&buffer);
2417 3 free(shdata_copy);
2418 }
2419
2420 15 int shmem_cpuinfo_testing__get_num_proc_requests(void) {
2421 15 return shdata->flags.queues_enabled ?
2422
2/2
✓ Branch 0 taken 8 times.
✓ Branch 1 taken 7 times.
15 queue_lewi_mask_request_t_size(&shdata->lewi_mask_requests) : 0;
2423 }
2424
2425 60 int shmem_cpuinfo_testing__get_num_cpu_requests(int cpuid) {
2426 60 return shdata->flags.queues_enabled ?
2427
2/2
✓ Branch 0 taken 32 times.
✓ Branch 1 taken 28 times.
60 queue_pid_t_size(&shdata->node_info[cpuid].requests) : 0;
2428 }
2429
2430 15 const cpu_set_t* shmem_cpuinfo_testing__get_free_cpu_set(void) {
2431 15 return &shdata->free_cpus;
2432 }
2433
2434 15 const cpu_set_t* shmem_cpuinfo_testing__get_occupied_core_set(void) {
2435 15 return &shdata->occupied_cores;
2436 }
2437
2438 /*** Helper functions, the shm lock must have been acquired beforehand ***/
2439 static inline bool is_idle(int cpu) {
2440 return shdata->node_info[cpu].state == CPU_LENT && shdata->node_info[cpu].guest == NOBODY;
2441 }
2442
2443 static inline bool is_borrowed(pid_t pid, int cpu) {
2444 return shdata->node_info[cpu].state == CPU_BUSY && shdata->node_info[cpu].owner == pid;
2445 }
2446
2447 101 static inline bool is_shmem_empty(void) {
2448 int cpuid;
2449
2/2
✓ Branch 0 taken 851 times.
✓ Branch 1 taken 86 times.
937 for (cpuid=0; cpuid<node_size; ++cpuid) {
2450
2/2
✓ Branch 0 taken 15 times.
✓ Branch 1 taken 836 times.
851 if (shdata->node_info[cpuid].owner != NOBODY) {
2451 15 return false;
2452 }
2453 }
2454 86 return true;
2455 }
2456
2457 /*** End of helper functions ***/
2458