GCC Code Coverage Report

Directory: src/
File:      src/LB_comm/shmem_lewi_async.c
Date:      2024-11-22 17:07:10

             Exec   Total   Coverage
Lines:        287     292      98.3%
Functions:     20      21      95.2%
Branches:     133     164      81.1%

Line Branch Exec Source
1 /*********************************************************************************/
2 /* Copyright 2009-2024 Barcelona Supercomputing Center */
3 /* */
4 /* This file is part of the DLB library. */
5 /* */
6 /* DLB is free software: you can redistribute it and/or modify */
7 /* it under the terms of the GNU Lesser General Public License as published by */
8 /* the Free Software Foundation, either version 3 of the License, or */
9 /* (at your option) any later version. */
10 /* */
11 /* DLB is distributed in the hope that it will be useful, */
12 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
13 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
14 /* GNU Lesser General Public License for more details. */
15 /* */
16 /* You should have received a copy of the GNU Lesser General Public License */
17 /* along with DLB. If not, see <https://www.gnu.org/licenses/>. */
18 /*********************************************************************************/
19
20 #include "LB_comm/shmem_lewi_async.h"
21
22 #include "LB_comm/shmem.h"
23 #include "apis/dlb_errors.h"
24 #include "support/atomic.h"
25 #include "support/debug.h"
26 #include "support/mask_utils.h"
27 #include "support/queues.h"
28 #include "support/tracing.h"
29 #include "support/types.h"
30
31 #include <inttypes.h>
32 #include <stdbool.h>
33 #include <stdlib.h>
34
35 typedef struct DLB_ALIGN_CACHE lewi_process_t {
36 pid_t pid;
37 unsigned int initial_ncpus;
38 unsigned int current_ncpus;
39 } lewi_process_t;
40
41 typedef struct lewi_async_shdata {
42 unsigned int idle_cpus;
43 unsigned int attached_nprocs;
44 queue_lewi_reqs_t requests; /* queue of requests */
45 unsigned int proc_list_head;
46 lewi_process_t processes[]; /* per-process lewi data */
47 } lewi_async_shdata_t;
48
49 enum { NOBODY = 0 };
50 enum { SHMEM_LEWI_ASYNC_VERSION = 2 };
51
52 static lewi_async_shdata_t *shdata = NULL;
53 static shmem_handler_t *shm_handler = NULL;
54 static const char *shmem_name = "lewi_async";
55 static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
56 static int subprocesses_attached = 0;
57 static int max_processes;
58 static lewi_process_t *my_process = NULL;
59
60
61 static void lend_ncpus_to_shmem(unsigned int ncpus, lewi_request_t *requests,
62 unsigned int *nreqs, unsigned int maxreqs);
63 static int reclaim_from_shmem(lewi_process_t *process, unsigned int ncpus,
64 lewi_request_t *requests, unsigned int *nreqs, unsigned int maxreqs);
65
66 /*********************************************************************************/
67 /* Shared memory management */
68 /*********************************************************************************/
69
70 static void cleanup_shmem(void *shdata_ptr, int pid) {
71 lewi_async_shdata_t *shared_data = shdata_ptr;
72 if (shared_data->attached_nprocs > 0) {
73 --shared_data->attached_nprocs;
74 }
75 }
76
77 11 static bool is_shmem_empty(void) {
78
2/4
✓ Branch 0 taken 11 times.
✗ Branch 1 not taken.
✓ Branch 2 taken 11 times.
✗ Branch 3 not taken.
11 return shdata && shdata->attached_nprocs == 0;
79 }
80
81 308 static lewi_process_t* get_process(pid_t pid) {
82
1/2
✓ Branch 0 taken 308 times.
✗ Branch 1 not taken.
308 if (shdata != NULL) {
83 /* Check first if pid is this process */
84
2/2
✓ Branch 0 taken 291 times.
✓ Branch 1 taken 17 times.
308 if (my_process != NULL
85
2/2
✓ Branch 0 taken 130 times.
✓ Branch 1 taken 161 times.
291 && my_process->pid == pid) {
86 130 return my_process;
87 }
88
89 /* Iterate otherwise */
90
2/2
✓ Branch 0 taken 268 times.
✓ Branch 1 taken 5 times.
273 for (unsigned int p = 0; p < shdata->proc_list_head; ++p) {
91
2/2
✓ Branch 0 taken 173 times.
✓ Branch 1 taken 95 times.
268 if (shdata->processes[p].pid == pid) {
92 173 return &shdata->processes[p];
93 }
94 }
95 }
96 5 return NULL;
97 }
98
99 2 bool shmem_lewi_async__exists(void) {
100 2 return shm_handler != NULL;
101 }
102
103 1 int shmem_lewi_async__version(void) {
104 1 return SHMEM_LEWI_ASYNC_VERSION;
105 }
106
107 12 size_t shmem_lewi_async__size(void) {
108 12 return sizeof(lewi_async_shdata_t) + sizeof(lewi_process_t)*mu_get_system_size();
109 }
110
111
112 /*********************************************************************************/
113 /* Queue management */
114 /*********************************************************************************/
115
116 20 void shmem_lewi_async__remove_requests(pid_t pid) {
117
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 19 times.
20 if (unlikely(shm_handler == NULL)) return;
118 19 shmem_lock(shm_handler);
119 {
120 19 queue_lewi_reqs_remove(&shdata->requests, pid);
121 }
122 19 shmem_unlock(shm_handler);
123 }
124
125 /* Only for testing purposes */
126 57 unsigned int shmem_lewi_async__get_num_requests(pid_t pid) {
127
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 57 times.
57 if (unlikely(shm_handler == NULL)) return 0;
128 unsigned int num_requests;
129
130 57 shmem_lock(shm_handler);
131 {
132 57 num_requests = queue_lewi_reqs_get(&shdata->requests, pid);
133 }
134 57 shmem_unlock(shm_handler);
135
136 57 return num_requests;
137 }
138
139
140 /*********************************************************************************/
141 /* Init */
142 /*********************************************************************************/
143
144 27 static void open_shmem(const char *shmem_key) {
145 27 pthread_mutex_lock(&mutex);
146 {
147
2/2
✓ Branch 0 taken 11 times.
✓ Branch 1 taken 16 times.
27 if (shm_handler == NULL) {
148 22 shm_handler = shmem_init((void**)&shdata,
149 11 &(const shmem_props_t) {
150 11 .size = shmem_lewi_async__size(),
151 .name = shmem_name,
152 .key = shmem_key,
153 .version = SHMEM_LEWI_ASYNC_VERSION,
154 .cleanup_fn = cleanup_shmem,
155 });
156 11 subprocesses_attached = 1;
157 11 max_processes = mu_get_system_size();
158 } else {
159 16 ++subprocesses_attached;
160 }
161 }
162 27 pthread_mutex_unlock(&mutex);
163 27 }
164
165 27 void shmem_lewi_async__init(pid_t pid, unsigned int ncpus, const char *shmem_key) {
166
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 27 times.
27 verbose(VB_SHMEM, "Initializing LeWI_async shared memory");
167
168 // Shared memory creation
169 27 open_shmem(shmem_key);
170
171 27 shmem_lock(shm_handler);
172 {
173
2/2
✓ Branch 0 taken 11 times.
✓ Branch 1 taken 16 times.
27 if (++shdata->attached_nprocs == 1) {
174 // first attached process, initialize common structures
175 11 shdata->idle_cpus = 0;
176 11 queue_lewi_reqs_init(&shdata->requests);
177 }
178
179 // Iterate the processes array to find a free spot
180 27 lewi_process_t *process = NULL;
181
1/2
✓ Branch 0 taken 53 times.
✗ Branch 1 not taken.
53 for (int p = 0; p < max_processes; ++p) {
182
2/2
✓ Branch 0 taken 27 times.
✓ Branch 1 taken 26 times.
53 if (shdata->processes[p].pid == NOBODY) {
183 27 process = &shdata->processes[p];
184 /* save the highest upper bound to iterate faster */
185 27 shdata->proc_list_head = max_int(shdata->proc_list_head, p+1);
186 27 break;
187 }
188 }
189
190 // Assign this process initial data
191
1/2
✓ Branch 0 taken 27 times.
✗ Branch 1 not taken.
27 if (process != NULL) {
192 27 *process = (const lewi_process_t) {
193 .pid = pid,
194 .initial_ncpus = ncpus,
195 .current_ncpus = ncpus,
196 };
197 27 my_process = process;
198 }
199 }
200 27 shmem_unlock(shm_handler);
201 27 }
202
203
204 /*********************************************************************************/
205 /* Finalize */
206 /*********************************************************************************/
207
208 33 static int reset_process(lewi_process_t *process, lewi_request_t *requests,
209 unsigned int *nreqs, unsigned int maxreqs, unsigned int *prev_requested) {
210
211 33 *nreqs = 0;
212 33 int error = DLB_NOUPDT;
213
214 // Clear requests
215 33 *prev_requested = queue_lewi_reqs_remove(&shdata->requests, process->pid);
216
217 // Lend excess CPUs
218
2/2
✓ Branch 0 taken 3 times.
✓ Branch 1 taken 30 times.
33 if (process->current_ncpus > process->initial_ncpus) {
219
220 /* Compute CPUs to lend and update process info */
221 3 unsigned int ncpus_to_lend =
222 3 process->current_ncpus - process->initial_ncpus;
223 3 process->current_ncpus = process->initial_ncpus;
224
225 /* Update output variable. Excess CPUs count as previously requested */
226 3 *prev_requested += ncpus_to_lend;
227
228 /* Update shmem, resolve requests if possible */
229 3 lend_ncpus_to_shmem(ncpus_to_lend, requests, nreqs, maxreqs);
230
231 3 error = DLB_SUCCESS;
232 }
233
234 // Borrow or Reclaim
235
2/2
✓ Branch 0 taken 7 times.
✓ Branch 1 taken 23 times.
30 else if (process->current_ncpus < process->initial_ncpus) {
236
237 7 unsigned int ncpus_to_reclaim =
238 7 process->initial_ncpus - process->current_ncpus;
239
240 // Borrow first
241 7 unsigned int ncpus_to_borrow = min_uint(shdata->idle_cpus,
242 ncpus_to_reclaim);
243
2/2
✓ Branch 0 taken 4 times.
✓ Branch 1 taken 3 times.
7 if (ncpus_to_borrow > 0) {
244 4 shdata->idle_cpus -= ncpus_to_borrow;
245 4 process->current_ncpus += ncpus_to_borrow;
246 4 ncpus_to_reclaim -= ncpus_to_borrow;
247 4 error = DLB_SUCCESS;
248 }
249
250 // Reclaim later
251
2/2
✓ Branch 0 taken 4 times.
✓ Branch 1 taken 3 times.
7 if (ncpus_to_reclaim > 0) {
252 4 error = reclaim_from_shmem(process, ncpus_to_reclaim, requests,
253 nreqs, maxreqs);
254 }
255 }
256
257 33 return error;
258 }
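Illustrative numbers only: a process registered with 4 CPUs but currently running on 6 has its 2 extra CPUs lent back (and counted into *prev_requested), while a process registered with 4 but running on 2 first borrows from the idle pool and, if still short, reclaims the remainder from processes holding surplus CPUs via reclaim_from_shmem().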
259
260 27 static void close_shmem(void) {
261 27 pthread_mutex_lock(&mutex);
262 {
263
2/2
✓ Branch 0 taken 11 times.
✓ Branch 1 taken 16 times.
27 if (--subprocesses_attached == 0) {
264 11 shmem_finalize(shm_handler, is_shmem_empty);
265 11 shm_handler = NULL;
266 11 shdata = NULL;
267 }
268 }
269 27 pthread_mutex_unlock(&mutex);
270 27 }
271
272 27 void shmem_lewi_async__finalize(pid_t pid, unsigned int *new_ncpus,
273 lewi_request_t *requests, unsigned int *nreqs, unsigned int maxreqs) {
274
275 27 *nreqs = 0;
276
277
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 27 times.
27 if (shm_handler == NULL) return;
278
279 27 lewi_process_t *process = get_process(pid);
280
281 27 shmem_lock(shm_handler);
282 {
283
1/2
✓ Branch 0 taken 27 times.
✗ Branch 1 not taken.
27 if (process != NULL) {
284 27 *new_ncpus = process->initial_ncpus;
285
286 // Resolve requests and CPUs out of place, ignore previously requested
287 unsigned int prev_requested;
288 27 reset_process(process, requests, nreqs, maxreqs, &prev_requested);
289
290 // Remove process data
291 27 *process = (const lewi_process_t) {};
292
293 // Clear local pointer
294 27 my_process = NULL;
295
296 // Decrement process counter
297 27 --shdata->attached_nprocs;
298 }
299 }
300 27 shmem_unlock(shm_handler);
301
302 27 close_shmem();
303 }
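A minimal usage sketch of the attach/detach pair above (assuming the prototypes and lewi_request_t are visible through LB_comm/shmem_lewi_async.h; the shmem key and CPU counts are arbitrary values chosen for the illustration):

    #include <unistd.h>
    #include "LB_comm/shmem_lewi_async.h"

    static void lifecycle_sketch(void) {
        enum { MAXREQS = 16 };              /* arbitrary capacity for the sketch */
        lewi_request_t requests[MAXREQS];
        unsigned int nreqs = 0;
        unsigned int new_ncpus = 0;

        /* register this process as owning 4 CPUs */
        shmem_lewi_async__init(getpid(), 4, "example");

        /* ... lend / acquire / borrow during the application's lifetime ... */

        /* detach: new_ncpus returns to the initial 4, and requests[0..nreqs)
         * lists the processes whose CPU count changed as a side effect */
        shmem_lewi_async__finalize(getpid(), &new_ncpus, requests, &nreqs, MAXREQS);
    }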
304
305
306 /*********************************************************************************/
307 /* Lend */
308 /*********************************************************************************/
309
310 /* Lend ncpus. Check whether they can resolve some request, add to shmem otherwise */
311 69 static void lend_ncpus_to_shmem(unsigned int ncpus, lewi_request_t *requests,
312 unsigned int *nreqs, unsigned int maxreqs) {
313
314
2/2
✓ Branch 1 taken 24 times.
✓ Branch 2 taken 45 times.
69 if (queue_lewi_reqs_size(&shdata->requests) == 0) {
315 /* queue is empty */
316 24 shdata->idle_cpus += ncpus;
317 24 *nreqs = 0;
318 } else {
319
320 /* Resolve as many requests as possible, the remainder goes to the shmem */
321 45 unsigned int not_needed_cpus = queue_lewi_reqs_pop_ncpus(&shdata->requests,
322 ncpus, requests, nreqs, maxreqs);
323 45 shdata->idle_cpus += not_needed_cpus;
324
325 /* Update shmem with the resolved requests, and add current values */
326
2/2
✓ Branch 0 taken 54 times.
✓ Branch 1 taken 45 times.
99 for (unsigned int i = 0; i < *nreqs; ++i) {
327 54 lewi_process_t *target = get_process(requests[i].pid);
328 54 target->current_ncpus += requests[i].howmany;
329 /* the request is updated to call the appropriate set_num_threads */
330 54 requests[i].howmany = target->current_ncpus;
331 }
332 }
333 69 }
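For example (illustrative numbers): if 4 CPUs are lent while another process has 3 CPUs pending in the request queue, 3 of them resolve that request (the target's current_ncpus grows by 3 and its entry in requests[] is rewritten to the new total), and the remaining CPU is added to idle_cpus; with an empty queue all 4 would simply become idle.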
334
335 /* Lend ncpus. Return new_ncpus and pending CPU requests. */
336 42 int shmem_lewi_async__lend_cpus(pid_t pid, unsigned int ncpus,
337 unsigned int *new_ncpus, lewi_request_t *requests, unsigned int *nreqs,
338 unsigned int maxreqs, unsigned int *prev_requested) {
339
340
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 41 times.
42 if (unlikely(shm_handler == NULL)) return DLB_ERR_NOSHMEM;
341
342 41 lewi_process_t *process = get_process(pid);
343
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 40 times.
41 if (process == NULL) return DLB_ERR_NOPROC;
344
345 40 *new_ncpus = 0;
346 40 *nreqs = 0;
347 40 *prev_requested = 0;
348
349
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 39 times.
40 if (unlikely(ncpus == 0)) return DLB_NOUPDT;
350
351 int error;
352 unsigned int idle_cpus;
353 39 shmem_lock(shm_handler);
354 {
355 /* Not enough CPUs to lend */
356
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 38 times.
39 if (ncpus > process->current_ncpus) {
357 1 error = DLB_ERR_PERM;
358 }
359 /* Lend */
360 else {
361 /* CPUs previously requested is the sum of the previous petitions
362 * in the queue (which are removed at this point) and the excess of
363 * CPUs that the process lends but does not own */
364 38 unsigned int ncpus_lent_not_owned =
365 38 process->current_ncpus > process->initial_ncpus
366 1 ? min_uint(process->current_ncpus - process->initial_ncpus, ncpus)
367
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 37 times.
38 : 0;
368 38 unsigned int ncpus_in_queue = queue_lewi_reqs_remove(&shdata->requests, pid);
369 38 *prev_requested = ncpus_lent_not_owned + ncpus_in_queue;
370
371 /* Update process info and output variable */
372 38 process->current_ncpus -= ncpus;
373 38 *new_ncpus = process->current_ncpus;
374
375 /* Update shmem, resolve requests if possible */
376 38 lend_ncpus_to_shmem(ncpus, requests, nreqs, maxreqs);
377
378 38 error = DLB_SUCCESS;
379 }
380
381 39 idle_cpus = shdata->idle_cpus;
382 }
383 39 shmem_unlock(shm_handler);
384
385
2/2
✓ Branch 0 taken 38 times.
✓ Branch 1 taken 1 times.
39 if (error == DLB_SUCCESS) {
386 add_event(IDLE_CPUS_EVENT, idle_cpus);
387
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 38 times.
38 verbose(VB_SHMEM, "Lending %u CPUs (idle: %u, triggered requests: %u)",
388 ncpus, idle_cpus, *nreqs);
389 } else {
390
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 1 times.
1 verbose(VB_SHMEM, "Lend failed");
391 }
392
393 39 return error;
394 }
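A sketch of a possible caller of the lend path above (assuming the module header provides the prototypes; notify_process() is a hypothetical helper standing in for whatever mechanism tells another process to change its thread count):

    #include <sys/types.h>
    #include "LB_comm/shmem_lewi_async.h"
    #include "apis/dlb_errors.h"

    extern void notify_process(pid_t pid, unsigned int ncpus);  /* hypothetical */

    static void lend_sketch(pid_t pid) {
        enum { MAXREQS = 16 };
        lewi_request_t requests[MAXREQS];
        unsigned int nreqs, new_ncpus, prev_requested;

        /* lend 2 CPUs; on success this process should drop to new_ncpus threads */
        int err = shmem_lewi_async__lend_cpus(pid, 2, &new_ncpus,
                requests, &nreqs, MAXREQS, &prev_requested);
        if (err == DLB_SUCCESS) {
            /* each resolved request carries the pid of a process that was waiting
             * for CPUs and the total number of CPUs it should now use */
            for (unsigned int i = 0; i < nreqs; ++i) {
                notify_process(requests[i].pid, requests[i].howmany);
            }
        }
    }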
395
396 /* Lend all possible CPUs, keep new_ncpus. Return pending CPU requests. */
397 32 int shmem_lewi_async__lend_keep_cpus(pid_t pid, unsigned int new_ncpus,
398 lewi_request_t *requests, unsigned int *nreqs, unsigned int maxreqs,
399 unsigned int *prev_requested) {
400
401
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 31 times.
32 if (unlikely(shm_handler == NULL)) return DLB_ERR_NOSHMEM;
402
403 31 lewi_process_t *process = get_process(pid);
404
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 30 times.
31 if (process == NULL) return DLB_ERR_NOPROC;
405
406 30 *nreqs = 0;
407 30 *prev_requested = 0;
408
409 int error;
410 unsigned int idle_cpus;
411 unsigned int lent_cpus;
412 30 shmem_lock(shm_handler);
413 {
414 /* Not enough CPUs to lend */
415
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 29 times.
30 if (new_ncpus > process->current_ncpus) {
416 1 error = DLB_ERR_PERM;
417 }
418
419 /* No-op */
420
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 28 times.
29 else if (new_ncpus == process->current_ncpus) {
421 1 error = DLB_NOUPDT;
422 }
423
424 /* Lend */
425 else {
426 /* Compute CPUs to lend */
427 28 lent_cpus = process->current_ncpus - new_ncpus;
428
429 /* CPUs previously requested is the sum of the previous petitions
430 * in the queue (which are removed at this point) and the excess of
431 * CPUs that the process lends but does not own */
432 28 unsigned int ncpus_lent_not_owned =
433 28 process->current_ncpus > process->initial_ncpus
434 10 ? min_uint(process->current_ncpus - process->initial_ncpus, lent_cpus)
435
2/2
✓ Branch 0 taken 10 times.
✓ Branch 1 taken 18 times.
28 : 0;
436 28 unsigned int ncpus_in_queue = queue_lewi_reqs_remove(&shdata->requests, pid);
437 28 *prev_requested = ncpus_lent_not_owned + ncpus_in_queue;
438
439 /* Compute CPUs to lend and update process info */
440 28 lent_cpus = process->current_ncpus - new_ncpus;
441 28 process->current_ncpus = new_ncpus;
442
443 /* Update shmem, resolve requests if possible */
444 28 lend_ncpus_to_shmem(lent_cpus, requests, nreqs, maxreqs);
445
446 28 error = DLB_SUCCESS;
447 }
448
449 30 idle_cpus = shdata->idle_cpus;
450 }
451 30 shmem_unlock(shm_handler);
452
453
2/2
✓ Branch 0 taken 28 times.
✓ Branch 1 taken 2 times.
30 if (error == DLB_SUCCESS) {
454 add_event(IDLE_CPUS_EVENT, idle_cpus);
455
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 28 times.
28 verbose(VB_SHMEM, "Lending %u CPUs (idle: %u, triggered requests: %u)",
456 lent_cpus, idle_cpus, *nreqs);
457 } else {
458
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 2 times.
2 verbose(VB_SHMEM, "Lend failed");
459 }
460
461 30 return error;
462 }
463
464
465 /*********************************************************************************/
466 /* Reclaim */
467 /*********************************************************************************/
468
469 /* Helper function to reclaim all CPUs from a process */
470 48 static int reclaim_from_shmem(lewi_process_t *process, unsigned int ncpus,
471 lewi_request_t *requests, unsigned int *nreqs, unsigned int maxreqs) {
472
473 /* These conditions are actually checked before invoking the function */
474
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 48 times.
48 ensure(process != NULL, "illegal process in %s", __func__);
475
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 48 times.
48 ensure(process->initial_ncpus >= process->current_ncpus + ncpus,
476 "cannot reclaim in %s", __func__);
477
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 48 times.
48 ensure(shdata->idle_cpus == 0, "reclaiming while idle CPUs > 0");
478
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 48 times.
48 ensure(ncpus > 0, "Reclaiming 0 CPUs");
479
480 48 int error = DLB_SUCCESS;
481
482 // find victims to steal CPUs from
483
484 /* Construct a queue with the CPU surplus of each target process */
485 48 queue_lewi_reqs_t surplus = {};
486
2/2
✓ Branch 0 taken 138 times.
✓ Branch 1 taken 48 times.
186 for (unsigned int p = 0; p < shdata->proc_list_head; ++p) {
487 138 lewi_process_t *target = &shdata->processes[p];
488
1/2
✓ Branch 0 taken 138 times.
✗ Branch 1 not taken.
138 if (target->pid != NOBODY
489
2/2
✓ Branch 0 taken 53 times.
✓ Branch 1 taken 85 times.
138 && target->current_ncpus > target->initial_ncpus) {
490 53 queue_lewi_reqs_push(&surplus, target->pid,
491 53 target->current_ncpus - target->initial_ncpus);
492 }
493 }
494
495 /* Pop CPUs evenly */
496 48 unsigned int remaining_ncpus = queue_lewi_reqs_pop_ncpus(&surplus, ncpus,
497 requests, nreqs, maxreqs);
498
1/2
✓ Branch 0 taken 48 times.
✗ Branch 1 not taken.
48 if (remaining_ncpus == 0) {
499 /* Update shmem with the victims, subtract current values */
500
2/2
✓ Branch 0 taken 53 times.
✓ Branch 1 taken 48 times.
101 for (unsigned int i = 0; i < *nreqs; ++i) {
501 53 lewi_process_t *target = get_process(requests[i].pid);
502 53 target->current_ncpus -= requests[i].howmany;
503
504 /* Add requests for reclaimed CPUs */
505 53 queue_lewi_reqs_push(&shdata->requests, requests[i].pid,
506 53 requests[i].howmany);
507
508 /* the request is updated to call the appropriate set_num_threads */
509 53 requests[i].howmany = target->current_ncpus;
510 }
511
512 48 process->current_ncpus += ncpus;
513 } else {
514 /* Reaching this point means either an internal error or that the
515 * requests array was too small to hold every resolved request. */
516 error = DLB_ERR_REQST;
517 }
518
519 48 return error;
520 }
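Illustrative scenario: the caller is 2 CPUs short while processes B and C each run 1 CPU above their initial allocation. The surplus queue is built as {B:1, C:1}, the pop takes one CPU from each, both victims get a request pushed for the CPU taken from them (so they can recover it later), and their entries in requests[] end up holding the reduced totals they must now run with.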
521
522 /* Reclaim initial number of CPUs */
523 31 int shmem_lewi_async__reclaim(pid_t pid, unsigned int *new_ncpus,
524 lewi_request_t *requests, unsigned int *nreqs, unsigned int maxreqs,
525 unsigned int prev_requested) {
526
527
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 30 times.
31 if (unlikely(shm_handler == NULL)) return DLB_ERR_NOSHMEM;
528
529 30 lewi_process_t *process = get_process(pid);
530
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 29 times.
30 if (process == NULL) return DLB_ERR_NOPROC;
531
532
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 29 times.
29 verbose(VB_SHMEM, "Reclaiming initial CPUs...");
533
534 29 *nreqs = 0;
535
536 29 int error = DLB_NOUPDT;
537 unsigned int idle_cpus;
538 29 shmem_lock(shm_handler);
539 {
540
2/2
✓ Branch 0 taken 27 times.
✓ Branch 1 taken 2 times.
29 if (process->current_ncpus < process->initial_ncpus) {
541
542 27 unsigned int ncpus_to_reclaim =
543 27 process->initial_ncpus - process->current_ncpus;
544
545 // Borrow first
546 27 unsigned int ncpus_to_borrow = min_uint(shdata->idle_cpus,
547 ncpus_to_reclaim);
548
2/2
✓ Branch 0 taken 11 times.
✓ Branch 1 taken 16 times.
27 if (ncpus_to_borrow > 0) {
549 11 shdata->idle_cpus -= ncpus_to_borrow;
550 11 process->current_ncpus += ncpus_to_borrow;
551 11 ncpus_to_reclaim -= ncpus_to_borrow;
552 11 error = DLB_SUCCESS;
553 }
554
555 // Reclaim later
556
2/2
✓ Branch 0 taken 17 times.
✓ Branch 1 taken 10 times.
27 if (ncpus_to_reclaim > 0) {
557 17 error = reclaim_from_shmem(process, ncpus_to_reclaim, requests,
558 nreqs, maxreqs);
559 }
560
561 // Attend previous requests
562
2/2
✓ Branch 0 taken 13 times.
✓ Branch 1 taken 14 times.
27 if (prev_requested > 0) {
563 // Try first to resolve them with the available CPUs
564 unsigned int ncpus_from_prev_requests =
565 13 min_uint(shdata->idle_cpus, prev_requested);
566
2/2
✓ Branch 0 taken 4 times.
✓ Branch 1 taken 9 times.
13 if (ncpus_from_prev_requests > 0) {
567 4 shdata->idle_cpus -= ncpus_from_prev_requests;
568 4 process->current_ncpus += ncpus_from_prev_requests;
569 4 prev_requested -= ncpus_from_prev_requests;
570 }
571
572 // If we still have previous requests, add them to the queue
573
1/2
✓ Branch 0 taken 13 times.
✗ Branch 1 not taken.
13 if (prev_requested > 0) {
574 13 queue_lewi_reqs_push(&shdata->requests, pid, prev_requested);
575 }
576 }
577 }
578
579 // Update output variable
580 29 idle_cpus = shdata->idle_cpus;
581 29 *new_ncpus = process->current_ncpus;
582 }
583 29 shmem_unlock(shm_handler);
584
585
2/2
✓ Branch 0 taken 27 times.
✓ Branch 1 taken 2 times.
29 if (error == DLB_SUCCESS) {
586 add_event(IDLE_CPUS_EVENT, idle_cpus);
587
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 27 times.
27 verbose(VB_SHMEM, "Using %u CPUs... Idle: %u", *new_ncpus, idle_cpus);
588 }
589
590 29 return error;
591 }
592
593
594 /*********************************************************************************/
595 /* Acquire */
596 /*********************************************************************************/
597
598 59 int shmem_lewi_async__acquire_cpus(pid_t pid, unsigned int ncpus,
599 unsigned int *new_ncpus, lewi_request_t *requests, unsigned int *nreqs,
600 unsigned int maxreqs) {
601
602
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 58 times.
59 if (unlikely(shm_handler == NULL)) return DLB_ERR_NOSHMEM;
603
604
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 57 times.
58 if (ncpus == 0) return DLB_NOUPDT;
605
606 57 lewi_process_t *process = get_process(pid);
607
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 56 times.
57 if (process == NULL) return DLB_ERR_NOPROC;
608
609 56 *nreqs = 0;
610
611 56 int error = DLB_NOUPDT;
612 int idle_cpus;
613 56 shmem_lock(shm_handler);
614 {
615 // Borrow first
616 56 unsigned int ncpus_to_borrow = min_uint(shdata->idle_cpus, ncpus);
617
2/2
✓ Branch 0 taken 11 times.
✓ Branch 1 taken 45 times.
56 if (ncpus_to_borrow > 0) {
618 11 shdata->idle_cpus -= ncpus_to_borrow;
619 11 process->current_ncpus += ncpus_to_borrow;
620 11 ncpus -= ncpus_to_borrow;
621 11 error = DLB_SUCCESS;
622 }
623
624 // Reclaim later
625 56 unsigned int ncpus_to_reclaim = min_uint(ncpus,
626
2/2
✓ Branch 0 taken 28 times.
✓ Branch 1 taken 28 times.
56 process->initial_ncpus > process->current_ncpus ?
627 28 process->initial_ncpus - process->current_ncpus : 0);
628
2/2
✓ Branch 0 taken 27 times.
✓ Branch 1 taken 29 times.
56 if (ncpus_to_reclaim > 0) {
629 27 error = reclaim_from_shmem(process, ncpus_to_reclaim, requests,
630 nreqs, maxreqs);
631
1/2
✓ Branch 0 taken 27 times.
✗ Branch 1 not taken.
27 if (error == DLB_SUCCESS) {
632 27 ncpus -= ncpus_to_reclaim;
633 }
634 }
635
636 // Add request for the rest
637
2/2
✓ Branch 0 taken 26 times.
✓ Branch 1 taken 30 times.
56 if (ncpus > 0) {
638 26 queue_lewi_reqs_push(&shdata->requests, pid, ncpus);
639 26 error = DLB_NOTED;
640 }
641
642 56 *new_ncpus = process->current_ncpus;
643 56 idle_cpus = shdata->idle_cpus;
644 }
645 56 shmem_unlock(shm_handler);
646
647
2/2
✓ Branch 0 taken 30 times.
✓ Branch 1 taken 26 times.
56 if (error == DLB_SUCCESS) {
648 add_event(IDLE_CPUS_EVENT, idle_cpus);
649
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 30 times.
30 verbose(VB_SHMEM, "Borrowing CPUs... New: %d, Idle: %d", *new_ncpus, idle_cpus);
650 }
651
652 56 return error;
653 }
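A sketch of a possible caller of the acquire path (same assumptions as the lend sketch above; as in the code, DLB_NOTED means part of the petition stayed queued):

    #include <sys/types.h>
    #include "LB_comm/shmem_lewi_async.h"
    #include "apis/dlb_errors.h"

    extern void notify_process(pid_t pid, unsigned int ncpus);  /* hypothetical, as in the lend sketch */

    static void acquire_sketch(pid_t pid) {
        enum { MAXREQS = 16 };
        lewi_request_t requests[MAXREQS];
        unsigned int nreqs, new_ncpus;

        /* ask for 2 extra CPUs: idle CPUs are borrowed first, owned CPUs are
         * reclaimed next, and anything left over is queued as a request */
        int err = shmem_lewi_async__acquire_cpus(pid, 2, &new_ncpus,
                requests, &nreqs, MAXREQS);
        if (err == DLB_SUCCESS || err == DLB_NOTED) {
            /* run with new_ncpus now; each entry in requests[] names a process
             * whose CPUs were reclaimed and the total it must shrink to */
            for (unsigned int i = 0; i < nreqs; ++i) {
                notify_process(requests[i].pid, requests[i].howmany);
            }
        }
    }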
654
655
656 /*********************************************************************************/
657 /* Borrow */
658 /*********************************************************************************/
659
660 7 int shmem_lewi_async__borrow_cpus(pid_t pid, unsigned int ncpus,
661 unsigned int *new_ncpus) {
662
663
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 6 times.
7 if (unlikely(shm_handler == NULL)) return DLB_ERR_NOSHMEM;
664
665 6 lewi_process_t *process = get_process(pid);
666
2/2
✓ Branch 0 taken 1 times.
✓ Branch 1 taken 5 times.
6 if (process == NULL) return DLB_ERR_NOPROC;
667
668 int error;
669 int idle_cpus;
670
671 5 shmem_lock(shm_handler);
672 {
673 // Borrow as many as possible
674 5 unsigned int ncpus_to_borrow = min_uint(shdata->idle_cpus, ncpus);
675
2/2
✓ Branch 0 taken 3 times.
✓ Branch 1 taken 2 times.
5 if (ncpus_to_borrow > 0) {
676 3 shdata->idle_cpus -= ncpus_to_borrow;
677 3 process->current_ncpus += ncpus_to_borrow;
678 3 error = DLB_SUCCESS;
679 } else {
680 // No idle CPUs to borrow
681 2 error = DLB_NOUPDT;
682 }
683
684 5 *new_ncpus = process->current_ncpus;
685 5 idle_cpus = shdata->idle_cpus;
686 }
687 5 shmem_unlock(shm_handler);
688
689
2/2
✓ Branch 0 taken 3 times.
✓ Branch 1 taken 2 times.
5 if (error == DLB_SUCCESS) {
690 add_event(IDLE_CPUS_EVENT, idle_cpus);
691
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 3 times.
3 verbose(VB_SHMEM, "Borrowing CPUs... New: %d, Idle: %d", *new_ncpus, idle_cpus);
692 }
693
694 5 return error;
695 }
696
697
698 /*********************************************************************************/
699 /* Reset (Lend or Reclaim) */
700 /*********************************************************************************/
701
702 9 int shmem_lewi_async__reset(pid_t pid, unsigned int *new_ncpus,
703 lewi_request_t *requests, unsigned int *nreqs, unsigned int maxreqs,
704 unsigned int *prev_requested) {
705
706
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 9 times.
9 if (unlikely(shm_handler == NULL)) return DLB_ERR_NOSHMEM;
707
708 9 lewi_process_t *process = get_process(pid);
709
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 9 times.
9 if (process == NULL) return DLB_ERR_NOPROC;
710
711
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 9 times.
9 verbose(VB_SHMEM, "Resetting");
712
713 9 *nreqs = 0;
714 9 *prev_requested = 0;
715
716 9 int error = DLB_NOUPDT;
717 unsigned int idle_cpus;
718 9 shmem_lock(shm_handler);
719 {
720
2/2
✓ Branch 0 taken 6 times.
✓ Branch 1 taken 3 times.
9 if (process->initial_ncpus != process->current_ncpus) {
721 6 error = reset_process(process, requests, nreqs, maxreqs, prev_requested);
722 }
723
724 9 idle_cpus = shdata->idle_cpus;
725 9 *new_ncpus = process->current_ncpus;
726 }
727 9 shmem_unlock(shm_handler);
728
729
2/2
✓ Branch 0 taken 6 times.
✓ Branch 1 taken 3 times.
9 if (error == DLB_SUCCESS) {
730 add_event(IDLE_CPUS_EVENT, idle_cpus);
731
1/2
✗ Branch 0 not taken.
✓ Branch 1 taken 6 times.
6 verbose(VB_SHMEM, "Using %u CPUs... Idle: %u", *new_ncpus, idle_cpus);
732 }
733
734 9 return error;
735 }
736