Line data Source code
1 : /*
2 : *------------------------------------------------------------------
3 : * svm.c - shared VM allocation, mmap(...MAP_FIXED...)
4 : * library
5 : *
6 : * Copyright (c) 2009 Cisco and/or its affiliates.
7 : * Licensed under the Apache License, Version 2.0 (the "License");
8 : * you may not use this file except in compliance with the License.
9 : * You may obtain a copy of the License at:
10 : *
11 : * http://www.apache.org/licenses/LICENSE-2.0
12 : *
13 : * Unless required by applicable law or agreed to in writing, software
14 : * distributed under the License is distributed on an "AS IS" BASIS,
15 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 : * See the License for the specific language governing permissions and
17 : * limitations under the License.
18 : *------------------------------------------------------------------
19 : */
20 :
21 : #include <stdio.h>
22 : #include <stdlib.h>
23 : #include <sys/types.h>
24 : #include <sys/mman.h>
25 : #include <sys/stat.h>
26 : #include <netinet/in.h>
27 : #include <signal.h>
28 : #include <pthread.h>
29 : #include <unistd.h>
30 : #include <time.h>
31 : #include <fcntl.h>
32 : #include <string.h>
33 : #include <vppinfra/clib.h>
34 : #include <vppinfra/vec.h>
35 : #include <vppinfra/hash.h>
36 : #include <vppinfra/bitmap.h>
37 : #include <vppinfra/fifo.h>
38 : #include <vppinfra/time.h>
39 : #include <vppinfra/heap.h>
40 : #include <vppinfra/pool.h>
41 : #include <vppinfra/format.h>
42 :
43 : #include "svm.h"
44 :
45 : static svm_region_t *root_rp;
46 : static int root_rp_refcount;
47 :
48 : #define MAXLOCK 2
49 : static pthread_mutex_t *mutexes_held[MAXLOCK];
50 : static int nheld;
51 :
52 : svm_region_t *
53 636 : svm_get_root_rp (void)
54 : {
55 636 : return root_rp;
56 : }
57 :
58 : #define MUTEX_DEBUG
59 :
60 : u64
61 576 : svm_get_global_region_base_va ()
62 : {
63 : #if __aarch64__
64 : /* On AArch64 the VA space can have different sizes, from 36 to 48 bits.
65 : Here we try to detect the number of VA bits by parsing the address
66 : ranges in /proc/self/maps. */
67 : int fd;
68 : unformat_input_t input;
69 : u64 start, end = 0;
70 : u8 bits = 0;
71 :
72 : if ((fd = open ("/proc/self/maps", 0)) < 0)
73 : clib_unix_error ("open '/proc/self/maps'");
74 :
75 : unformat_init_clib_file (&input, fd);
76 : while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
77 : {
78 : if (unformat (&input, "%llx-%llx", &start, &end))
79 : end--;
80 : unformat_skip_line (&input);
81 : }
82 : unformat_free (&input);
83 : close (fd);
84 :
85 : bits = count_leading_zeros (end);
86 : bits = 64 - bits;
87 : if (bits >= 36 && bits <= 48)
88 : return ((1ul << bits) / 4) - (2 * SVM_GLOBAL_REGION_SIZE);
89 : else
90 : clib_unix_error ("unexpected va bits '%u'", bits);
91 : #endif
92 :
93 : #ifdef CLIB_SANITIZE_ADDR
94 : return 0x200000000000;
95 : #endif
96 : /* default value */
97 576 : return 0x130000000ULL;
98 : }
99 :
100 : static void
101 4077 : region_lock (svm_region_t * rp, int tag)
102 : {
103 4077 : pthread_mutex_lock (&rp->mutex);
104 : #ifdef MUTEX_DEBUG
105 4077 : rp->mutex_owner_pid = getpid ();
106 4077 : rp->mutex_owner_tag = tag;
107 : #endif
108 4077 : ASSERT (nheld < MAXLOCK); //NOSONAR
109 : /*
110 : * Keep score of held mutexes so we can try to exit
111 : * cleanly if the world comes to an end at the worst possible
112 : * moment
113 : */
114 4077 : mutexes_held[nheld++] = &rp->mutex;
115 4077 : }
116 :
117 : static void
118 4077 : region_unlock (svm_region_t * rp)
119 : {
120 : int i, j;
121 : #ifdef MUTEX_DEBUG
122 4077 : rp->mutex_owner_pid = 0;
123 4077 : rp->mutex_owner_tag = 0;
124 : #endif
125 :
126 4077 : for (i = nheld - 1; i >= 0; i--)
127 : {
128 4077 : if (mutexes_held[i] == &rp->mutex)
129 : {
130 7001 : for (j = i; j < MAXLOCK - 1; j++)
131 2924 : mutexes_held[j] = mutexes_held[j + 1];
132 4077 : nheld--;
133 4077 : goto found;
134 : }
135 : }
136 0 : ASSERT (0);
137 :
138 4077 : found:
139 4077 : CLIB_MEMORY_BARRIER ();
140 4077 : pthread_mutex_unlock (&rp->mutex);
141 4077 : }
142 :
143 :
144 : static u8 *
145 0 : format_svm_flags (u8 * s, va_list * args)
146 : {
147 0 : uword f = va_arg (*args, uword);
148 :
149 0 : if (f & SVM_FLAGS_MHEAP)
150 0 : s = format (s, "MHEAP ");
151 0 : if (f & SVM_FLAGS_FILE)
152 0 : s = format (s, "FILE ");
153 0 : if (f & SVM_FLAGS_NODATA)
154 0 : s = format (s, "NODATA ");
155 0 : if (f & SVM_FLAGS_NEED_DATA_INIT)
156 0 : s = format (s, "INIT ");
157 :
158 0 : return (s);
159 : }
160 :
161 : static u8 *
162 0 : format_svm_size (u8 * s, va_list * args)
163 : {
164 0 : uword size = va_arg (*args, uword);
165 :
166 0 : if (size >= (1 << 20))
167 : {
168 0 : s = format (s, "(%d mb)", size >> 20);
169 : }
170 0 : else if (size >= (1 << 10))
171 : {
172 0 : s = format (s, "(%d kb)", size >> 10);
173 : }
174 : else
175 : {
176 0 : s = format (s, "(%d bytes)", size);
177 : }
178 0 : return (s);
179 : }
180 :
181 : u8 *
182 0 : format_svm_region (u8 * s, va_list * args)
183 : {
184 0 : svm_region_t *rp = va_arg (*args, svm_region_t *);
185 0 : int verbose = va_arg (*args, int);
186 : int i;
187 : uword lo, hi;
188 :
189 0 : s = format (s, "%s: base va 0x%x size 0x%x %U\n",
190 : rp->region_name, rp->virtual_base,
191 : rp->virtual_size, format_svm_size, rp->virtual_size);
192 0 : s = format (s, " user_ctx 0x%x, bitmap_size %d\n",
193 : rp->user_ctx, rp->bitmap_size);
194 :
195 0 : if (verbose)
196 : {
197 0 : s = format (s, " flags: 0x%x %U\n", rp->flags,
198 : format_svm_flags, rp->flags);
199 0 : s = format (s,
200 : " region_heap 0x%x data_base 0x%x data_heap 0x%x\n",
201 : rp->region_heap, rp->data_base, rp->data_heap);
202 : }
203 :
204 0 : s = format (s, " %d clients, pids: ", vec_len (rp->client_pids));
205 :
206 0 : for (i = 0; i < vec_len (rp->client_pids); i++)
207 0 : s = format (s, "%d ", rp->client_pids[i]);
208 :
209 0 : s = format (s, "\n");
210 :
211 0 : if (verbose)
212 : {
213 0 : lo = hi = ~0;
214 :
215 0 : s = format (s, " VM in use: ");
216 :
217 0 : for (i = 0; i < rp->bitmap_size; i++)
218 : {
219 0 : if (clib_bitmap_get_no_check (rp->bitmap, i) != 0)
220 : {
221 0 : if (lo == ~0)
222 : {
223 0 : hi = lo = rp->virtual_base + i * MMAP_PAGESIZE;
224 : }
225 : else
226 : {
227 0 : hi = rp->virtual_base + i * MMAP_PAGESIZE;
228 : }
229 : }
230 : else
231 : {
232 0 : if (lo != ~0)
233 : {
234 0 : hi = rp->virtual_base + i * MMAP_PAGESIZE - 1;
235 0 : s = format (s, " 0x%x - 0x%x (%dk)\n", lo, hi,
236 0 : (hi - lo) >> 10);
237 0 : lo = hi = ~0;
238 : }
239 : }
240 : }
241 : }
242 :
243 0 : return (s);
244 : }
245 :
246 : /*
247 : * rnd_pagesize
248 : * Round to a pagesize multiple, presumably 4k works
249 : */
250 : static u64
251 576 : rnd_pagesize (u64 size)
252 : {
253 : u64 rv;
254 :
255 576 : rv = (size + (MMAP_PAGESIZE - 1)) & ~(MMAP_PAGESIZE - 1);
256 576 : return (rv);
257 : }
258 :
259 : /*
260 : * svm_data_region_setup
261 : */
262 : static int
263 1160 : svm_data_region_create (svm_map_region_args_t * a, svm_region_t * rp)
264 : {
265 : int fd;
266 1160 : u8 junk = 0;
267 : uword map_size;
268 :
269 1160 : map_size = rp->virtual_size - (MMAP_PAGESIZE +
270 1160 : (a->pvt_heap_size ? a->pvt_heap_size :
271 : SVM_PVT_MHEAP_SIZE));
272 :
273 1160 : if (a->flags & SVM_FLAGS_FILE)
274 : {
275 : struct stat statb;
276 :
277 0 : fd = open (a->backing_file, O_RDWR | O_CREAT, 0777);
278 :
279 0 : if (fd < 0)
280 : {
281 0 : clib_unix_warning ("open");
282 0 : return -1;
283 : }
284 :
285 0 : if (fstat (fd, &statb) < 0)
286 : {
287 0 : clib_unix_warning ("fstat");
288 0 : close (fd);
289 0 : return -2;
290 : }
291 :
292 0 : if (statb.st_mode & S_IFREG)
293 : {
294 0 : if (statb.st_size == 0)
295 : {
296 0 : if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
297 : {
298 0 : clib_unix_warning ("seek region size");
299 0 : close (fd);
300 0 : return -3;
301 : }
302 0 : if (write (fd, &junk, 1) != 1)
303 : {
304 0 : clib_unix_warning ("set region size");
305 0 : close (fd);
306 0 : return -3;
307 : }
308 : }
309 : else
310 : {
311 0 : map_size = rnd_pagesize (statb.st_size);
312 : }
313 : }
314 : else
315 : {
316 0 : map_size = a->backing_mmap_size;
317 : }
318 :
319 0 : ASSERT (map_size <= rp->virtual_size -
320 : (MMAP_PAGESIZE + SVM_PVT_MHEAP_SIZE));
321 :
322 0 : if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
323 : MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
324 : {
325 0 : clib_unix_warning ("mmap");
326 0 : close (fd);
327 0 : return -3;
328 : }
329 0 : close (fd);
330 0 : clib_mem_unpoison (rp->data_base, map_size);
331 0 : rp->backing_file = (char *) format (0, "%s%c", a->backing_file, 0);
332 0 : rp->flags |= SVM_FLAGS_FILE;
333 : }
334 :
335 1160 : if (a->flags & SVM_FLAGS_MHEAP)
336 : {
337 601 : rp->data_heap = clib_mem_create_heap (rp->data_base, map_size,
338 : 1 /* locked */ , "svm data");
339 :
340 601 : rp->flags |= SVM_FLAGS_MHEAP;
341 : }
342 1160 : return 0;
343 : }
344 :
345 : static int
346 34 : svm_data_region_map (svm_map_region_args_t * a, svm_region_t * rp)
347 : {
348 : int fd;
349 34 : u8 junk = 0;
350 : uword map_size;
351 : struct stat statb;
352 :
353 68 : map_size = rp->virtual_size -
354 34 : (MMAP_PAGESIZE
355 34 : + (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE));
356 :
357 34 : if (a->flags & SVM_FLAGS_FILE)
358 : {
359 :
360 0 : fd = open (a->backing_file, O_RDWR, 0777);
361 :
362 0 : if (fd < 0)
363 : {
364 0 : clib_unix_warning ("open");
365 0 : return -1;
366 : }
367 :
368 0 : if (fstat (fd, &statb) < 0)
369 : {
370 0 : clib_unix_warning ("fstat");
371 0 : close (fd);
372 0 : return -2;
373 : }
374 :
375 0 : if (statb.st_mode & S_IFREG)
376 : {
377 0 : if (statb.st_size == 0)
378 : {
379 0 : if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
380 : {
381 0 : clib_unix_warning ("seek region size");
382 0 : close (fd);
383 0 : return -3;
384 : }
385 0 : if (write (fd, &junk, 1) != 1)
386 : {
387 0 : clib_unix_warning ("set region size");
388 0 : close (fd);
389 0 : return -3;
390 : }
391 : }
392 : else
393 : {
394 0 : map_size = rnd_pagesize (statb.st_size);
395 : }
396 : }
397 : else
398 : {
399 0 : map_size = a->backing_mmap_size;
400 : }
401 :
402 0 : ASSERT (map_size <= rp->virtual_size
403 : - (MMAP_PAGESIZE
404 : +
405 : (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE)));
406 :
407 0 : if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
408 : MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
409 : {
410 0 : clib_unix_warning ("mmap");
411 0 : close (fd);
412 0 : return -3;
413 : }
414 0 : close (fd);
415 0 : clib_mem_unpoison (rp->data_base, map_size);
416 : }
417 34 : return 0;
418 : }
419 :
420 : u8 *
421 2270 : shm_name_from_svm_map_region_args (svm_map_region_args_t * a)
422 : {
423 : u8 *shm_name;
424 2270 : int root_path_offset = 0;
425 2270 : int name_offset = 0;
426 :
427 2270 : if (a->root_path)
428 : {
429 : /* Tolerate present or absent slashes */
430 2270 : if (a->root_path[0] == '/')
431 0 : root_path_offset++;
432 :
433 2270 : if (a->name[0] == '/')
434 2270 : name_offset = 1;
435 :
436 2270 : shm_name = format (0, "/%s-%s%c", &a->root_path[root_path_offset],
437 2270 : &a->name[name_offset], 0);
438 : }
439 : else
440 0 : shm_name = format (0, "%s%c", a->name, 0);
441 2270 : return (shm_name);
442 : }
443 :
444 : void
445 1160 : svm_region_init_mapped_region (svm_map_region_args_t * a, svm_region_t * rp)
446 : {
447 : pthread_mutexattr_t attr;
448 : pthread_condattr_t cattr;
449 : int nbits, words, bit;
450 : int overhead_space;
451 : void *oldheap;
452 : uword data_base;
453 1160 : ASSERT (rp);
454 : int rv;
455 :
456 1160 : clib_memset (rp, 0, sizeof (*rp));
457 :
458 1160 : if (pthread_mutexattr_init (&attr))
459 0 : clib_unix_warning ("mutexattr_init");
460 :
461 1160 : if (pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED))
462 0 : clib_unix_warning ("mutexattr_setpshared");
463 :
464 1160 : if (pthread_mutex_init (&rp->mutex, &attr))
465 0 : clib_unix_warning ("mutex_init");
466 :
467 1160 : if (pthread_mutexattr_destroy (&attr))
468 0 : clib_unix_warning ("mutexattr_destroy");
469 :
470 1160 : if (pthread_condattr_init (&cattr))
471 0 : clib_unix_warning ("condattr_init");
472 :
473 1160 : if (pthread_condattr_setpshared (&cattr, PTHREAD_PROCESS_SHARED))
474 0 : clib_unix_warning ("condattr_setpshared");
475 :
476 1160 : if (pthread_cond_init (&rp->condvar, &cattr))
477 0 : clib_unix_warning ("cond_init");
478 :
479 1160 : if (pthread_condattr_destroy (&cattr))
480 0 : clib_unix_warning ("condattr_destroy");
481 :
482 1160 : region_lock (rp, 1);
483 :
484 1160 : rp->virtual_base = a->baseva;
485 1160 : rp->virtual_size = a->size;
486 :
487 1160 : rp->region_heap = clib_mem_create_heap
488 1160 : (uword_to_pointer (a->baseva + MMAP_PAGESIZE, void *),
489 1160 : (a->pvt_heap_size !=
490 : 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE, 1 /* locked */ ,
491 : "svm region");
492 :
493 1160 : oldheap = svm_push_pvt_heap (rp);
494 :
495 1160 : rp->region_name = (char *) format (0, "%s%c", a->name, 0);
496 1160 : vec_add1 (rp->client_pids, getpid ());
497 :
498 1160 : nbits = rp->virtual_size / MMAP_PAGESIZE;
499 :
500 1160 : ASSERT (nbits > 0);
501 1160 : rp->bitmap_size = nbits;
502 1160 : words = (nbits + BITS (uword) - 1) / BITS (uword);
503 1160 : vec_validate (rp->bitmap, words - 1);
504 :
505 1761 : overhead_space = MMAP_PAGESIZE /* header */ +
506 1160 : ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
507 :
508 1160 : bit = 0;
509 1160 : data_base = (uword) rp->virtual_base;
510 :
511 1160 : if (a->flags & SVM_FLAGS_NODATA)
512 559 : rp->flags |= SVM_FLAGS_NEED_DATA_INIT;
513 :
514 : do
515 : {
516 38280 : clib_bitmap_set_no_check (rp->bitmap, bit, 1);
517 38280 : bit++;
518 38280 : overhead_space -= MMAP_PAGESIZE;
519 38280 : data_base += MMAP_PAGESIZE;
520 : }
521 38280 : while (overhead_space > 0);
522 :
523 1160 : rp->data_base = (void *) data_base;
524 :
525 : /*
526 : * Note: although the POSIX spec guarantees that only one
527 : * process enters this block, we have to play games
528 : * to hold off clients until e.g. the mutex is ready
529 : */
530 1160 : rp->version = SVM_VERSION;
531 :
532 : /* setup the data portion of the region */
533 :
534 1160 : rv = svm_data_region_create (a, rp);
535 1160 : if (rv)
536 : {
537 0 : clib_warning ("data_region_create: %d", rv);
538 : }
539 :
540 1160 : region_unlock (rp);
541 :
542 1160 : svm_pop_heap (oldheap);
543 1160 : }
544 :
545 : /*
546 : * svm_map_region
547 : */
548 : void *
549 1152 : svm_map_region (svm_map_region_args_t * a)
550 : {
551 : int svm_fd;
552 : svm_region_t *rp;
553 1152 : int deadman = 0;
554 1152 : u8 junk = 0;
555 : void *oldheap;
556 : int rv;
557 : int pid_holding_region_lock;
558 : u8 *shm_name;
559 1152 : int dead_region_recovery = 0;
560 : int time_left;
561 : struct stat stat;
562 : struct timespec ts, tsrem;
563 :
564 1152 : ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);
565 1152 : ASSERT (a->name);
566 :
567 1152 : shm_name = shm_name_from_svm_map_region_args (a);
568 :
569 : if (CLIB_DEBUG > 1)
570 : clib_warning ("[%d] map region %s: shm_open (%s)",
571 : getpid (), a->name, shm_name);
572 :
573 1152 : svm_fd = shm_open ((char *) shm_name, O_RDWR | O_CREAT | O_EXCL, 0777);
574 :
575 1152 : if (svm_fd >= 0)
576 : {
577 1118 : if (fchmod (svm_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) < 0)
578 0 : clib_unix_warning ("segment chmod");
579 : /* This turns out to fail harmlessly if the client starts first */
580 1118 : if (fchown (svm_fd, a->uid, a->gid) < 0)
581 0 : clib_unix_warning ("segment chown [ok if client starts first]");
582 :
583 1118 : vec_free (shm_name);
584 :
585 1118 : if (lseek (svm_fd, a->size, SEEK_SET) == (off_t) - 1)
586 : {
587 0 : clib_warning ("seek region size");
588 0 : close (svm_fd);
589 0 : return (0);
590 : }
591 1118 : if (write (svm_fd, &junk, 1) != 1)
592 : {
593 0 : clib_warning ("set region size");
594 0 : close (svm_fd);
595 0 : return (0);
596 : }
597 :
598 1118 : rp = mmap (uword_to_pointer (a->baseva, void *), a->size,
599 : PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0);
600 :
601 1118 : if (rp == (svm_region_t *) MAP_FAILED)
602 : {
603 0 : clib_unix_warning ("mmap create");
604 0 : close (svm_fd);
605 0 : return (0);
606 : }
607 1118 : close (svm_fd);
608 1118 : clib_mem_unpoison (rp, a->size);
609 :
610 1118 : svm_region_init_mapped_region (a, rp);
611 :
612 1118 : return ((void *) rp);
613 : }
614 : else
615 : {
616 34 : svm_fd = shm_open ((char *) shm_name, O_RDWR, 0777);
617 :
618 34 : vec_free (shm_name);
619 :
620 34 : if (svm_fd < 0)
621 : {
622 0 : perror ("svm_region_map(mmap open)");
623 0 : return (0);
624 : }
625 :
626 : /* Reset ownership in case the client started first */
627 34 : if (fchown (svm_fd, a->uid, a->gid) < 0)
628 0 : clib_unix_warning ("segment chown [ok if client starts first]");
629 :
630 34 : time_left = 20;
631 : while (1)
632 : {
633 34 : if (0 != fstat (svm_fd, &stat))
634 : {
635 0 : clib_warning ("fstat failed: %d", errno);
636 0 : close (svm_fd);
637 0 : return (0);
638 : }
639 34 : if (stat.st_size > 0)
640 : {
641 34 : break;
642 : }
643 0 : if (0 == time_left)
644 : {
645 0 : clib_warning ("waiting for resize of shm file timed out");
646 0 : close (svm_fd);
647 0 : return (0);
648 : }
649 0 : ts.tv_sec = 0;
650 0 : ts.tv_nsec = 100000000;
651 0 : while (nanosleep (&ts, &tsrem) < 0)
652 0 : ts = tsrem;
653 0 : time_left--;
654 : }
655 :
656 34 : rp = mmap (0, MMAP_PAGESIZE,
657 : PROT_READ | PROT_WRITE, MAP_SHARED, svm_fd, 0);
658 :
659 34 : if (rp == (svm_region_t *) MAP_FAILED)
660 : {
661 0 : close (svm_fd);
662 0 : clib_warning ("mmap");
663 0 : return (0);
664 : }
665 :
666 34 : clib_mem_unpoison (rp, MMAP_PAGESIZE);
667 :
668 : /*
669 : * We lost the footrace to create this region; make sure
670 : * the winner has crossed the finish line.
671 : */
672 34 : while (rp->version == 0 && deadman++ < 5)
673 : {
674 0 : sleep (1);
675 : }
676 :
677 : /*
678 : * <bleep>-ed?
679 : */
680 34 : if (rp->version == 0)
681 : {
682 0 : clib_warning ("rp->version %d not %d", rp->version, SVM_VERSION);
683 0 : close (svm_fd);
684 0 : munmap (rp, a->size);
685 0 : return (0);
686 : }
687 : /* Remap now that the region has been placed */
688 34 : a->baseva = rp->virtual_base;
689 34 : a->size = rp->virtual_size;
690 34 : munmap (rp, MMAP_PAGESIZE);
691 :
692 34 : rp = (void *) mmap (uword_to_pointer (a->baseva, void *), a->size,
693 : PROT_READ | PROT_WRITE,
694 : MAP_SHARED | MAP_FIXED, svm_fd, 0);
695 34 : if ((uword) rp == (uword) MAP_FAILED)
696 : {
697 0 : clib_unix_warning ("mmap");
698 0 : close (svm_fd);
699 0 : return (0);
700 : }
701 :
702 34 : close (svm_fd);
703 :
704 34 : clib_mem_unpoison (rp, a->size);
705 :
706 34 : if ((uword) rp != rp->virtual_base)
707 : {
708 0 : clib_warning ("mmap botch");
709 : }
710 :
711 : /*
712 : * Try to fix the region mutex if it is held by
713 : * a dead process
714 : */
715 34 : pid_holding_region_lock = rp->mutex_owner_pid;
716 34 : if (pid_holding_region_lock && kill (pid_holding_region_lock, 0) < 0)
717 : {
718 : pthread_mutexattr_t attr;
719 0 : clib_warning
720 : ("region %s mutex held by dead pid %d, tag %d, force unlock",
721 : rp->region_name, pid_holding_region_lock, rp->mutex_owner_tag);
722 : /* owner pid is nonexistent */
723 0 : if (pthread_mutexattr_init (&attr))
724 0 : clib_unix_warning ("mutexattr_init");
725 0 : if (pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED))
726 0 : clib_unix_warning ("mutexattr_setpshared");
727 0 : if (pthread_mutex_init (&rp->mutex, &attr))
728 0 : clib_unix_warning ("mutex_init");
729 0 : dead_region_recovery = 1;
730 : }
731 :
732 34 : if (dead_region_recovery)
733 0 : clib_warning ("recovery: attempt to re-lock region");
734 :
735 34 : region_lock (rp, 2);
736 34 : oldheap = svm_push_pvt_heap (rp);
737 34 : vec_add1 (rp->client_pids, getpid ());
738 :
739 34 : if (dead_region_recovery)
740 0 : clib_warning ("recovery: attempt svm_data_region_map");
741 :
742 34 : rv = svm_data_region_map (a, rp);
743 34 : if (rv)
744 : {
745 0 : clib_warning ("data_region_map: %d", rv);
746 : }
747 :
748 34 : if (dead_region_recovery)
749 0 : clib_warning ("unlock and continue");
750 :
751 34 : region_unlock (rp);
752 :
753 34 : svm_pop_heap (oldheap);
754 :
755 34 : return ((void *) rp);
756 :
757 : }
758 : return 0; /* NOTREACHED *///NOSONAR
759 : }
760 :
761 : static void
762 577 : svm_mutex_cleanup (void)
763 : {
764 : int i;
765 577 : for (i = 0; i < nheld; i++)
766 : {
767 0 : pthread_mutex_unlock (mutexes_held[i]); //NOSONAR
768 : }
769 577 : }
770 :
771 : static int
772 576 : svm_region_init_internal (svm_map_region_args_t * a)
773 : {
774 : svm_region_t *rp;
775 576 : u64 ticks = clib_cpu_time_now ();
776 : uword randomize_baseva;
777 :
778 : /* guard against klutz calls */
779 576 : if (root_rp)
780 0 : return -1;
781 :
782 576 : root_rp_refcount++;
783 :
784 576 : atexit (svm_mutex_cleanup);
785 :
786 : /* Randomize the shared-VM base at init time */
787 576 : if (MMAP_PAGESIZE <= (4 << 10))
788 576 : randomize_baseva = (ticks & 15) * MMAP_PAGESIZE;
789 : else
790 0 : randomize_baseva = (ticks & 3) * MMAP_PAGESIZE;
791 :
792 576 : a->baseva += randomize_baseva;
793 :
794 576 : rp = svm_map_region (a);
795 576 : if (!rp)
796 0 : return -1;
797 :
798 576 : region_lock (rp, 3);
799 :
800 : /* Set up the main region data structures */
801 576 : if (rp->flags & SVM_FLAGS_NEED_DATA_INIT)
802 : {
803 559 : svm_main_region_t *mp = 0;
804 : void *oldheap;
805 :
806 559 : rp->flags &= ~(SVM_FLAGS_NEED_DATA_INIT);
807 :
808 559 : oldheap = svm_push_pvt_heap (rp);
809 559 : vec_validate (mp, 0);
810 559 : mp->name_hash = hash_create_string (0, sizeof (uword));
811 559 : mp->root_path = a->root_path ? format (0, "%s%c", a->root_path, 0) : 0;
812 559 : mp->uid = a->uid;
813 559 : mp->gid = a->gid;
814 559 : rp->data_base = mp;
815 559 : svm_pop_heap (oldheap);
816 : }
817 576 : region_unlock (rp);
818 576 : root_rp = rp;
819 :
820 576 : return 0;
821 : }
822 :
823 : void
824 0 : svm_region_init (void)
825 : {
826 0 : svm_map_region_args_t _a, *a = &_a;
827 :
828 0 : clib_memset (a, 0, sizeof (*a));
829 0 : a->root_path = 0;
830 0 : a->name = SVM_GLOBAL_REGION_NAME;
831 0 : a->baseva = svm_get_global_region_base_va ();
832 0 : a->size = SVM_GLOBAL_REGION_SIZE;
833 0 : a->flags = SVM_FLAGS_NODATA;
834 0 : a->uid = 0;
835 0 : a->gid = 0;
836 :
837 0 : svm_region_init_internal (a);
838 0 : }
839 :
840 : int
841 0 : svm_region_init_chroot (const char *root_path)
842 : {
843 0 : svm_map_region_args_t _a, *a = &_a;
844 :
845 0 : clib_memset (a, 0, sizeof (*a));
846 0 : a->root_path = root_path;
847 0 : a->name = SVM_GLOBAL_REGION_NAME;
848 0 : a->baseva = svm_get_global_region_base_va ();
849 0 : a->size = SVM_GLOBAL_REGION_SIZE;
850 0 : a->flags = SVM_FLAGS_NODATA;
851 0 : a->uid = 0;
852 0 : a->gid = 0;
853 :
854 0 : return svm_region_init_internal (a);
855 : }
856 :
857 : void
858 17 : svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid)
859 : {
860 17 : svm_map_region_args_t _a, *a = &_a;
861 :
862 17 : clib_memset (a, 0, sizeof (*a));
863 17 : a->root_path = root_path;
864 17 : a->name = SVM_GLOBAL_REGION_NAME;
865 17 : a->baseva = svm_get_global_region_base_va ();
866 17 : a->size = SVM_GLOBAL_REGION_SIZE;
867 17 : a->flags = SVM_FLAGS_NODATA;
868 17 : a->uid = uid;
869 17 : a->gid = gid;
870 :
871 17 : svm_region_init_internal (a);
872 17 : }
873 :
874 : void
875 559 : svm_region_init_args (svm_map_region_args_t * a)
876 : {
877 559 : svm_region_init_internal (a);
878 559 : }
879 :
880 : void *
881 576 : svm_region_find_or_create (svm_map_region_args_t * a)
882 : {
883 : svm_main_region_t *mp;
884 : svm_region_t *rp;
885 : uword need_nbits;
886 : int index, i;
887 : void *oldheap;
888 : uword *p;
889 : u8 *name;
890 : svm_subregion_t *subp;
891 :
892 576 : ASSERT (root_rp);
893 :
894 576 : a->size += MMAP_PAGESIZE +
895 576 : ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
896 576 : a->size = rnd_pagesize (a->size);
897 :
898 576 : region_lock (root_rp, 4);
899 576 : oldheap = svm_push_pvt_heap (root_rp);
900 576 : mp = root_rp->data_base;
901 :
902 576 : ASSERT (mp);
903 :
904 : /* Map the named region from the correct chroot environment */
905 576 : if (a->root_path == NULL)
906 576 : a->root_path = (char *) mp->root_path;
907 :
908 : /*
909 : * See if this region is already known. If it is, we're
910 : * almost done...
911 : */
912 1152 : p = hash_get_mem (mp->name_hash, a->name);
913 :
914 576 : if (p)
915 : {
916 17 : rp = svm_map_region (a);
917 17 : region_unlock (root_rp);
918 17 : svm_pop_heap (oldheap);
919 17 : return rp;
920 : }
921 :
922 : /* Create the region. */
923 559 : ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);
924 :
925 559 : need_nbits = a->size / MMAP_PAGESIZE;
926 :
927 559 : index = 1; /* $$$ fixme, figure out how many bits to really skip */
928 :
929 : /*
930 : * Scan the virtual space allocation bitmap, looking for a large
931 : * enough chunk
932 : */
933 : do
934 : {
935 18447 : if (clib_bitmap_get_no_check (root_rp->bitmap, index) == 0)
936 : {
937 2308110 : for (i = 0; i < (need_nbits - 1); i++)
938 : {
939 2307550 : if (clib_bitmap_get_no_check (root_rp->bitmap, index + i) == 1)
940 : {
941 0 : index = index + i;
942 0 : goto next;
943 : }
944 : }
945 559 : break;
946 : }
947 17888 : index++;
948 17888 : next:;
949 : }
950 17888 : while (index < root_rp->bitmap_size);
951 :
952 : /* Completely out of VM? */
953 559 : if (index >= root_rp->bitmap_size)
954 : {
955 0 : clib_warning ("region %s: not enough VM to allocate 0x%llx (%lld)",
956 : root_rp->region_name, a->size, a->size);
957 0 : svm_pop_heap (oldheap);
958 0 : region_unlock (root_rp);
959 0 : return 0;
960 : }
961 :
962 : /*
963 : * Mark virtual space allocated
964 : */
965 : #if CLIB_DEBUG > 1
966 : clib_warning ("set %d bits at index %d", need_nbits, index);
967 : #endif
968 :
969 2308670 : for (i = 0; i < need_nbits; i++)
970 : {
971 2308110 : clib_bitmap_set_no_check (root_rp->bitmap, index + i, 1);
972 : }
973 :
974 : /* Place this region where it goes... */
975 559 : a->baseva = root_rp->virtual_base + index * MMAP_PAGESIZE;
976 :
977 559 : rp = svm_map_region (a);
978 :
979 559 : pool_get (mp->subregions, subp);
980 559 : name = format (0, "%s%c", a->name, 0);
981 559 : subp->subregion_name = name;
982 :
983 1118 : hash_set_mem (mp->name_hash, name, subp - mp->subregions);
984 :
985 559 : svm_pop_heap (oldheap);
986 :
987 559 : region_unlock (root_rp);
988 :
989 559 : return (rp);
990 : }
991 :
992 : void
993 1118 : svm_region_unlink (svm_region_t * rp)
994 : {
995 1118 : svm_map_region_args_t _a, *a = &_a;
996 : svm_main_region_t *mp;
997 : u8 *shm_name;
998 :
999 1118 : ASSERT (root_rp);
1000 1118 : ASSERT (rp);
1001 1118 : ASSERT (vec_c_string_is_terminated (rp->region_name));
1002 :
1003 1118 : mp = root_rp->data_base;
1004 1118 : ASSERT (mp);
1005 :
1006 1118 : a->root_path = (char *) mp->root_path;
1007 1118 : a->name = rp->region_name;
1008 1118 : shm_name = shm_name_from_svm_map_region_args (a);
1009 : if (CLIB_DEBUG > 1)
1010 : clib_warning ("[%d] shm_unlink (%s)", getpid (), shm_name);
1011 1118 : shm_unlink ((const char *) shm_name);
1012 1118 : vec_free (shm_name);
1013 1118 : }
1014 :
1015 : /*
1016 : * svm_region_unmap
1017 : *
1018 : * Let go of the indicated region. If the calling process
1019 : * is the last customer, throw it away completely.
1020 : * The root region mutex guarantees atomicity with respect to
1021 : * a new region client showing up at the wrong moment.
1022 : */
1023 : void
1024 577 : svm_region_unmap_internal (void *rp_arg, u8 is_client)
1025 : {
1026 577 : int i, mypid = getpid ();
1027 : int nclients_left;
1028 : void *oldheap;
1029 : uword virtual_base, virtual_size;
1030 577 : svm_region_t *rp = rp_arg;
1031 : char *name;
1032 :
1033 : /*
1034 : * If we take a signal while holding one or more shared-memory
1035 : * mutexes, we may end up back here from an otherwise
1036 : * benign exit handler. Bail out to avoid a recursive
1037 : * mutex screw-up.
1038 : */
1039 577 : if (nheld)
1040 559 : return;
1041 :
1042 577 : ASSERT (rp);
1043 577 : ASSERT (root_rp);
1044 :
1045 : if (CLIB_DEBUG > 1)
1046 : clib_warning ("[%d] unmap region %s", getpid (), rp->region_name);
1047 :
1048 577 : region_lock (root_rp, 5);
1049 577 : region_lock (rp, 6);
1050 :
1051 577 : oldheap = svm_push_pvt_heap (rp); /* nb vec_delete() in the loop */
1052 :
1053 : /* Remove the caller from the list of mappers */
1054 577 : clib_mem_unpoison (rp->client_pids, vec_bytes (rp->client_pids));
1055 595 : for (i = 0; i < vec_len (rp->client_pids); i++)
1056 : {
1057 594 : if (rp->client_pids[i] == mypid)
1058 : {
1059 576 : vec_delete (rp->client_pids, 1, i);
1060 576 : goto found;
1061 : }
1062 : }
1063 1 : clib_warning ("pid %d AWOL", mypid);
1064 :
1065 577 : found:
1066 :
1067 577 : svm_pop_heap (oldheap);
1068 :
1069 577 : nclients_left = vec_len (rp->client_pids);
1070 577 : virtual_base = rp->virtual_base;
1071 577 : virtual_size = rp->virtual_size;
1072 :
1073 577 : if (nclients_left == 0)
1074 : {
1075 : int index, nbits, i;
1076 : svm_main_region_t *mp;
1077 : uword *p;
1078 : svm_subregion_t *subp;
1079 :
1080 : /* Kill the region, last guy on his way out */
1081 :
1082 559 : oldheap = svm_push_pvt_heap (root_rp);
1083 559 : name = vec_dup (rp->region_name);
1084 :
1085 559 : virtual_base = rp->virtual_base;
1086 559 : virtual_size = rp->virtual_size;
1087 :
1088 : /* Figure out which bits to clear in the root region bitmap */
1089 559 : index = (virtual_base - root_rp->virtual_base) / MMAP_PAGESIZE;
1090 :
1091 559 : nbits = (virtual_size + MMAP_PAGESIZE - 1) / MMAP_PAGESIZE;
1092 :
1093 : #if CLIB_DEBUG > 1
1094 : clib_warning ("clear %d bits at index %d", nbits, index);
1095 : #endif
1096 : /* Give back the allocated VM */
1097 2308670 : for (i = 0; i < nbits; i++)
1098 : {
1099 2308110 : clib_bitmap_set_no_check (root_rp->bitmap, index + i, 0);
1100 : }
1101 :
1102 559 : mp = root_rp->data_base;
1103 :
1104 1118 : p = hash_get_mem (mp->name_hash, name);
1105 :
1106 : /* Better never happen ... */
1107 559 : if (p == NULL)
1108 : {
1109 0 : region_unlock (rp);
1110 0 : region_unlock (root_rp);
1111 0 : svm_pop_heap (oldheap);
1112 0 : clib_warning ("Region name '%s' not found?", name);
1113 0 : return;
1114 : }
1115 :
1116 : /* Remove from the root region subregion pool */
1117 559 : subp = mp->subregions + p[0];
1118 559 : pool_put (mp->subregions, subp);
1119 :
1120 1118 : hash_unset_mem (mp->name_hash, name);
1121 :
1122 559 : vec_free (name);
1123 :
1124 559 : region_unlock (rp);
1125 :
1126 : /* If a client asks for the cleanup, don't unlink the backing
1127 : * file since we can't tell if it has been recreated. */
1128 559 : if (!is_client)
1129 559 : svm_region_unlink (rp);
1130 :
1131 559 : munmap ((void *) virtual_base, virtual_size);
1132 559 : region_unlock (root_rp);
1133 559 : svm_pop_heap (oldheap);
1134 559 : return;
1135 : }
1136 :
1137 18 : region_unlock (rp);
1138 18 : region_unlock (root_rp);
1139 :
1140 18 : munmap ((void *) virtual_base, virtual_size);
1141 : }
1142 :
1143 : void
1144 560 : svm_region_unmap (void *rp_arg)
1145 : {
1146 560 : svm_region_unmap_internal (rp_arg, 0 /* is_client */ );
1147 560 : }
1148 :
1149 : void
1150 17 : svm_region_unmap_client (void *rp_arg)
1151 : {
1152 17 : svm_region_unmap_internal (rp_arg, 1 /* is_client */ );
1153 17 : }
1154 :
1155 : /*
1156 : * svm_region_exit
1157 : */
1158 : static void
1159 577 : svm_region_exit_internal (u8 is_client)
1160 : {
1161 : void *oldheap;
1162 577 : int i, mypid = getpid ();
1163 : uword virtual_base, virtual_size;
1164 :
1165 : /* It felt so nice we did it twice... */
1166 577 : if (root_rp == 0)
1167 0 : return;
1168 :
1169 577 : if (--root_rp_refcount > 0)
1170 0 : return;
1171 :
1172 : /*
1173 : * If we take a signal while holding one or more shared-memory
1174 : * mutexes, we may end up back here from an otherwise
1175 : * benign exit handler. Bail out to avoid a recursive
1176 : * mutex screw-up.
1177 : */
1178 577 : if (nheld)
1179 0 : return;
1180 :
1181 577 : region_lock (root_rp, 7);
1182 577 : oldheap = svm_push_pvt_heap (root_rp);
1183 :
1184 577 : virtual_base = root_rp->virtual_base;
1185 577 : virtual_size = root_rp->virtual_size;
1186 :
1187 577 : clib_mem_unpoison (root_rp->client_pids, vec_bytes (root_rp->client_pids));
1188 595 : for (i = 0; i < vec_len (root_rp->client_pids); i++)
1189 : {
1190 594 : if (root_rp->client_pids[i] == mypid)
1191 : {
1192 576 : vec_delete (root_rp->client_pids, 1, i);
1193 576 : goto found;
1194 : }
1195 : }
1196 1 : clib_warning ("pid %d AWOL", mypid);
1197 :
1198 577 : found:
1199 :
1200 577 : if (!is_client && vec_len (root_rp->client_pids) == 0)
1201 559 : svm_region_unlink (root_rp);
1202 :
1203 577 : region_unlock (root_rp);
1204 577 : svm_pop_heap (oldheap);
1205 :
1206 577 : root_rp = 0;
1207 577 : munmap ((void *) virtual_base, virtual_size);
1208 : }
1209 :
1210 : void
1211 560 : svm_region_exit (void)
1212 : {
1213 560 : svm_region_exit_internal (0 /* is_client */ );
1214 560 : }
1215 :
1216 : void
1217 17 : svm_region_exit_client (void)
1218 : {
1219 17 : svm_region_exit_internal (1 /* is_client */ );
1220 17 : }
1221 :
1222 : void
1223 0 : svm_client_scan_this_region_nolock (svm_region_t * rp)
1224 : {
1225 : int j;
1226 0 : int mypid = getpid ();
1227 : void *oldheap;
1228 :
1229 0 : for (j = 0; j < vec_len (rp->client_pids); j++)
1230 : {
1231 0 : if (mypid == rp->client_pids[j])
1232 0 : continue;
1233 0 : if (rp->client_pids[j] && (kill (rp->client_pids[j], 0) < 0))
1234 : {
1235 0 : clib_warning ("%s: cleanup ghost pid %d",
1236 : rp->region_name, rp->client_pids[j]);
1237 : /* nb: client vec in rp->region_heap */
1238 0 : oldheap = svm_push_pvt_heap (rp);
1239 0 : vec_delete (rp->client_pids, 1, j);
1240 0 : j--;
1241 0 : svm_pop_heap (oldheap);
1242 : }
1243 : }
1244 0 : }
1245 :
1246 :
1247 : /*
1248 : * Scan svm regions for dead clients
1249 : */
1250 : void
1251 0 : svm_client_scan (const char *root_path)
1252 : {
1253 : int i, j;
1254 : svm_main_region_t *mp;
1255 0 : svm_map_region_args_t *a = 0;
1256 : svm_region_t *root_rp;
1257 : svm_region_t *rp;
1258 : svm_subregion_t *subp;
1259 0 : u8 *name = 0;
1260 0 : u8 **svm_names = 0;
1261 : void *oldheap;
1262 0 : int mypid = getpid ();
1263 :
1264 0 : vec_validate (a, 0);
1265 :
1266 0 : svm_region_init_chroot (root_path);
1267 :
1268 0 : root_rp = svm_get_root_rp ();
1269 :
1270 0 : pthread_mutex_lock (&root_rp->mutex);
1271 :
1272 0 : mp = root_rp->data_base;
1273 :
1274 0 : for (j = 0; j < vec_len (root_rp->client_pids); j++)
1275 : {
1276 0 : if (mypid == root_rp->client_pids[j])
1277 0 : continue;
1278 0 : if (root_rp->client_pids[j] && (kill (root_rp->client_pids[j], 0) < 0))
1279 : {
1280 0 : clib_warning ("%s: cleanup ghost pid %d",
1281 : root_rp->region_name, root_rp->client_pids[j]);
1282 : /* nb: client vec in root_rp->region_heap */
1283 0 : oldheap = svm_push_pvt_heap (root_rp);
1284 0 : vec_delete (root_rp->client_pids, 1, j);
1285 0 : j--;
1286 0 : svm_pop_heap (oldheap);
1287 : }
1288 : }
1289 :
1290 : /*
1291 : * Snapshot names, can't hold root rp mutex across
1292 : * find_or_create.
1293 : */
1294 : /* *INDENT-OFF* */
1295 0 : pool_foreach (subp, mp->subregions) {
1296 0 : name = vec_dup (subp->subregion_name);
1297 0 : vec_add1(svm_names, name);
1298 : }
1299 : /* *INDENT-ON* */
1300 :
1301 0 : pthread_mutex_unlock (&root_rp->mutex);
1302 :
1303 0 : for (i = 0; i < vec_len (svm_names); i++)
1304 : {
1305 0 : vec_validate (a, 0);
1306 0 : a->root_path = root_path;
1307 0 : a->name = (char *) svm_names[i];
1308 0 : rp = svm_region_find_or_create (a);
1309 0 : if (rp)
1310 : {
1311 0 : pthread_mutex_lock (&rp->mutex);
1312 :
1313 0 : svm_client_scan_this_region_nolock (rp);
1314 :
1315 0 : pthread_mutex_unlock (&rp->mutex);
1316 0 : svm_region_unmap (rp);
1317 0 : vec_free (svm_names[i]);
1318 : }
1319 0 : vec_free (a);
1320 : }
1321 0 : vec_free (svm_names);
1322 :
1323 0 : svm_region_exit ();
1324 :
1325 0 : vec_free (a);
1326 0 : }
1327 :
1328 : /*
1329 : * fd.io coding-style-patch-verification: ON
1330 : *
1331 : * Local Variables:
1332 : * eval: (c-set-style "gnu")
1333 : * End:
1334 : */
|