Line data Source code
1 : /*
2 : * Copyright (c) 2016-2019 Cisco and/or its affiliates.
3 : * Licensed under the Apache License, Version 2.0 (the "License");
4 : * you may not use this file except in compliance with the License.
5 : * You may obtain a copy of the License at:
6 : *
7 : * http://www.apache.org/licenses/LICENSE-2.0
8 : *
9 : * Unless required by applicable law or agreed to in writing, software
10 : * distributed under the License is distributed on an "AS IS" BASIS,
11 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : * See the License for the specific language governing permissions and
13 : * limitations under the License.
14 : */
15 :
16 : #include <svm/fifo_segment.h>
17 : #include <vppinfra/mem.h>
18 :
19 : static inline void *
20 1556 : fsh_alloc_aligned (fifo_segment_header_t *fsh, uword size, uword align)
21 : {
22 : uword cur_pos, cur_pos_align, new_pos;
23 :
24 1556 : cur_pos = clib_atomic_load_relax_n (&fsh->byte_index);
25 1556 : cur_pos_align = round_pow2_u64 (cur_pos, align);
26 1556 : size = round_pow2_u64 (size, align);
27 1556 : new_pos = cur_pos_align + size;
28 :
29 1556 : if (new_pos >= fsh->max_byte_index)
30 3 : return 0;
31 :
32 1553 : while (!clib_atomic_cmp_and_swap_acq_relax (&fsh->byte_index, &cur_pos,
33 : &new_pos, 1 /* weak */))
34 : {
35 0 : cur_pos_align = round_pow2_u64 (cur_pos, align);
36 0 : new_pos = cur_pos_align + size;
37 0 : if (new_pos >= fsh->max_byte_index)
38 0 : return 0;
39 : }
40 1553 : return uword_to_pointer ((u8 *) fsh + cur_pos_align, void *);
41 : }
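/*
 * Editor's note: fsh_alloc_aligned above is a lock-free bump (arena)
 * allocator over the shared segment. fsh->byte_index only ever grows: the
 * current position is rounded up to `align`, the size is rounded as well,
 * and a CAS advances byte_index; the caller gets a pointer relative to fsh,
 * or 0 if the segment is exhausted. On CAS failure the observed byte_index
 * is reloaded into cur_pos and the computation is retried. Illustrative
 * numbers only: with byte_index = 200, align = 64 and size = 24,
 * cur_pos_align = 256, size rounds to 64, a successful CAS moves byte_index
 * to 320 and the call returns (u8 *) fsh + 256. Space handed out here is
 * never returned to the arena; fifo headers and chunks are recycled through
 * the per-slice free lists further below instead.
 */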
42 :
43 : static inline void *
44 0 : fsh_alloc (fifo_segment_header_t *fsh, uword size)
45 : {
46 0 : return fsh_alloc_aligned (fsh, size, 8);
47 : }
48 :
49 : static inline fifo_segment_slice_t *
50 443744 : fsh_slice_get (fifo_segment_header_t * fsh, u32 slice_index)
51 : {
52 443744 : return &fsh->slices[slice_index];
53 : }
54 :
55 : static inline fifo_slice_private_t *
56 1798 : fs_slice_private_get (fifo_segment_t *fs, u32 slice_index)
57 : {
58 1798 : ASSERT (slice_index < fs->n_slices);
59 1798 : return &fs->slices[slice_index];
60 : }
61 :
62 : static char *fifo_segment_mem_status_strings[] = {
63 : #define _(sym,str) str,
64 : foreach_segment_mem_status
65 : #undef _
66 : };
67 :
68 : static inline uword
69 3337 : fsh_n_free_bytes (fifo_segment_header_t * fsh)
70 : {
71 3337 : uword cur_pos = clib_atomic_load_relax_n (&fsh->byte_index);
72 3337 : ASSERT (fsh->max_byte_index > cur_pos);
73 3337 : return fsh->max_byte_index - cur_pos;
74 : }
75 :
76 : static inline void
77 213239 : fsh_cached_bytes_add (fifo_segment_header_t * fsh, uword size)
78 : {
79 213239 : clib_atomic_fetch_add_rel (&fsh->n_cached_bytes, size);
80 213239 : }
81 :
82 : static inline void
83 221457 : fsh_cached_bytes_sub (fifo_segment_header_t * fsh, uword size)
84 : {
85 221457 : clib_atomic_fetch_sub_rel (&fsh->n_cached_bytes, size);
86 221457 : }
87 :
88 : static inline uword
89 1902 : fsh_n_cached_bytes (fifo_segment_header_t * fsh)
90 : {
91 1902 : uword n_cached = clib_atomic_load_relax_n (&fsh->n_cached_bytes);
92 1902 : return n_cached;
93 : }
94 :
95 : static inline void
96 1698 : fsh_active_fifos_update (fifo_segment_header_t * fsh, int inc)
97 : {
98 1698 : clib_atomic_fetch_add_rel (&fsh->n_active_fifos, inc);
99 1698 : }
100 :
101 : static inline u32
102 524 : fsh_n_active_fifos (fifo_segment_header_t * fsh)
103 : {
104 524 : return clib_atomic_load_relax_n (&fsh->n_active_fifos);
105 : }
106 :
107 : static inline uword
108 0 : fs_virtual_mem (fifo_segment_t *fs)
109 : {
110 0 : fifo_segment_header_t *fsh = fs->h;
111 : fifo_segment_slice_t *fss;
112 0 : uword total_vm = 0;
113 : int i;
114 :
115 0 : for (i = 0; i < fs->n_slices; i++)
116 : {
117 0 : fss = fsh_slice_get (fsh, i);
118 0 : total_vm += clib_atomic_load_relax_n (&fss->virtual_mem);
119 : }
120 0 : return total_vm;
121 : }
122 :
123 : void
124 8409 : fsh_virtual_mem_update (fifo_segment_header_t * fsh, u32 slice_index,
125 : int n_bytes)
126 : {
127 8409 : fifo_segment_slice_t *fss = fsh_slice_get (fsh, slice_index);
128 8409 : fss->virtual_mem += n_bytes;
129 8409 : }
130 :
131 : static inline int
132 223937 : fss_chunk_fl_index_is_valid (fifo_segment_slice_t *fss, u32 fl_index)
133 : {
134 223937 : return (fl_index < FS_CHUNK_VEC_LEN);
135 : }
136 :
137 : #define FS_CL_HEAD_MASK 0xFFFFFFFFFFFF
138 : #define FS_CL_HEAD_TMASK 0xFFFF000000000000
139 : #define FS_CL_HEAD_TINC (1ULL << 48)
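/*
 * Free-list head word layout: the low 48 bits hold the shared-pointer offset
 * of the chunk at the top of the stack, the high 16 bits hold an ABA tag
 * that is bumped on every push/pop (see the note in fss_chunk_free_list_pop
 * below).
 */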
140 :
141 : static svm_fifo_chunk_t *
142 43 : fss_chunk_free_list_head (fifo_segment_header_t *fsh,
143 : fifo_segment_slice_t *fss, u32 fl_index)
144 : {
145 43 : fs_sptr_t headsp = clib_atomic_load_relax_n (&fss->free_chunks[fl_index]);
146 43 : return fs_chunk_ptr (fsh, headsp & FS_CL_HEAD_MASK);
147 : }
148 :
149 : static void
150 220197 : fss_chunk_free_list_push (fifo_segment_header_t *fsh,
151 : fifo_segment_slice_t *fss, u32 fl_index,
152 : svm_fifo_chunk_t *c)
153 : {
154 : fs_sptr_t old_head, new_head, csp;
155 :
156 220197 : csp = fs_chunk_sptr (fsh, c);
157 220197 : ASSERT (csp <= FS_CL_HEAD_MASK);
158 220197 : old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
159 :
160 : do
161 : {
162 220412 : c->next = old_head & FS_CL_HEAD_MASK;
163 220412 : new_head = csp + ((old_head + FS_CL_HEAD_TINC) & FS_CL_HEAD_TMASK);
164 : }
165 220412 : while (!__atomic_compare_exchange (&fss->free_chunks[fl_index], &old_head,
166 : &new_head, 0 /* weak */, __ATOMIC_RELEASE,
167 220412 : __ATOMIC_ACQUIRE));
168 220197 : }
169 :
170 : static void
171 985 : fss_chunk_free_list_push_list (fifo_segment_header_t *fsh,
172 : fifo_segment_slice_t *fss, u32 fl_index,
173 : svm_fifo_chunk_t *head, svm_fifo_chunk_t *tail)
174 : {
175 : fs_sptr_t old_head, new_head, headsp;
176 :
177 985 : headsp = fs_chunk_sptr (fsh, head);
178 985 : ASSERT (headsp <= FS_CL_HEAD_MASK);
179 985 : old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
180 :
181 : do
182 : {
183 985 : tail->next = old_head & FS_CL_HEAD_MASK;
184 985 : new_head = headsp + ((old_head + FS_CL_HEAD_TINC) & FS_CL_HEAD_TMASK);
185 : }
186 985 : while (!__atomic_compare_exchange (&fss->free_chunks[fl_index], &old_head,
187 : &new_head, 0 /* weak */, __ATOMIC_RELEASE,
188 985 : __ATOMIC_ACQUIRE));
189 985 : }
190 :
191 : static svm_fifo_chunk_t *
192 222942 : fss_chunk_free_list_pop (fifo_segment_header_t *fsh, fifo_segment_slice_t *fss,
193 : u32 fl_index)
194 : {
195 : fs_sptr_t old_head, new_head;
196 : svm_fifo_chunk_t *c;
197 :
198 222942 : ASSERT (fss_chunk_fl_index_is_valid (fss, fl_index));
199 :
200 222942 : old_head = clib_atomic_load_acq_n (&fss->free_chunks[fl_index]);
201 :
 202 : /* Lock-free stacks are susceptible to the ABA problem when one side
 203 : * allocates a chunk and shortly thereafter frees it. To circumvent that,
 204 : * the upper bits of the list head shared pointer, i.e., the offset to the
 205 : * chunk, are reused as a tag. The tag is incremented on each push/pop, so
 206 : * a collision can only occur if an element is popped and pushed back
 207 : * exactly after a complete wrap of the 16-bit tag. It is unlikely that
 208 : * either side will be descheduled for that long */
209 : do
210 : {
211 223269 : if (!(old_head & FS_CL_HEAD_MASK))
212 1375 : return 0;
213 221894 : c = fs_chunk_ptr (fsh, old_head & FS_CL_HEAD_MASK);
214 221894 : new_head = c->next + ((old_head + FS_CL_HEAD_TINC) & FS_CL_HEAD_TMASK);
215 : }
216 221894 : while (!__atomic_compare_exchange (&fss->free_chunks[fl_index], &old_head,
217 : &new_head, 0 /* weak */, __ATOMIC_RELEASE,
218 221894 : __ATOMIC_ACQUIRE));
219 :
220 221567 : return c;
221 : }
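/*
 * Illustrative tag arithmetic with made-up values: if the head word is
 * 0x0003000000001000 (tag 3, top-of-stack offset 0x1000) and a chunk at
 * offset 0x2000 is pushed, then c->next = 0x1000 and the new head becomes
 * 0x2000 + ((0x0003000000001000 + FS_CL_HEAD_TINC) & FS_CL_HEAD_TMASK)
 * = 0x0004000000002000, i.e., tag 4 with the new chunk on top.
 */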
222 :
223 : static void
224 707 : fss_fifo_free_list_push (fifo_segment_header_t *fsh, fifo_segment_slice_t *fss,
225 : svm_fifo_shared_t *sf)
226 : {
227 707 : sf->next = fss->free_fifos;
228 707 : fss->free_fifos = fs_sptr (fsh, sf);
229 707 : }
230 :
231 : static void
232 278 : fss_fifo_free_list_push_list (fifo_segment_header_t *fsh,
233 : fifo_segment_slice_t *fss,
234 : svm_fifo_shared_t *head, svm_fifo_shared_t *tail)
235 : {
236 278 : tail->next = fss->free_fifos;
237 278 : fss->free_fifos = fs_sptr (fsh, head);
238 278 : }
239 :
240 : svm_fifo_shared_t *
241 995 : fss_fifo_free_list_pop (fifo_segment_header_t *fsh, fifo_segment_slice_t *fss)
242 : {
243 : svm_fifo_shared_t *sf;
244 995 : sf = fs_ptr (fsh, fss->free_fifos);
245 995 : fss->free_fifos = sf->next;
246 995 : return sf;
247 : }
248 :
249 : static inline void
250 569 : pfss_fifo_add_active_list (fifo_slice_private_t *pfss, svm_fifo_t *f)
251 : {
252 569 : if (pfss->active_fifos)
253 : {
254 406 : pfss->active_fifos->prev = f;
255 406 : f->next = pfss->active_fifos;
256 : }
257 569 : pfss->active_fifos = f;
258 569 : }
259 :
260 : static inline void
261 425 : pfss_fifo_del_active_list (fifo_slice_private_t *pfss, svm_fifo_t *f)
262 : {
263 425 : if (f->flags & SVM_FIFO_F_LL_TRACKED)
264 : {
265 425 : if (f->prev)
266 258 : f->prev->next = f->next;
267 : else
268 167 : pfss->active_fifos = f->next;
269 425 : if (f->next)
270 72 : f->next->prev = f->prev;
271 : }
272 425 : }
273 :
274 : static inline uword
275 1054 : fss_fl_chunk_bytes (fifo_segment_slice_t * fss)
276 : {
277 1054 : return clib_atomic_load_relax_n (&fss->n_fl_chunk_bytes);
278 : }
279 :
280 : static inline void
281 213239 : fss_fl_chunk_bytes_add (fifo_segment_slice_t * fss, uword size)
282 : {
283 213239 : clib_atomic_fetch_add_relax (&fss->n_fl_chunk_bytes, size);
284 213239 : }
285 :
286 : static inline void
287 221457 : fss_fl_chunk_bytes_sub (fifo_segment_slice_t * fss, uword size)
288 : {
289 221457 : clib_atomic_fetch_sub_relax (&fss->n_fl_chunk_bytes, size);
290 221457 : }
291 :
292 : /**
293 : * Initialize fifo segment shared header
294 : */
295 : int
296 368 : fifo_segment_init (fifo_segment_t * fs)
297 : {
298 368 : u32 align = 8, offset = FIFO_SEGMENT_ALLOC_OVERHEAD, slices_sz, i;
299 : uword max_fifo, seg_start, seg_sz;
300 : fifo_segment_header_t *fsh;
301 : ssvm_shared_header_t *sh;
302 : void *seg_data;
303 :
304 : /* TODO remove ssvm heap entirely */
305 368 : sh = fs->ssvm.sh;
306 :
307 368 : seg_data = (u8 *) sh + offset;
308 368 : seg_sz = sh->ssvm_size - offset;
309 :
310 368 : fs->n_slices = clib_max (fs->n_slices, 1);
311 368 : slices_sz = sizeof (fifo_segment_slice_t) * fs->n_slices;
312 :
313 736 : seg_start = round_pow2_u64 (pointer_to_uword (seg_data), align);
314 368 : fsh = uword_to_pointer (seg_start, void *);
315 368 : clib_mem_unpoison (fsh, seg_sz);
316 368 : memset (fsh, 0, sizeof (*fsh) + slices_sz);
317 :
318 368 : fsh->byte_index = sizeof (*fsh) + slices_sz;
319 368 : fsh->max_byte_index = seg_sz;
320 368 : fsh->n_slices = fs->n_slices;
321 368 : max_fifo = clib_min ((seg_sz - slices_sz) / 2, FIFO_SEGMENT_MAX_FIFO_SIZE);
322 368 : fsh->max_log2_fifo_size = min_log2 (max_fifo);
323 368 : fsh->n_cached_bytes = 0;
324 368 : fsh->n_reserved_bytes = fsh->byte_index;
325 368 : fsh->start_byte_index = fsh->byte_index;
326 368 : ASSERT (fsh->max_byte_index <= sh->ssvm_size - offset);
327 :
328 368 : fs->max_byte_index = fsh->max_byte_index;
329 368 : fs->h = fsh;
330 368 : sh->opaque[0] = (void *) ((u8 *) fsh - (u8 *) fs->ssvm.sh);
331 :
332 : /* Allow random offsets */
333 368 : fs->ssvm.sh->ssvm_va = 0;
334 :
335 368 : vec_validate (fs->slices, fs->n_slices - 1);
336 807 : for (i = 0; i < fs->n_slices; i++)
337 439 : fs->slices[i].fifos =
338 439 : clib_mem_bulk_init (sizeof (svm_fifo_t), CLIB_CACHE_LINE_BYTES, 32);
339 :
340 368 : sh->ready = 1;
341 368 : return (0);
342 : }
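/*
 * Rough layout of an initialized segment (a sketch based on the code above,
 * not an authoritative map):
 *
 *   ssvm shared header | FIFO_SEGMENT_ALLOC_OVERHEAD | fifo_segment_header_t
 *   + per-slice fifo_segment_slice_t array | space carved out on demand by
 *   fsh_alloc_aligned (fifo headers, chunks, message queues), growing from
 *   fsh->byte_index up to fsh->max_byte_index
 *
 * sh->opaque[0] records the offset of fsh within the segment so that clients
 * can locate the header when attaching.
 */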
343 :
344 : /**
 345 : * Create a fifo segment and initialize as server
346 : */
347 : int
348 19 : fifo_segment_create (fifo_segment_main_t * sm, fifo_segment_create_args_t * a)
349 : {
350 : fifo_segment_t *fs;
351 : uword baseva;
352 : int rv;
353 :
354 : /* Allocate a fresh segment */
355 19 : pool_get_zero (sm->segments, fs);
356 :
357 19 : baseva = a->segment_type == SSVM_SEGMENT_PRIVATE ? ~0ULL : sm->next_baseva;
358 19 : fs->ssvm.ssvm_size = a->segment_size;
359 19 : fs->ssvm.is_server = 1;
360 19 : fs->ssvm.my_pid = getpid ();
361 19 : fs->ssvm.name = format (0, "%s%c", a->segment_name, 0);
362 19 : fs->ssvm.requested_va = baseva;
363 :
364 19 : if ((rv = ssvm_server_init (&fs->ssvm, a->segment_type)))
365 : {
366 0 : pool_put (sm->segments, fs);
367 0 : return (rv);
368 : }
369 :
370 : /* Note: requested_va updated due to seg base addr randomization */
371 19 : sm->next_baseva = fs->ssvm.sh->ssvm_va + fs->ssvm.ssvm_size;
372 :
373 19 : fifo_segment_init (fs);
374 19 : vec_add1 (a->new_segment_indices, fs - sm->segments);
375 19 : return (0);
376 : }
377 :
378 : /**
 379 : * Attach to a fifo segment as a client
380 : */
381 : int
382 121 : fifo_segment_attach (fifo_segment_main_t * sm, fifo_segment_create_args_t * a)
383 : {
384 : fifo_segment_header_t *fsh;
385 : fifo_segment_t *fs;
386 : int rv;
387 :
388 121 : pool_get_zero (sm->segments, fs);
389 :
390 121 : fs->fs_index = fs - sm->segments;
391 121 : fs->sm_index = ~0;
392 121 : fs->ssvm.ssvm_size = a->segment_size;
393 121 : fs->ssvm.my_pid = getpid ();
394 121 : fs->ssvm.name = format (0, "%s%c", a->segment_name, 0);
395 121 : fs->ssvm.requested_va = 0;
396 121 : if (a->segment_type == SSVM_SEGMENT_MEMFD)
397 121 : fs->ssvm.fd = a->memfd_fd;
398 : else
399 0 : fs->ssvm.attach_timeout = sm->timeout_in_seconds;
400 :
401 121 : if ((rv = ssvm_client_init (&fs->ssvm, a->segment_type)))
402 : {
403 0 : pool_put (sm->segments, fs);
404 0 : return (rv);
405 : }
406 :
407 : /* Probably a segment without fifos */
408 121 : if (!fs->ssvm.sh->opaque[0])
409 0 : goto done;
410 :
411 121 : fsh = fs->h = (void *) fs->ssvm.sh + (uword) fs->ssvm.sh->opaque[0];
412 121 : fs->max_byte_index = fsh->max_byte_index;
413 121 : vec_validate (fs->slices, 0);
414 121 : fs->slices[0].fifos =
415 121 : clib_mem_bulk_init (sizeof (svm_fifo_t), CLIB_CACHE_LINE_BYTES, 32);
416 :
417 121 : done:
418 121 : vec_add1 (a->new_segment_indices, fs - sm->segments);
419 121 : return (0);
420 : }
421 :
422 : void
423 23 : fifo_segment_delete (fifo_segment_main_t * sm, fifo_segment_t * s)
424 : {
425 23 : fifo_segment_cleanup (s);
426 23 : ssvm_delete (&s->ssvm);
427 23 : clib_memset (s, 0xfe, sizeof (*s));
428 23 : pool_put (sm->segments, s);
429 23 : }
430 :
431 : u32
432 0 : fifo_segment_index (fifo_segment_main_t * sm, fifo_segment_t * s)
433 : {
434 0 : return s - sm->segments;
435 : }
436 :
437 : fifo_segment_t *
438 360 : fifo_segment_get_segment (fifo_segment_main_t * sm, u32 segment_index)
439 : {
440 360 : return pool_elt_at_index (sm->segments, segment_index);
441 : }
442 :
443 : fifo_segment_t *
444 29 : fifo_segment_get_segment_if_valid (fifo_segment_main_t *sm, u32 segment_index)
445 : {
446 29 : if (pool_is_free_index (sm->segments, segment_index))
447 0 : return 0;
448 29 : return pool_elt_at_index (sm->segments, segment_index);
449 : }
450 :
451 : void
452 0 : fifo_segment_info (fifo_segment_t * seg, char **address, size_t * size)
453 : {
454 0 : *address = (char *) seg->ssvm.sh->ssvm_va;
455 0 : *size = seg->ssvm.ssvm_size;
456 0 : }
457 :
458 : void
459 42 : fifo_segment_main_init (fifo_segment_main_t * sm, u64 baseva,
460 : u32 timeout_in_seconds)
461 : {
462 42 : sm->next_baseva = baseva;
463 42 : sm->timeout_in_seconds = timeout_in_seconds;
464 42 : }
465 :
466 : static inline u32
467 443337 : fs_freelist_for_size (u32 size)
468 : {
469 443337 : if (PREDICT_FALSE (size < FIFO_SEGMENT_MIN_FIFO_SIZE))
470 5307 : return 0;
471 438030 : return clib_min (max_log2 (size) - FIFO_SEGMENT_MIN_LOG2_FIFO_SIZE,
472 : FS_CHUNK_VEC_LEN - 1);
473 : }
474 :
475 : static inline u32
476 665443 : fs_freelist_index_to_size (u32 fl_index)
477 : {
478 665443 : return 1 << (fl_index + FIFO_SEGMENT_MIN_LOG2_FIFO_SIZE);
479 : }
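/*
 * Free lists are indexed by power-of-two size class. Assuming
 * FIFO_SEGMENT_MIN_FIFO_SIZE is 4 kB (the "4K minimum" noted further down),
 * FIFO_SEGMENT_MIN_LOG2_FIFO_SIZE is 12 and a hypothetical 10000 byte
 * request maps to fl_index = max_log2 (10000) - 12 = 2, i.e., the 16 kB
 * list, since fs_freelist_index_to_size (2) = 1 << 14. Indices are capped
 * at FS_CHUNK_VEC_LEN - 1.
 */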
480 :
481 : static inline int
482 154 : fs_chunk_size_is_valid (fifo_segment_header_t * fsh, u32 size)
483 : {
484 : /*
485 : * 4K minimum. It's not likely that anything good will happen
486 : * with a smaller FIFO.
487 : */
488 308 : return size >= FIFO_SEGMENT_MIN_FIFO_SIZE &&
489 154 : size <= (1ULL << fsh->max_log2_fifo_size);
490 : }
491 :
492 : svm_fifo_chunk_t *
493 6 : fs_try_alloc_multi_chunk (fifo_segment_header_t * fsh,
494 : fifo_segment_slice_t * fss, u32 data_bytes)
495 : {
496 6 : u32 fl_index, fl_size, n_alloc = 0, req_bytes = data_bytes;
497 6 : svm_fifo_chunk_t *c, *first = 0, *next;
498 :
499 6 : fl_index = fs_freelist_for_size (req_bytes);
500 6 : if (fl_index > 0)
501 6 : fl_index -= 1;
502 :
503 6 : fl_size = fs_freelist_index_to_size (fl_index);
504 :
505 146 : while (req_bytes)
506 : {
507 140 : c = fss_chunk_free_list_pop (fsh, fss, fl_index);
508 140 : if (c)
509 : {
510 116 : c->next = fs_chunk_sptr (fsh, first);
511 116 : first = c;
512 116 : n_alloc += fl_size;
513 116 : req_bytes -= clib_min (fl_size, req_bytes);
514 : }
515 : else
516 : {
 517 : /* This size class is empty; if it is the smallest, give up on smaller chunks */
518 24 : if (fl_index == 0)
519 : {
520 : /* Free all chunks if any allocated */
521 0 : c = first;
522 0 : while (c)
523 : {
524 0 : fl_index = fs_freelist_for_size (c->length);
525 0 : next = fs_chunk_ptr (fsh, c->next);
526 0 : fss_chunk_free_list_push (fsh, fss, fl_index, c);
527 0 : c = next;
528 : }
529 0 : n_alloc = 0;
530 0 : first = 0;
 531 : /* As a last attempt, try allocating a chunk larger than
532 : * the requested size, if possible */
533 0 : fl_index = fs_freelist_for_size (data_bytes) + 1;
534 0 : if (!fss_chunk_fl_index_is_valid (fss, fl_index))
535 0 : return 0;
536 0 : first = fss_chunk_free_list_pop (fsh, fss, fl_index);
537 0 : if (first)
538 : {
539 0 : first->next = 0;
540 0 : n_alloc = fs_freelist_index_to_size (fl_index);
541 0 : goto done;
542 : }
543 0 : return 0;
544 : }
545 24 : fl_index -= 1;
546 24 : fl_size = fl_size >> 1;
547 : }
548 : }
549 :
550 6 : done:
551 6 : fss_fl_chunk_bytes_sub (fss, n_alloc);
552 6 : fsh_cached_bytes_sub (fsh, n_alloc);
553 6 : return first;
554 : }
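/*
 * In short, fs_try_alloc_multi_chunk stitches a fifo together from several
 * cached chunks when no single chunk of the requested size is available: it
 * starts one size class below the request, keeps popping chunks (moving to
 * smaller classes as lists run dry) until the requested bytes are covered,
 * and only as a last resort frees what it gathered and tries one chunk from
 * the next larger size class instead.
 */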
555 :
556 : static int
557 279 : fsh_try_alloc_fifo_hdr_batch (fifo_segment_header_t * fsh,
558 : fifo_segment_slice_t * fss, u32 batch_size)
559 : {
560 279 : svm_fifo_shared_t *f, *head = 0, *tail;
561 : uword size;
562 : u8 *fmem;
563 : int i;
564 :
565 279 : ASSERT (batch_size != 0);
566 :
567 279 : size = (uword) sizeof (*f) * batch_size;
568 :
569 279 : fmem = fsh_alloc_aligned (fsh, size, CLIB_CACHE_LINE_BYTES);
570 279 : if (fmem == 0)
571 1 : return -1;
572 :
573 : /* Carve fifo hdr space */
574 278 : tail = f = (svm_fifo_shared_t *) fmem;
575 5296 : for (i = 0; i < batch_size; i++)
576 : {
577 5018 : clib_memset (f, 0, sizeof (*f));
578 5018 : f->next = fs_sptr (fsh, head);
579 5018 : head = f;
580 5018 : fmem += sizeof (*f);
581 5018 : f = (svm_fifo_shared_t *) fmem;
582 : }
583 :
584 278 : fss_fifo_free_list_push_list (fsh, fss, head, tail);
585 :
586 278 : return 0;
587 : }
588 :
589 : static int
590 987 : fsh_try_alloc_chunk_batch (fifo_segment_header_t * fsh,
591 : fifo_segment_slice_t * fss,
592 : u32 fl_index, u32 batch_size)
593 : {
594 987 : svm_fifo_chunk_t *c, *head = 0, *tail;
595 : uword size, total_chunk_bytes;
596 : u32 rounded_data_size;
597 : u8 *cmem;
598 : int i;
599 :
600 987 : ASSERT (batch_size != 0);
601 :
602 987 : rounded_data_size = fs_freelist_index_to_size (fl_index);
603 987 : total_chunk_bytes = (uword) batch_size *rounded_data_size;
604 987 : size = (uword) (sizeof (*c) + rounded_data_size) * batch_size;
605 :
606 987 : cmem = fsh_alloc_aligned (fsh, size, 8 /* chunk hdr is 24B */);
607 987 : if (cmem == 0)
608 2 : return -1;
609 :
 610 : /* Carve chunk header + data space */
611 985 : tail = c = (svm_fifo_chunk_t *) cmem;
612 14056 : for (i = 0; i < batch_size; i++)
613 : {
614 13071 : c->start_byte = 0;
615 13071 : c->length = rounded_data_size;
616 13071 : c->next = fs_chunk_sptr (fsh, head);
617 13071 : head = c;
618 13071 : cmem += sizeof (*c) + rounded_data_size;
619 13071 : c = (svm_fifo_chunk_t *) cmem;
620 : }
621 :
622 985 : fss_chunk_free_list_push_list (fsh, fss, fl_index, head, tail);
623 985 : fss->num_chunks[fl_index] += batch_size;
624 985 : fss_fl_chunk_bytes_add (fss, total_chunk_bytes);
625 985 : fsh_cached_bytes_add (fsh, total_chunk_bytes);
626 :
627 985 : return 0;
628 : }
629 :
630 : static int
631 154 : fs_try_alloc_fifo_batch (fifo_segment_header_t * fsh,
632 : fifo_segment_slice_t * fss,
633 : u32 fl_index, u32 batch_size)
634 : {
635 154 : if (fsh_try_alloc_fifo_hdr_batch (fsh, fss, batch_size))
636 0 : return 0;
637 154 : return fsh_try_alloc_chunk_batch (fsh, fss, fl_index, batch_size);
638 : }
639 :
640 : static svm_fifo_shared_t *
641 995 : fsh_try_alloc_fifo_hdr (fifo_segment_header_t *fsh, fifo_segment_slice_t *fss)
642 : {
643 : svm_fifo_shared_t *sf;
644 :
645 995 : if (!fss->free_fifos)
646 : {
647 122 : if (fsh_try_alloc_fifo_hdr_batch (fsh, fss,
648 : FIFO_SEGMENT_ALLOC_BATCH_SIZE))
649 0 : return 0;
650 : }
651 :
652 995 : sf = fss_fifo_free_list_pop (fsh, fss);
653 995 : clib_memset (sf, 0, sizeof (*sf));
654 :
655 995 : return sf;
656 : }
657 :
658 : static svm_fifo_chunk_t *
659 221975 : fsh_try_alloc_chunk (fifo_segment_header_t * fsh,
660 : fifo_segment_slice_t * fss, u32 data_bytes)
661 : {
662 : svm_fifo_chunk_t *c;
663 : u32 fl_index;
664 :
665 221975 : fl_index = fs_freelist_for_size (data_bytes);
666 :
667 222802 : free_list:
668 222802 : c = fss_chunk_free_list_pop (fsh, fss, fl_index);
669 222802 : if (c)
670 : {
671 221451 : c->next = 0;
672 221451 : fss_fl_chunk_bytes_sub (fss, fs_freelist_index_to_size (fl_index));
673 221451 : fsh_cached_bytes_sub (fsh, fs_freelist_index_to_size (fl_index));
674 : }
675 : else
676 : {
677 1351 : u32 chunk_size, batch = FIFO_SEGMENT_ALLOC_BATCH_SIZE;
678 : uword n_free;
679 :
680 1351 : chunk_size = fs_freelist_index_to_size (fl_index);
681 1351 : n_free = fsh_n_free_bytes (fsh);
682 :
683 1351 : if (chunk_size <= n_free)
684 : {
685 827 : batch = chunk_size * batch <= n_free ? batch : 1;
686 827 : if (!fsh_try_alloc_chunk_batch (fsh, fss, fl_index, batch))
687 827 : goto free_list;
688 : }
 689 : /* Failed to allocate a larger chunk; try to assemble a multi-chunk
 690 : * allocation that is close to what was actually requested */
691 524 : if (data_bytes <= fss_fl_chunk_bytes (fss))
692 : {
693 6 : c = fs_try_alloc_multi_chunk (fsh, fss, data_bytes);
694 6 : if (c)
695 6 : goto done;
696 0 : batch = n_free / FIFO_SEGMENT_MIN_FIFO_SIZE;
697 0 : if (!batch || fsh_try_alloc_chunk_batch (fsh, fss, 0, batch))
698 0 : goto done;
699 : }
700 518 : if (data_bytes <= fss_fl_chunk_bytes (fss) + n_free)
701 : {
702 0 : u32 min_size = FIFO_SEGMENT_MIN_FIFO_SIZE;
703 0 : if (n_free < min_size)
704 0 : goto done;
705 0 : batch = (data_bytes - fss_fl_chunk_bytes (fss)) / min_size;
706 0 : batch = clib_min (batch + 1, n_free / min_size);
707 0 : if (fsh_try_alloc_chunk_batch (fsh, fss, 0, batch))
708 0 : goto done;
709 0 : c = fs_try_alloc_multi_chunk (fsh, fss, data_bytes);
710 : }
711 : }
712 :
713 518 : done:
714 :
715 221975 : return c;
716 : }
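/*
 * Approximate fallback order used by fsh_try_alloc_chunk above:
 *   1. pop a chunk from the slice free list for the request's size class
 *   2. if empty and the arena has room, carve a batch of such chunks and
 *      retry the free list
 *   3. otherwise, try assembling a multi-chunk allocation from smaller
 *      cached chunks
 *   4. as a last resort, carve minimum-size chunks from whatever arena
 *      space remains and retry the multi-chunk path
 */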
717 :
718 : /**
719 : * Try to allocate new fifo
720 : *
721 : * Tries the following steps in order:
722 : * - grab fifo and chunk from freelists
723 : * - batch fifo and chunk allocation
724 : * - single fifo allocation
725 : * - grab multiple fifo chunks from freelists
726 : */
727 : static svm_fifo_shared_t *
728 995 : fs_try_alloc_fifo (fifo_segment_header_t *fsh, u32 slice_index, u32 data_bytes)
729 : {
730 : fifo_segment_slice_t *fss;
731 : u32 fl_index, min_size;
732 : svm_fifo_chunk_t *c;
733 995 : svm_fifo_shared_t *sf = 0;
734 :
735 995 : fss = fsh_slice_get (fsh, slice_index);
736 995 : min_size = clib_max ((fsh->pct_first_alloc * data_bytes) / 100, 4096);
737 995 : fl_index = fs_freelist_for_size (min_size);
738 :
739 995 : if (!fss_chunk_fl_index_is_valid (fss, fl_index))
740 0 : return 0;
741 :
742 995 : sf = fsh_try_alloc_fifo_hdr (fsh, fss);
743 995 : if (!sf)
744 0 : return 0;
745 :
746 995 : c = fsh_try_alloc_chunk (fsh, fss, min_size);
747 995 : if (!c)
748 : {
749 2 : fss_fifo_free_list_push (fsh, fss, sf);
750 2 : return 0;
751 : }
752 :
753 993 : sf->start_chunk = fs_chunk_sptr (fsh, c);
754 1073 : while (c->next)
755 80 : c = fs_chunk_ptr (fsh, c->next);
756 993 : sf->end_chunk = fs_chunk_sptr (fsh, c);
757 993 : sf->size = data_bytes;
758 993 : sf->slice_index = slice_index;
759 :
760 993 : return sf;
761 : }
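/*
 * Note that only min_size bytes are allocated up front: pct_first_alloc
 * percent of the requested size, with a 4 kB floor. sf->size still records
 * the full data_bytes, and additional chunks can be grabbed later via
 * fsh_alloc_chunk below as the fifo grows.
 */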
762 :
763 : svm_fifo_chunk_t *
764 220968 : fsh_alloc_chunk (fifo_segment_header_t * fsh, u32 slice_index, u32 chunk_size)
765 : {
766 : fifo_segment_slice_t *fss;
767 : svm_fifo_chunk_t *c;
768 :
769 220968 : fss = fsh_slice_get (fsh, slice_index);
770 220968 : c = fsh_try_alloc_chunk (fsh, fss, chunk_size);
771 :
772 220968 : return c;
773 : }
774 :
775 : static void
776 212254 : fsh_slice_collect_chunks (fifo_segment_header_t * fsh,
777 : fifo_segment_slice_t * fss, svm_fifo_chunk_t * c)
778 : {
779 212254 : u32 n_collect = 0, fl_index;
780 : svm_fifo_chunk_t *next;
781 :
782 432451 : while (c)
783 : {
784 220197 : clib_mem_unpoison (c, sizeof (*c));
785 220197 : next = fs_chunk_ptr (fsh, c->next);
786 220197 : fl_index = fs_freelist_for_size (c->length);
787 220197 : fss_chunk_free_list_push (fsh, fss, fl_index, c);
788 220197 : n_collect += fs_freelist_index_to_size (fl_index);
789 220197 : c = next;
790 : }
791 :
792 212254 : fss_fl_chunk_bytes_add (fss, n_collect);
793 212254 : fsh_cached_bytes_add (fsh, n_collect);
794 212254 : }
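/*
 * Freed chunks are pushed back onto the per-slice size-class free lists and
 * accounted as cached bytes; they are never returned to the segment arena,
 * whose byte_index only ever moves forward.
 */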
795 :
796 : void
797 211549 : fsh_collect_chunks (fifo_segment_header_t * fsh, u32 slice_index,
798 : svm_fifo_chunk_t * c)
799 : {
800 : fifo_segment_slice_t *fss;
801 211549 : fss = fsh_slice_get (fsh, slice_index);
802 211549 : fsh_slice_collect_chunks (fsh, fss, c);
803 211549 : }
804 :
805 : svm_fifo_t *
806 1207 : fs_fifo_alloc (fifo_segment_t *fs, u32 slice_index)
807 : {
808 1207 : fifo_slice_private_t *pfss = &fs->slices[slice_index];
809 : svm_fifo_t *f;
810 :
811 1207 : f = clib_mem_bulk_alloc (pfss->fifos);
812 1207 : clib_memset (f, 0, sizeof (*f));
813 1207 : return f;
814 : }
815 :
816 : void
817 763 : fs_fifo_free (fifo_segment_t *fs, svm_fifo_t *f, u32 slice_index)
818 : {
819 : fifo_slice_private_t *pfss;
820 :
821 : if (CLIB_DEBUG)
822 763 : clib_memset (f, 0xfc, sizeof (*f));
823 :
824 763 : pfss = &fs->slices[slice_index];
825 763 : clib_mem_bulk_free (pfss->fifos, f);
826 763 : }
827 :
828 : void
829 141 : fifo_segment_cleanup (fifo_segment_t *fs)
830 : {
831 : int slice_index;
832 141 : svm_msg_q_t *mq = 0;
833 :
834 286 : for (slice_index = 0; slice_index < fs->n_slices; slice_index++)
835 145 : clib_mem_bulk_destroy (fs->slices[slice_index].fifos);
836 :
837 141 : vec_free (fs->slices);
838 :
839 218 : vec_foreach (mq, fs->mqs)
840 77 : svm_msg_q_cleanup (mq);
841 :
842 141 : vec_free (fs->mqs);
843 141 : }
844 :
845 : /**
846 : * Allocate fifo in fifo segment
847 : */
848 : svm_fifo_t *
849 997 : fifo_segment_alloc_fifo_w_slice (fifo_segment_t * fs, u32 slice_index,
850 : u32 data_bytes, fifo_segment_ftype_t ftype)
851 : {
852 997 : fifo_segment_header_t *fsh = fs->h;
853 : fifo_slice_private_t *pfss;
854 : fifo_segment_slice_t *fss;
855 : svm_fifo_shared_t *sf;
856 997 : svm_fifo_t *f = 0;
857 :
858 997 : ASSERT (slice_index < fs->n_slices);
859 :
860 997 : if (PREDICT_FALSE (data_bytes > 1 << fsh->max_log2_fifo_size))
861 2 : return 0;
862 :
863 995 : sf = fs_try_alloc_fifo (fsh, slice_index, data_bytes);
864 995 : if (!sf)
865 2 : goto done;
866 :
867 993 : f = fs_fifo_alloc (fs, slice_index);
868 993 : f->fs_hdr = fsh;
869 993 : f->shr = sf;
870 :
871 993 : svm_fifo_init (f, data_bytes);
872 :
873 993 : f->segment_manager = fs->sm_index;
874 993 : f->segment_index = fs->fs_index;
875 :
876 993 : fss = fsh_slice_get (fsh, slice_index);
877 993 : pfss = fs_slice_private_get (fs, slice_index);
878 :
879 : /* If rx fifo type add to active fifos list. When cleaning up segment,
880 : * we need a list of active sessions that should be disconnected. Since
881 : * both rx and tx fifos keep pointers to the session, it's enough to track
882 : * only one. */
883 993 : if (ftype == FIFO_SEGMENT_RX_FIFO)
884 : {
885 569 : pfss_fifo_add_active_list (pfss, f);
886 569 : f->flags |= SVM_FIFO_F_LL_TRACKED;
887 : }
888 :
889 993 : fsh_active_fifos_update (fsh, 1);
890 993 : fss->virtual_mem += svm_fifo_size (f);
891 :
892 995 : done:
893 995 : return (f);
894 : }
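/*
 * Minimal usage sketch (illustrative only; assumes a segment fs created via
 * fifo_segment_create and an arbitrary 64 kB size, error handling omitted):
 *
 *   svm_fifo_t *rx = fifo_segment_alloc_fifo_w_slice (fs, 0, 64 << 10,
 *                                                     FIFO_SEGMENT_RX_FIFO);
 *   if (rx)
 *     {
 *       // ... enqueue/dequeue via the svm_fifo API ...
 *       fifo_segment_free_fifo (fs, rx);
 *     }
 */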
895 :
896 : svm_fifo_t *
897 214 : fifo_segment_alloc_fifo_w_offset (fifo_segment_t *fs, uword offset)
898 : {
899 214 : svm_fifo_t *f = fs_fifo_alloc (fs, 0);
900 : svm_fifo_shared_t *sf;
901 :
902 214 : sf = (svm_fifo_shared_t *) ((u8 *) fs->h + offset);
903 214 : f->fs_hdr = fs->h;
904 214 : f->shr = sf;
905 :
906 214 : f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX;
907 214 : f->segment_index = SVM_FIFO_INVALID_INDEX;
908 214 : f->refcnt = 1;
909 214 : return f;
910 : }
911 :
912 : svm_fifo_t *
913 0 : fifo_segment_duplicate_fifo (fifo_segment_t *fs, svm_fifo_t *f)
914 : {
915 0 : svm_fifo_t *nf = fs_fifo_alloc (fs, 0);
916 0 : clib_memcpy (nf, f, sizeof (*f));
917 0 : return nf;
918 : }
919 :
920 : /**
921 : * Free fifo allocated in fifo segment
922 : */
923 : void
924 729 : fifo_segment_free_fifo (fifo_segment_t * fs, svm_fifo_t * f)
925 : {
926 729 : fifo_segment_header_t *fsh = fs->h;
927 : fifo_slice_private_t *pfss;
928 : fifo_segment_slice_t *fss;
929 : svm_fifo_shared_t *sf;
930 :
931 729 : ASSERT (f->refcnt > 0);
932 :
933 729 : if (--f->refcnt > 0)
934 24 : return;
935 :
936 : /*
937 : * Cleanup shared state
938 : */
939 :
940 705 : sf = f->shr;
941 705 : fss = fsh_slice_get (fsh, sf->slice_index);
942 705 : pfss = fs_slice_private_get (fs, sf->slice_index);
943 :
944 : /* Free fifo chunks */
945 705 : fsh_slice_collect_chunks (fsh, fss, fs_chunk_ptr (fsh, f->shr->start_chunk));
946 :
947 705 : sf->start_chunk = sf->end_chunk = 0;
948 705 : sf->head_chunk = sf->tail_chunk = 0;
949 :
950 : /* Add to free list */
951 705 : fss_fifo_free_list_push (fsh, fss, sf);
952 :
953 705 : fss->virtual_mem -= svm_fifo_size (f);
954 :
955 : /*
956 : * Cleanup private state
957 : */
958 :
959 : /* Remove from active list. Only rx fifos are tracked */
960 705 : if (f->flags & SVM_FIFO_F_LL_TRACKED)
961 : {
962 425 : pfss_fifo_del_active_list (pfss, f);
963 425 : f->flags &= ~SVM_FIFO_F_LL_TRACKED;
964 : }
965 :
966 705 : svm_fifo_free_chunk_lookup (f);
967 705 : svm_fifo_free_ooo_data (f);
968 :
969 : if (CLIB_DEBUG)
970 : {
971 705 : sf->master_session_index = ~0;
972 705 : f->master_thread_index = ~0;
973 : }
974 :
975 705 : f->ooo_enq = f->ooo_deq = 0;
976 705 : f->prev = 0;
977 :
978 705 : fs_fifo_free (fs, f, f->shr->slice_index);
979 :
980 705 : fsh_active_fifos_update (fsh, -1);
981 : }
982 :
983 : void
984 58 : fifo_segment_free_client_fifo (fifo_segment_t *fs, svm_fifo_t *f)
985 : {
986 58 : fs_fifo_free (fs, f, 0 /* clients attach fifos in slice 0 */);
987 58 : }
988 :
989 : void
990 0 : fifo_segment_detach_fifo (fifo_segment_t *fs, svm_fifo_t **f)
991 : {
992 : fifo_slice_private_t *pfss;
993 : fifo_segment_slice_t *fss;
994 0 : svm_fifo_t *of = *f;
995 : u32 slice_index;
996 :
997 0 : slice_index = of->master_thread_index;
998 0 : fss = fsh_slice_get (fs->h, slice_index);
999 0 : pfss = fs_slice_private_get (fs, slice_index);
1000 0 : fss->virtual_mem -= svm_fifo_size (of);
1001 0 : if (of->flags & SVM_FIFO_F_LL_TRACKED)
1002 0 : pfss_fifo_del_active_list (pfss, of);
1003 :
1004 : /* Collect chunks that were provided in return for those detached */
1005 0 : fsh_slice_collect_chunks (fs->h, fss, of->chunks_at_attach);
1006 0 : of->chunks_at_attach = 0;
1007 :
1008 : /* Collect hdr that was provided in return for the detached */
1009 0 : fss_fifo_free_list_push (fs->h, fss, of->hdr_at_attach);
1010 0 : of->hdr_at_attach = 0;
1011 :
1012 0 : clib_mem_bulk_free (pfss->fifos, *f);
1013 0 : *f = 0;
1014 0 : }
1015 :
1016 : void
1017 0 : fifo_segment_attach_fifo (fifo_segment_t *fs, svm_fifo_t **f, u32 slice_index)
1018 : {
1019 0 : svm_fifo_chunk_t *c, *nc, *pc = 0;
1020 : fifo_slice_private_t *pfss;
1021 : fifo_segment_slice_t *fss;
1022 : svm_fifo_t *nf, *of;
1023 :
1024 0 : nf = fs_fifo_alloc (fs, slice_index);
1025 0 : clib_memcpy_fast (nf, *f, sizeof (*nf));
1026 :
1027 0 : fss = fsh_slice_get (fs->h, slice_index);
1028 0 : pfss = fs_slice_private_get (fs, slice_index);
1029 0 : fss->virtual_mem += svm_fifo_size (nf);
1030 0 : nf->next = nf->prev = 0;
1031 0 : if (nf->flags & SVM_FIFO_F_LL_TRACKED)
1032 0 : pfss_fifo_add_active_list (pfss, nf);
1033 :
1034 : /* Allocate shared hdr and chunks to be collected at detach in return
1035 : * for those that are being attached now */
1036 0 : of = *f;
1037 0 : of->hdr_at_attach = fsh_try_alloc_fifo_hdr (fs->h, fss);
1038 :
1039 0 : c = fs_chunk_ptr (fs->h, nf->shr->start_chunk);
1040 0 : of->chunks_at_attach = pc = fsh_try_alloc_chunk (fs->h, fss, c->length);
1041 :
1042 0 : while ((c = fs_chunk_ptr (fs->h, c->next)))
1043 : {
1044 0 : nc = fsh_try_alloc_chunk (fs->h, fss, c->length);
1045 0 : pc->next = fs_chunk_sptr (fs->h, nc);
1046 0 : pc = nc;
1047 : }
1048 :
1049 0 : nf->shr->slice_index = slice_index;
1050 0 : *f = nf;
1051 0 : }
1052 :
1053 : uword
1054 226 : fifo_segment_fifo_offset (svm_fifo_t *f)
1055 : {
1056 226 : return (u8 *) f->shr - (u8 *) f->fs_hdr;
1057 : }
1058 :
1059 : svm_fifo_chunk_t *
1060 12 : fifo_segment_alloc_chunk_w_slice (fifo_segment_t *fs, u32 slice_index,
1061 : u32 chunk_size)
1062 : {
1063 12 : fifo_segment_header_t *fsh = fs->h;
1064 : fifo_segment_slice_t *fss;
1065 :
1066 12 : fss = fsh_slice_get (fsh, slice_index);
1067 12 : return fsh_try_alloc_chunk (fsh, fss, chunk_size);
1068 : }
1069 :
1070 : void
1071 12 : fifo_segment_collect_chunk (fifo_segment_t *fs, u32 slice_index,
1072 : svm_fifo_chunk_t *c)
1073 : {
1074 12 : fsh_collect_chunks (fs->h, slice_index, c);
1075 12 : }
1076 :
1077 : uword
1078 12 : fifo_segment_chunk_offset (fifo_segment_t *fs, svm_fifo_chunk_t *c)
1079 : {
1080 12 : return (u8 *) c - (u8 *) fs->h;
1081 : }
1082 :
1083 : svm_msg_q_t *
1084 290 : fifo_segment_msg_q_alloc (fifo_segment_t *fs, u32 mq_index,
1085 : svm_msg_q_cfg_t *cfg)
1086 : {
1087 290 : fifo_segment_header_t *fsh = fs->h;
1088 : svm_msg_q_shared_t *smq;
1089 : svm_msg_q_t *mq;
1090 : void *base;
1091 : u32 size;
1092 :
1093 290 : if (!fs->mqs)
1094 : {
1095 269 : u32 n_mqs = clib_max (fs->h->n_mqs, 1);
1096 269 : vec_validate (fs->mqs, n_mqs - 1);
1097 : }
1098 :
1099 290 : size = svm_msg_q_size_to_alloc (cfg);
1100 290 : base = fsh_alloc_aligned (fsh, size, 8);
1101 290 : if (!base)
1102 0 : return 0;
1103 :
1104 290 : fsh->n_reserved_bytes += size;
1105 :
1106 290 : smq = svm_msg_q_init (base, cfg);
1107 290 : mq = vec_elt_at_index (fs->mqs, mq_index);
1108 290 : svm_msg_q_attach (mq, smq);
1109 :
1110 290 : return mq;
1111 : }
1112 :
1113 : svm_msg_q_t *
1114 177 : fifo_segment_msg_q_attach (fifo_segment_t *fs, uword offset, u32 mq_index)
1115 : {
1116 : svm_msg_q_t *mq;
1117 :
1118 177 : if (!fs->mqs)
1119 : {
1120 41 : u32 n_mqs = clib_max (fs->h->n_mqs, 1);
1121 41 : vec_validate (fs->mqs, n_mqs - 1);
1122 : }
1123 :
1124 177 : mq = vec_elt_at_index (fs->mqs, mq_index);
1125 :
1126 177 : if (!mq->q.shr)
1127 : {
1128 : svm_msg_q_shared_t *smq;
1129 41 : smq = (svm_msg_q_shared_t *) ((u8 *) fs->h + offset);
1130 41 : svm_msg_q_attach (mq, smq);
1131 : }
1132 :
1133 177 : ASSERT (fifo_segment_msg_q_offset (fs, mq_index) == offset);
1134 :
1135 177 : return mq;
1136 : }
1137 :
1138 : void
1139 41 : fifo_segment_msg_qs_discover (fifo_segment_t *fs, int *fds, u32 n_fds)
1140 : {
1141 : svm_msg_q_shared_t *smq;
1142 : u32 n_mqs, size, i;
1143 41 : uword offset = 0, n_alloced;
1144 : svm_msg_q_t *mq;
1145 :
1146 41 : n_mqs = fs->h->n_mqs;
1147 41 : if (n_fds && n_mqs != n_fds)
1148 : {
1149 0 : clib_warning ("expected %u fds got %u", n_mqs, n_fds);
1150 0 : return;
1151 : }
1152 :
1153 41 : vec_validate (fs->mqs, n_mqs - 1);
1154 41 : n_alloced = fs->h->n_reserved_bytes - fs->h->start_byte_index;
1155 41 : ASSERT (n_alloced % n_mqs == 0);
1156 41 : size = n_alloced / n_mqs;
1157 :
1158 41 : offset = fs->h->start_byte_index;
1159 86 : for (i = 0; i < n_mqs; i++)
1160 : {
1161 45 : mq = vec_elt_at_index (fs->mqs, i);
1162 45 : smq = (svm_msg_q_shared_t *) ((u8 *) fs->h + offset);
1163 45 : svm_msg_q_attach (mq, smq);
1164 45 : if (n_fds)
1165 4 : svm_msg_q_set_eventfd (mq, fds[i]);
1166 45 : offset += size;
1167 : }
1168 : }
1169 :
1170 : uword
1171 433 : fifo_segment_msg_q_offset (fifo_segment_t *fs, u32 mq_index)
1172 : {
1173 433 : svm_msg_q_t *mq = vec_elt_at_index (fs->mqs, mq_index);
1174 :
1175 433 : if (mq->q.shr == 0)
1176 0 : return ~0ULL;
1177 :
1178 433 : return (uword) ((u8 *) mq->q.shr - (u8 *) fs->h) -
1179 : sizeof (svm_msg_q_shared_t);
1180 : }
1181 :
1182 : int
1183 3 : fifo_segment_prealloc_fifo_hdrs (fifo_segment_t * fs, u32 slice_index,
1184 : u32 batch_size)
1185 : {
1186 3 : fifo_segment_header_t *fsh = fs->h;
1187 : fifo_segment_slice_t *fss;
1188 :
1189 3 : fss = fsh_slice_get (fsh, slice_index);
1190 3 : return fsh_try_alloc_fifo_hdr_batch (fsh, fss, batch_size);
1191 : }
1192 :
1193 : int
1194 6 : fifo_segment_prealloc_fifo_chunks (fifo_segment_t * fs, u32 slice_index,
1195 : u32 chunk_size, u32 batch_size)
1196 : {
1197 6 : fifo_segment_header_t *fsh = fs->h;
1198 : fifo_segment_slice_t *fss;
1199 : u32 fl_index;
1200 :
1201 6 : if (!fs_chunk_size_is_valid (fsh, chunk_size))
1202 : {
1203 0 : clib_warning ("chunk size out of range %d", chunk_size);
1204 0 : return -1;
1205 : }
1206 :
1207 6 : fl_index = fs_freelist_for_size (chunk_size);
1208 6 : fss = fsh_slice_get (fsh, slice_index);
1209 :
1210 6 : return fsh_try_alloc_chunk_batch (fsh, fss, fl_index, batch_size);
1211 : }
1212 :
1213 : /**
1214 : * Pre-allocates fifo pairs in fifo segment
1215 : */
1216 : void
1217 74 : fifo_segment_preallocate_fifo_pairs (fifo_segment_t * fs,
1218 : u32 rx_fifo_size, u32 tx_fifo_size,
1219 : u32 * n_fifo_pairs)
1220 : {
1221 : u32 rx_rounded_data_size, tx_rounded_data_size, pair_size, pairs_to_alloc;
1222 : u32 hdrs, pairs_per_slice, alloc_now;
1223 74 : fifo_segment_header_t *fsh = fs->h;
1224 : int rx_fl_index, tx_fl_index, i;
1225 : fifo_segment_slice_t *fss;
1226 : uword space_available;
1227 :
1228 : /* Parameter check */
1229 74 : if (rx_fifo_size == 0 || tx_fifo_size == 0 || *n_fifo_pairs == 0)
1230 0 : return;
1231 :
1232 74 : if (!fs_chunk_size_is_valid (fsh, rx_fifo_size))
1233 : {
1234 0 : clib_warning ("rx fifo_size out of range %d", rx_fifo_size);
1235 0 : return;
1236 : }
1237 :
1238 74 : if (!fs_chunk_size_is_valid (fsh, tx_fifo_size))
1239 : {
1240 0 : clib_warning ("tx fifo_size out of range %d", tx_fifo_size);
1241 0 : return;
1242 : }
1243 :
1244 74 : rx_rounded_data_size = (1 << (max_log2 (rx_fifo_size)));
1245 74 : rx_fl_index = fs_freelist_for_size (rx_fifo_size);
1246 74 : tx_rounded_data_size = (1 << (max_log2 (tx_fifo_size)));
1247 74 : tx_fl_index = fs_freelist_for_size (tx_fifo_size);
1248 :
1249 74 : hdrs = sizeof (svm_fifo_t) + sizeof (svm_fifo_chunk_t);
1250 :
1251 : /* Calculate space requirements */
1252 74 : pair_size = 2 * hdrs + rx_rounded_data_size + tx_rounded_data_size;
1253 74 : space_available = fsh_n_free_bytes (fsh);
1254 74 : pairs_to_alloc = space_available / pair_size;
1255 74 : pairs_to_alloc = clib_min (pairs_to_alloc, *n_fifo_pairs);
1256 74 : pairs_per_slice = pairs_to_alloc / fs->n_slices;
1257 74 : pairs_per_slice += pairs_to_alloc % fs->n_slices ? 1 : 0;
1258 :
1259 74 : if (!pairs_per_slice)
1260 1 : return;
1261 :
1262 150 : for (i = 0; i < fs->n_slices; i++)
1263 : {
1264 77 : alloc_now = clib_min (pairs_per_slice, *n_fifo_pairs);
1265 77 : if (0 == alloc_now)
1266 0 : break;
1267 :
1268 77 : fss = fsh_slice_get (fsh, i);
1269 77 : if (fs_try_alloc_fifo_batch (fsh, fss, rx_fl_index, alloc_now))
1270 0 : clib_warning ("rx prealloc failed: pairs %u", alloc_now);
1271 77 : if (fs_try_alloc_fifo_batch (fsh, fss, tx_fl_index, alloc_now))
1272 0 : clib_warning ("tx prealloc failed: pairs %u", alloc_now);
1273 :
1274 : /* Account for the pairs allocated */
1275 77 : *n_fifo_pairs -= alloc_now;
1276 : }
1277 : }
1278 :
1279 : /**
1280 : * Get number of active fifos
1281 : */
1282 : u32
1283 4 : fifo_segment_num_fifos (fifo_segment_t * fs)
1284 : {
1285 4 : return fsh_n_active_fifos (fs->h);
1286 : }
1287 :
1288 : static u32
1289 2 : fs_slice_num_free_fifos (fifo_segment_header_t *fsh, fifo_segment_slice_t *fss)
1290 : {
1291 : svm_fifo_shared_t *f;
1292 2 : u32 count = 0;
1293 :
1294 2 : f = fs_ptr (fsh, fss->free_fifos);
1295 2 : if (f == 0)
1296 0 : return 0;
1297 :
1298 116 : while (f)
1299 : {
1300 114 : f = fs_ptr (fsh, f->next);
1301 114 : count++;
1302 : }
1303 2 : return count;
1304 : }
1305 :
1306 : u32
1307 2 : fifo_segment_num_free_fifos (fifo_segment_t * fs)
1308 : {
1309 2 : fifo_segment_header_t *fsh = fs->h;
1310 : fifo_segment_slice_t *fss;
1311 : int slice_index;
1312 2 : u32 count = 0;
1313 :
1314 4 : for (slice_index = 0; slice_index < fs->n_slices; slice_index++)
1315 : {
1316 2 : fss = fsh_slice_get (fsh, slice_index);
1317 2 : count += fs_slice_num_free_fifos (fsh, fss);
1318 : }
1319 2 : return count;
1320 : }
1321 :
1322 : static u32
1323 13 : fs_slice_num_free_chunks (fifo_segment_header_t *fsh,
1324 : fifo_segment_slice_t *fss, u32 size)
1325 : {
1326 13 : u32 count = 0, rounded_size, fl_index;
1327 : svm_fifo_chunk_t *c;
1328 : int i;
1329 :
1330 : /* Count all free chunks? */
1331 13 : if (size == ~0)
1332 : {
1333 36 : for (i = 0; i < FS_CHUNK_VEC_LEN; i++)
1334 : {
1335 33 : c = fss_chunk_free_list_head (fsh, fss, i);
1336 33 : if (c == 0)
1337 28 : continue;
1338 :
1339 101 : while (c)
1340 : {
1341 96 : c = fs_chunk_ptr (fsh, c->next);
1342 96 : count++;
1343 : }
1344 : }
1345 3 : return count;
1346 : }
1347 :
1348 10 : rounded_size = (1 << (max_log2 (size)));
1349 10 : fl_index = fs_freelist_for_size (rounded_size);
1350 :
1351 10 : if (fl_index >= FS_CHUNK_VEC_LEN)
1352 0 : return 0;
1353 :
1354 10 : c = fss_chunk_free_list_head (fsh, fss, fl_index);
1355 10 : if (c == 0)
1356 2 : return 0;
1357 :
1358 201 : while (c)
1359 : {
1360 193 : c = fs_chunk_ptr (fsh, c->next);
1361 193 : count++;
1362 : }
1363 8 : return count;
1364 : }
1365 :
1366 : u32
1367 13 : fifo_segment_num_free_chunks (fifo_segment_t * fs, u32 size)
1368 : {
1369 13 : fifo_segment_header_t *fsh = fs->h;
1370 : fifo_segment_slice_t *fss;
1371 : int slice_index;
1372 13 : u32 count = 0;
1373 :
1374 26 : for (slice_index = 0; slice_index < fs->n_slices; slice_index++)
1375 : {
1376 13 : fss = fsh_slice_get (fsh, slice_index);
1377 13 : count += fs_slice_num_free_chunks (fsh, fss, size);
1378 : }
1379 13 : return count;
1380 : }
1381 :
1382 : uword
1383 18 : fifo_segment_size (fifo_segment_t * fs)
1384 : {
1385 18 : return fs->h->max_byte_index - fs->h->n_reserved_bytes;
1386 : }
1387 :
1388 : static u8
1389 14 : fs_has_reached_mem_limit (fifo_segment_t *fs)
1390 : {
1391 14 : return (fs->flags & FIFO_SEGMENT_F_MEM_LIMIT) ? 1 : 0;
1392 : }
1393 :
1394 : static void
1395 0 : fs_reset_mem_limit (fifo_segment_t *fs)
1396 : {
1397 0 : fs->flags &= ~FIFO_SEGMENT_F_MEM_LIMIT;
1398 0 : }
1399 :
1400 : void *
1401 0 : fifo_segment_alloc (fifo_segment_t *fs, uword size)
1402 : {
1403 0 : void *rv = fsh_alloc (fs->h, size);
1404 : /* Mark externally allocated bytes as reserved. This helps
1405 : * @ref fifo_segment_size report bytes used only for fifos */
1406 0 : fs->h->n_reserved_bytes += size;
1407 0 : return rv;
1408 : }
1409 :
1410 : uword
1411 24 : fifo_segment_free_bytes (fifo_segment_t * fs)
1412 : {
1413 24 : return fsh_n_free_bytes (fs->h);
1414 : }
1415 :
1416 : uword
1417 14 : fifo_segment_cached_bytes (fifo_segment_t * fs)
1418 : {
1419 14 : return fsh_n_cached_bytes (fs->h);
1420 : }
1421 :
1422 : uword
1423 1888 : fifo_segment_available_bytes (fifo_segment_t * fs)
1424 : {
1425 1888 : return fsh_n_free_bytes (fs->h) + fsh_n_cached_bytes (fs->h);
1426 : }
1427 :
1428 : uword
1429 12 : fifo_segment_fl_chunk_bytes (fifo_segment_t * fs)
1430 : {
1431 12 : fifo_segment_header_t *fsh = fs->h;
1432 : fifo_segment_slice_t *fss;
1433 12 : uword n_bytes = 0;
1434 : int slice_index;
1435 :
1436 24 : for (slice_index = 0; slice_index < fs->n_slices; slice_index++)
1437 : {
1438 12 : fss = fsh_slice_get (fsh, slice_index);
1439 12 : n_bytes += fss_fl_chunk_bytes (fss);
1440 : }
1441 :
1442 12 : return n_bytes;
1443 : }
1444 :
1445 : u8
1446 520 : fifo_segment_has_fifos (fifo_segment_t * fs)
1447 : {
1448 520 : return (fsh_n_active_fifos (fs->h) != 0);
1449 : }
1450 :
1451 : svm_fifo_t *
1452 100 : fifo_segment_get_slice_fifo_list (fifo_segment_t * fs, u32 slice_index)
1453 : {
1454 : fifo_slice_private_t *pfss;
1455 :
1456 100 : pfss = fs_slice_private_get (fs, slice_index);
1457 100 : return pfss->active_fifos;
1458 : }
1459 :
1460 : u8
1461 14 : fifo_segment_get_mem_usage (fifo_segment_t * fs)
1462 : {
1463 : uword size, in_use;
1464 :
1465 14 : size = fifo_segment_size (fs);
1466 14 : in_use =
1467 14 : size - fifo_segment_free_bytes (fs) - fifo_segment_cached_bytes (fs);
1468 14 : return (in_use * 100) / size;
1469 : }
1470 :
1471 : fifo_segment_mem_status_t
1472 14 : fifo_segment_determine_status (fifo_segment_t *fs, u8 usage)
1473 : {
1474 14 : if (!fs->high_watermark || !fs->low_watermark)
1475 0 : return MEMORY_PRESSURE_NO_PRESSURE;
1476 :
 1477 : /* Once the no-memory state is detected, it persists
 1478 : * until memory usage drops below the high watermark
1479 : */
1480 14 : if (fs_has_reached_mem_limit (fs))
1481 : {
1482 0 : if (usage >= fs->high_watermark)
1483 0 : return MEMORY_PRESSURE_NO_MEMORY;
1484 : else
1485 0 : fs_reset_mem_limit (fs);
1486 : }
1487 :
1488 14 : if (usage >= fs->high_watermark)
1489 3 : return MEMORY_PRESSURE_HIGH_PRESSURE;
1490 :
1491 11 : else if (usage >= fs->low_watermark)
1492 4 : return MEMORY_PRESSURE_LOW_PRESSURE;
1493 :
1494 7 : return MEMORY_PRESSURE_NO_PRESSURE;
1495 : }
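/*
 * Hysteresis example with hypothetical watermarks high = 80 and low = 50:
 * usage 85 reports HIGH_PRESSURE, but once the segment has hit its memory
 * limit the status stays NO_MEMORY until usage drops back under 80; usage
 * 60 reports LOW_PRESSURE and usage 40 reports NO_PRESSURE.
 */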
1496 :
1497 : fifo_segment_mem_status_t
1498 14 : fifo_segment_get_mem_status (fifo_segment_t * fs)
1499 : {
1500 14 : u8 usage = fifo_segment_get_mem_usage (fs);
1501 :
1502 14 : return fifo_segment_determine_status (fs, usage);
1503 : }
1504 :
1505 : u8 *
1506 0 : format_fifo_segment_type (u8 * s, va_list * args)
1507 : {
1508 : fifo_segment_t *sp;
1509 0 : sp = va_arg (*args, fifo_segment_t *);
1510 0 : ssvm_segment_type_t st = ssvm_type (&sp->ssvm);
1511 :
1512 0 : if (st == SSVM_SEGMENT_PRIVATE)
1513 0 : s = format (s, "%s", "private");
1514 0 : else if (st == SSVM_SEGMENT_MEMFD)
1515 0 : s = format (s, "%s", "memfd");
1516 0 : else if (st == SSVM_SEGMENT_SHM)
1517 0 : s = format (s, "%s", "shm");
1518 : else
1519 0 : s = format (s, "%s", "unknown");
1520 0 : return s;
1521 : }
1522 :
1523 : /**
1524 : * Segment format function
1525 : */
1526 : u8 *
1527 0 : format_fifo_segment (u8 * s, va_list * args)
1528 : {
1529 : u32 count, indent, active_fifos, free_fifos;
1530 0 : fifo_segment_t *fs = va_arg (*args, fifo_segment_t *);
1531 0 : int verbose __attribute__ ((unused)) = va_arg (*args, int);
1532 : uword est_chunk_bytes, est_free_seg_bytes, free_chunks;
1533 0 : uword chunk_bytes = 0, free_seg_bytes, chunk_size;
1534 : uword tracked_cached_bytes;
1535 0 : uword fifo_hdr = 0, reserved;
1536 : fifo_segment_header_t *fsh;
1537 : fifo_segment_slice_t *fss;
1538 : svm_fifo_chunk_t *c;
1539 : u32 slice_index;
1540 : char *address;
1541 : size_t size;
1542 : int i;
1543 : uword allocated, in_use, virt;
1544 : f64 usage;
1545 : fifo_segment_mem_status_t mem_st;
1546 :
1547 0 : indent = format_get_indent (s);
1548 :
1549 0 : fifo_segment_info (fs, &address, &size);
1550 0 : active_fifos = fifo_segment_num_fifos (fs);
1551 0 : free_fifos = fifo_segment_num_free_fifos (fs);
1552 :
1553 0 : s = format (s, "%U%v type: %U size: %U active fifos: %u", format_white_space,
1554 0 : 2, ssvm_name (&fs->ssvm), format_fifo_segment_type, fs,
1555 : format_memory_size, size, active_fifos);
1556 :
1557 0 : if (!verbose)
1558 0 : return s;
1559 :
1560 0 : fsh = fs->h;
1561 :
1562 0 : free_chunks = fifo_segment_num_free_chunks (fs, ~0);
1563 0 : if (free_chunks)
1564 0 : s = format (s, "\n\n%UFree/Allocated chunks by size:\n",
1565 : format_white_space, indent + 2);
1566 : else
1567 0 : s = format (s, "\n");
1568 :
1569 0 : for (slice_index = 0; slice_index < fs->n_slices; slice_index++)
1570 : {
1571 0 : fss = fsh_slice_get (fsh, slice_index);
1572 0 : for (i = 0; i < FS_CHUNK_VEC_LEN; i++)
1573 : {
1574 0 : c = fss_chunk_free_list_head (fsh, fss, i);
1575 0 : if (c == 0 && fss->num_chunks[i] == 0)
1576 0 : continue;
1577 0 : count = 0;
1578 0 : while (c)
1579 : {
1580 0 : c = fs_chunk_ptr (fsh, c->next);
1581 0 : count++;
1582 : }
1583 :
1584 0 : chunk_size = fs_freelist_index_to_size (i);
1585 0 : s = format (s, "%U%-5u kB: %u/%u\n", format_white_space, indent + 2,
1586 : chunk_size >> 10, count, fss->num_chunks[i]);
1587 :
1588 0 : chunk_bytes += count * chunk_size;
1589 : }
1590 : }
1591 :
1592 0 : fifo_hdr = free_fifos * sizeof (svm_fifo_t);
1593 0 : est_chunk_bytes = fifo_segment_fl_chunk_bytes (fs);
1594 0 : est_free_seg_bytes = fifo_segment_free_bytes (fs);
1595 0 : free_seg_bytes = fifo_segment_free_bytes (fs);
1596 0 : tracked_cached_bytes = fifo_segment_cached_bytes (fs);
1597 0 : allocated = fifo_segment_size (fs);
1598 0 : in_use = fifo_segment_size (fs) - est_free_seg_bytes - tracked_cached_bytes;
1599 0 : usage = (100.0 * in_use) / allocated;
1600 0 : mem_st = fifo_segment_get_mem_status (fs);
1601 0 : virt = fs_virtual_mem (fs);
1602 0 : reserved = fsh->n_reserved_bytes;
1603 :
1604 0 : s = format (s, "\n%Useg free bytes: %U (%lu) estimated: %U (%lu) reserved:"
1605 : " %U (%lu)\n", format_white_space, indent + 2,
1606 : format_memory_size, free_seg_bytes, free_seg_bytes,
1607 : format_memory_size, est_free_seg_bytes, est_free_seg_bytes,
1608 : format_memory_size, reserved, reserved);
1609 0 : s = format (s, "%Uchunk free bytes: %U (%lu) estimated: %U (%lu) tracked:"
1610 : " %U (%lu)\n", format_white_space, indent + 2,
1611 : format_memory_size, chunk_bytes, chunk_bytes,
1612 : format_memory_size, est_chunk_bytes, est_chunk_bytes,
1613 : format_memory_size, tracked_cached_bytes, tracked_cached_bytes);
1614 0 : s = format (s, "%Ufifo active: %u hdr free: %u bytes: %U (%u) \n",
1615 : format_white_space, indent + 2, fsh->n_active_fifos, free_fifos,
1616 : format_memory_size, fifo_hdr, fifo_hdr);
1617 0 : s = format (s, "%Usegment usage: %.2f%% (%U / %U) virt: %U status: %s\n",
1618 : format_white_space, indent + 2, usage, format_memory_size,
1619 : in_use, format_memory_size, allocated, format_memory_size, virt,
1620 : fifo_segment_mem_status_strings[mem_st]);
1621 0 : s = format (s, "\n");
1622 :
1623 0 : return s;
1624 : }
1625 :
1626 : /*
1627 : * fd.io coding-style-patch-verification: ON
1628 : *
1629 : * Local Variables:
1630 : * eval: (c-set-style "gnu")
1631 : * End:
1632 : */
|