/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vppinfra/format.h>
#include <vppinfra/dlmalloc.h>
#include <vppinfra/os.h>
#include <vppinfra/lock.h>
#include <vppinfra/hash.h>
#include <vppinfra/elf_clib.h>

typedef struct
{
  /* Address of callers: outer first, inner last. */
  uword callers[12];

  /* Count of allocations with this traceback. */
  u32 n_allocations;

  /* Count of bytes allocated with this traceback. */
  u32 n_bytes;

  /* Offset of this item */
  uword offset;
} mheap_trace_t;

typedef struct
{
  clib_spinlock_t lock;

  mheap_trace_t *traces;

  /* Indices of free traces. */
  u32 *trace_free_list;

  /* Hash table mapping callers to trace index. */
  uword *trace_by_callers;

  /* Hash table mapping mheap offset to trace index. */
  uword *trace_index_by_offset;

  /* So we can easily shut off current segment trace, if any */
  const clib_mem_heap_t *current_traced_mheap;

} mheap_trace_main_t;

mheap_trace_main_t mheap_trace_main;

static __thread int mheap_trace_thread_disable;

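/* How these fit together: each distinct allocation backtrace owns one
   mheap_trace_t, found via trace_by_callers; every live traced allocation
   is additionally indexed by its heap offset in trace_index_by_offset, so
   the matching trace can be found again when the object is freed. */
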
static void
mheap_get_trace_internal (const clib_mem_heap_t *heap, uword offset,
			  uword size)
{
  mheap_trace_main_t *tm = &mheap_trace_main;
  mheap_trace_t *t;
  uword i, n_callers, trace_index, *p;
  mheap_trace_t trace;

  if (heap != tm->current_traced_mheap || mheap_trace_thread_disable)
    return;

  /* Spurious Coverity warnings be gone. */
  clib_memset (&trace, 0, sizeof (trace));

  clib_spinlock_lock (&tm->lock);

  /* heap could have changed while we were waiting on the lock */
  if (heap != tm->current_traced_mheap)
    goto out;

  /* Turn off tracing for this thread to avoid embarrassment... */
  mheap_trace_thread_disable = 1;

  /* Skip our frame and mspace_get_aligned's frame */
  n_callers = clib_backtrace (trace.callers, ARRAY_LEN (trace.callers), 2);
  if (n_callers == 0)
    goto out;

  if (!tm->trace_by_callers)
    tm->trace_by_callers =
      hash_create_shmem (0, sizeof (trace.callers), sizeof (uword));

  p = hash_get_mem (tm->trace_by_callers, &trace.callers);
  if (p)
    {
      trace_index = p[0];
      t = tm->traces + trace_index;
    }
  else
    {
      i = vec_len (tm->trace_free_list);
      if (i > 0)
	{
	  trace_index = tm->trace_free_list[i - 1];
	  vec_set_len (tm->trace_free_list, i - 1);
	}
      else
	{
	  mheap_trace_t *old_start = tm->traces;
	  mheap_trace_t *old_end = vec_end (tm->traces);

	  vec_add2 (tm->traces, t, 1);

	  if (tm->traces != old_start)
	    {
	      hash_pair_t *p;
	      mheap_trace_t *q;
	      /* *INDENT-OFF* */
	      hash_foreach_pair (p, tm->trace_by_callers,
	      ({
		q = uword_to_pointer (p->key, mheap_trace_t *);
		ASSERT (q >= old_start && q < old_end);
		p->key = pointer_to_uword (tm->traces + (q - old_start));
	      }));
	      /* *INDENT-ON* */
	    }
	  trace_index = t - tm->traces;
	}

      t = tm->traces + trace_index;
      t[0] = trace;
      t->n_allocations = 0;
      t->n_bytes = 0;
      hash_set_mem (tm->trace_by_callers, t->callers, trace_index);
    }

  t->n_allocations += 1;
  t->n_bytes += size;
  t->offset = offset; /* keep a sample to autopsy */
  hash_set (tm->trace_index_by_offset, offset, t - tm->traces);

out:
  mheap_trace_thread_disable = 0;
  clib_spinlock_unlock (&tm->lock);
}

static void
mheap_put_trace_internal (const clib_mem_heap_t *heap, uword offset,
			  uword size)
{
  mheap_trace_t *t;
  uword trace_index, *p;
  mheap_trace_main_t *tm = &mheap_trace_main;

  if (heap != tm->current_traced_mheap || mheap_trace_thread_disable)
    return;

  clib_spinlock_lock (&tm->lock);

  /* heap could have changed while we were waiting on the lock */
  if (heap != tm->current_traced_mheap)
    goto out;

  /* Turn off tracing for this thread for a moment */
  mheap_trace_thread_disable = 1;

  p = hash_get (tm->trace_index_by_offset, offset);
  if (!p)
    goto out;

  trace_index = p[0];
  hash_unset (tm->trace_index_by_offset, offset);
  ASSERT (trace_index < vec_len (tm->traces));

  t = tm->traces + trace_index;
  ASSERT (t->n_allocations > 0);
  ASSERT (t->n_bytes >= size);
  t->n_allocations -= 1;
  t->n_bytes -= size;
  if (t->n_allocations == 0)
    {
      hash_unset_mem (tm->trace_by_callers, t->callers);
      vec_add1 (tm->trace_free_list, trace_index);
      clib_memset (t, 0, sizeof (t[0]));
    }

out:
  mheap_trace_thread_disable = 0;
  clib_spinlock_unlock (&tm->lock);
}

void
mheap_get_trace (uword offset, uword size)
{
  mheap_get_trace_internal (clib_mem_get_heap (), offset, size);
}

void
mheap_put_trace (uword offset, uword size)
{
  mheap_put_trace_internal (clib_mem_get_heap (), offset, size);
}

always_inline void
mheap_trace_main_free (mheap_trace_main_t * tm)
{
  CLIB_SPINLOCK_ASSERT_LOCKED (&tm->lock);
  tm->current_traced_mheap = 0;
  vec_free (tm->traces);
  vec_free (tm->trace_free_list);
  hash_free (tm->trace_by_callers);
  hash_free (tm->trace_index_by_offset);
  mheap_trace_thread_disable = 0;
}

static clib_mem_heap_t *
clib_mem_create_heap_internal (void *base, uword size,
			       clib_mem_page_sz_t log2_page_sz, int is_locked,
			       char *name)
{
  clib_mem_heap_t *h;
  u8 flags = 0;
  int sz = sizeof (clib_mem_heap_t);

  if (base == 0)
    {
      log2_page_sz = clib_mem_log2_page_size_validate (log2_page_sz);
      size = round_pow2 (size, clib_mem_page_bytes (log2_page_sz));
      base = clib_mem_vm_map_internal (0, log2_page_sz, size, -1, 0,
				       "main heap");

      if (base == CLIB_MEM_VM_MAP_FAILED)
	return 0;

      flags = CLIB_MEM_HEAP_F_UNMAP_ON_DESTROY;
    }
  else
    log2_page_sz = CLIB_MEM_PAGE_SZ_UNKNOWN;

  if (is_locked)
    flags |= CLIB_MEM_HEAP_F_LOCKED;

  h = base;
  h->base = base;
  h->size = size;
  h->log2_page_sz = log2_page_sz;
  h->flags = flags;
  sz = strlen (name);
  strcpy (h->name, name);
  sz = round_pow2 (sz + sizeof (clib_mem_heap_t), 16);
  h->mspace = create_mspace_with_base (base + sz, size - sz, is_locked);

  mspace_disable_expand (h->mspace);

  clib_mem_poison (mspace_least_addr (h->mspace),
		   mspace_footprint (h->mspace));

  return h;
}

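/* Layout note: the clib_mem_heap_t header, including the inline name
   string, lives at the very start of the mapping; the dlmalloc mspace
   begins at the next 16-byte-rounded offset past the header, so the
   usable arena is the mapping size minus that rounded header. */
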
/* Initialize the CLIB heap based on memory/size given by the user.
   Set memory to 0 and CLIB will try to allocate its own heap. */
static void *
clib_mem_init_internal (void *base, uword size,
			clib_mem_page_sz_t log2_page_sz)
{
  clib_mem_heap_t *h;

  clib_mem_main_init ();

  h = clib_mem_create_heap_internal (base, size, log2_page_sz,
				     1 /*is_locked */ , "main heap");

  clib_mem_set_heap (h);

  if (mheap_trace_main.lock == 0)
    {
      /* clib_spinlock_init() dynamically allocates the spinlock in the
       * current per-cpu heap, but this lock is used for all traces across
       * all heaps, so we can't allocate it in the current per-cpu heap:
       * that heap could be destroyed later. */
      static struct clib_spinlock_s mheap_trace_main_lock = {};
      mheap_trace_main.lock = &mheap_trace_main_lock;
    }

  return h;
}

__clib_export void *
clib_mem_init (void *memory, uword memory_size)
{
  return clib_mem_init_internal (memory, memory_size,
				 CLIB_MEM_PAGE_SZ_DEFAULT);
}

__clib_export void *
clib_mem_init_with_page_size (uword memory_size,
			      clib_mem_page_sz_t log2_page_sz)
{
  return clib_mem_init_internal (0, memory_size, log2_page_sz);
}

__clib_export void *
clib_mem_init_thread_safe (void *memory, uword memory_size)
{
  return clib_mem_init_internal (memory, memory_size,
				 CLIB_MEM_PAGE_SZ_DEFAULT);
}

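/* Usage sketch (illustrative, not part of this file): a process typically
   initializes the main heap once at startup, passing memory == 0 so that
   clib_mem_init_internal() maps the memory itself, e.g.

     int
     main (int argc, char *argv[])
     {
       clib_mem_init (0, 64ULL << 20);	// hypothetical 64 MB main heap
       ...
     }
*/
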
__clib_export void
clib_mem_destroy (void)
{
  mheap_trace_main_t *tm = &mheap_trace_main;
  clib_mem_heap_t *heap = clib_mem_get_heap ();

  /* If this is the currently traced heap, stop tracing before teardown;
     current_traced_mheap holds the heap pointer, not the mspace. */
  if (heap == tm->current_traced_mheap)
    mheap_trace (heap, 0);

  destroy_mspace (heap->mspace);
  clib_mem_vm_unmap (heap);
}

__clib_export u8 *
format_clib_mem_usage (u8 *s, va_list *va)
{
  int verbose = va_arg (*va, int);
  return format (s, "$$$$ heap at %llx verbose %d", clib_mem_get_heap (),
		 verbose);
}

/*
 * Magic decoder ring for mallinfo stats (ala dlmalloc):
 *
 * size_t arena;     / * Non-mmapped space allocated (bytes) * /
 * size_t ordblks;   / * Number of free chunks * /
 * size_t smblks;    / * Number of free fastbin blocks * /
 * size_t hblks;     / * Number of mmapped regions * /
 * size_t hblkhd;    / * Space allocated in mmapped regions (bytes) * /
 * size_t usmblks;   / * Maximum total allocated space (bytes) * /
 * size_t fsmblks;   / * Space in freed fastbin blocks (bytes) * /
 * size_t uordblks;  / * Total allocated space (bytes) * /
 * size_t fordblks;  / * Total free space (bytes) * /
 * size_t keepcost;  / * Top-most, releasable space (bytes) * /
 *
 */

u8 *
format_msize (u8 * s, va_list * va)
{
  uword a = va_arg (*va, uword);

  if (a >= 1ULL << 30)
    s = format (s, "%.2fG", (((f64) a) / ((f64) (1ULL << 30))));
  else if (a >= 1ULL << 20)
    s = format (s, "%.2fM", (((f64) a) / ((f64) (1ULL << 20))));
  else if (a >= 1ULL << 10)
    s = format (s, "%.2fK", (((f64) a) / ((f64) (1ULL << 10))));
  else
    s = format (s, "%lld", a);
  return s;
}

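/* For example, "%U" with format_msize renders 1536 as "1.50K" and
   (3ULL << 20) as "3.00M"; values below 1K print as a plain integer. */
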
static int
mheap_trace_sort (const void *_t1, const void *_t2)
{
  const mheap_trace_t *t1 = _t1;
  const mheap_trace_t *t2 = _t2;
  word cmp;

  cmp = (word) t2->n_bytes - (word) t1->n_bytes;
  if (!cmp)
    cmp = (word) t2->n_allocations - (word) t1->n_allocations;
  return cmp;
}

u8 *
format_mheap_trace (u8 * s, va_list * va)
{
  mheap_trace_main_t *tm = va_arg (*va, mheap_trace_main_t *);
  int verbose = va_arg (*va, int);
  int have_traces = 0;
  int i;
  int n = 0;

  clib_spinlock_lock (&tm->lock);
  if (vec_len (tm->traces) > 0 &&
      clib_mem_get_heap () == tm->current_traced_mheap)
    {
      have_traces = 1;

      /* Make a copy of traces since we'll be sorting them. */
      mheap_trace_t *t, *traces_copy;
      u32 indent, total_objects_traced;

      traces_copy = vec_dup (tm->traces);

      qsort (traces_copy, vec_len (traces_copy), sizeof (traces_copy[0]),
	     mheap_trace_sort);

      total_objects_traced = 0;
      s = format (s, "\n");
      vec_foreach (t, traces_copy)
	{
	  /* Skip over free elements. */
	  if (t->n_allocations == 0)
	    continue;

	  total_objects_traced += t->n_allocations;

	  /* When not verbose, only report the 50 biggest allocations */
	  if (!verbose && n >= 50)
	    continue;
	  n++;

	  if (t == traces_copy)
	    s = format (s, "%=9s%=9s %=10s Traceback\n", "Bytes", "Count",
			"Sample");
	  s = format (s, "%9d%9d %p", t->n_bytes, t->n_allocations, t->offset);
	  indent = format_get_indent (s);
	  for (i = 0; i < ARRAY_LEN (t->callers) && t->callers[i]; i++)
	    {
	      if (i > 0)
		s = format (s, "%U", format_white_space, indent);
#if defined(CLIB_UNIX) && !defined(__APPLE__)
	      /* $$$$ does this actually work? */
	      s =
		format (s, " %U\n", format_clib_elf_symbol_with_address,
			t->callers[i]);
#else
	      s = format (s, " %p\n", t->callers[i]);
#endif
	    }
	}

      s = format (s, "%d total traced objects\n", total_objects_traced);

      vec_free (traces_copy);
    }
  clib_spinlock_unlock (&tm->lock);
  if (have_traces == 0)
    s = format (s, "no traced allocations\n");

  return s;
}

__clib_export u8 *
format_clib_mem_heap (u8 * s, va_list * va)
{
  clib_mem_heap_t *heap = va_arg (*va, clib_mem_heap_t *);
  int verbose = va_arg (*va, int);
  struct dlmallinfo mi;
  mheap_trace_main_t *tm = &mheap_trace_main;
  u32 indent = format_get_indent (s) + 2;

  if (heap == 0)
    heap = clib_mem_get_heap ();

  mi = mspace_mallinfo (heap->mspace);

  s = format (s, "base %p, size %U",
	      heap->base, format_memory_size, heap->size);

#define _(i,v,str) \
  if (heap->flags & CLIB_MEM_HEAP_F_##v) s = format (s, ", %s", str);
  foreach_clib_mem_heap_flag;
#undef _

  s = format (s, ", name '%s'", heap->name);

  if (heap->log2_page_sz != CLIB_MEM_PAGE_SZ_UNKNOWN)
    {
      clib_mem_page_stats_t stats;
      clib_mem_get_page_stats (heap->base, heap->log2_page_sz,
			       heap->size >> heap->log2_page_sz, &stats);
      s = format (s, "\n%U%U", format_white_space, indent,
		  format_clib_mem_page_stats, &stats);
    }

  s = format (s, "\n%Utotal: %U, used: %U, free: %U, trimmable: %U",
	      format_white_space, indent,
	      format_msize, mi.arena,
	      format_msize, mi.uordblks,
	      format_msize, mi.fordblks, format_msize, mi.keepcost);
  if (verbose > 0)
    {
      s = format (s, "\n%Ufree chunks %llu free fastbin blks %llu",
		  format_white_space, indent + 2, mi.ordblks, mi.smblks);
      s = format (s, "\n%Umax total allocated %U",
		  format_white_space, indent + 2, format_msize, mi.usmblks);
    }

  if (heap->flags & CLIB_MEM_HEAP_F_TRACED)
    s = format (s, "\n%U", format_mheap_trace, tm, verbose);
  return s;
}

__clib_export __clib_flatten void
clib_mem_get_heap_usage (clib_mem_heap_t *heap, clib_mem_usage_t *usage)
{
  struct dlmallinfo mi = mspace_mallinfo (heap->mspace);

  usage->bytes_total = mi.arena;	    /* non-mmapped space allocated from system */
  usage->bytes_used = mi.uordblks;	    /* total allocated space */
  usage->bytes_free = mi.fordblks;	    /* total free space */
  usage->bytes_used_mmap = mi.hblkhd;	    /* space in mmapped regions */
  usage->bytes_max = mi.usmblks;	    /* maximum total allocated space */
  usage->bytes_free_reclaimed = mi.ordblks; /* number of free chunks */
  usage->bytes_overhead = mi.keepcost;	    /* releasable (via malloc_trim) space */

  /* Not supported */
  usage->bytes_used_sbrk = 0;
  usage->object_count = 0;
}

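/* Usage sketch (illustrative): the snapshot can be printed with
   format_msize from this file, e.g.

     clib_mem_usage_t u;
     clib_mem_get_heap_usage (clib_mem_get_heap (), &u);
     fformat (stdout, "%U used of %U\n",
	      format_msize, u.bytes_used, format_msize, u.bytes_total);
*/
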
/* Call serial number for debugger breakpoints. */
uword clib_mem_validate_serial = 0;

__clib_export void
mheap_trace (clib_mem_heap_t * h, int enable)
{
  mheap_trace_main_t *tm = &mheap_trace_main;

  clib_spinlock_lock (&tm->lock);

  if (tm->current_traced_mheap != 0 && tm->current_traced_mheap != h)
    {
      clib_warning ("tracing already enabled for another heap, ignoring");
      goto out;
    }

  if (enable)
    {
      h->flags |= CLIB_MEM_HEAP_F_TRACED;
      tm->current_traced_mheap = h;
    }
  else
    {
      h->flags &= ~CLIB_MEM_HEAP_F_TRACED;
      mheap_trace_main_free (&mheap_trace_main);
    }

out:
  clib_spinlock_unlock (&tm->lock);
}

__clib_export void
clib_mem_trace (int enable)
{
  void *current_heap = clib_mem_get_heap ();
  mheap_trace (current_heap, enable);
}

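/* Usage sketch (illustrative): only one heap can be traced at a time, so a
   minimal session on the current heap looks like

     clib_mem_trace (1);	// start recording backtraces
     void *p = clib_mem_alloc (128);
     fformat (stdout, "%U\n",
	      format_clib_mem_heap, (clib_mem_heap_t *) 0, 1);	// verbose dump
     clib_mem_trace (0);	// stop and free all trace state
*/
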
int
clib_mem_is_traced (void)
{
  clib_mem_heap_t *h = clib_mem_get_heap ();
  return (h->flags & CLIB_MEM_HEAP_F_TRACED) != 0;
}

__clib_export uword
clib_mem_trace_enable_disable (uword enable)
{
  uword rv = !mheap_trace_thread_disable;
  mheap_trace_thread_disable = !enable;
  return rv;
}

__clib_export clib_mem_heap_t *
clib_mem_create_heap (void *base, uword size, int is_locked, char *fmt, ...)
{
  clib_mem_page_sz_t log2_page_sz = clib_mem_get_log2_page_size ();
  clib_mem_heap_t *h;
  char *name;
  u8 *s = 0;

  if (fmt == 0)
    {
      name = "";
    }
  else if (strchr (fmt, '%'))
    {
      va_list va;
      va_start (va, fmt);
      s = va_format (0, fmt, &va);
      vec_add1 (s, 0);
      va_end (va);
      name = (char *) s;
    }
  else
    name = fmt;

  h = clib_mem_create_heap_internal (base, size, log2_page_sz, is_locked,
				     name);
  vec_free (s);
  return h;
}

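/* Usage sketch (illustrative): create a dedicated, locked 1 MB heap with a
   formatted name, allocate from it, then tear it down, e.g.

     clib_mem_heap_t *h =
       clib_mem_create_heap (0, 1 << 20, 1, "worker-%u heap", 3);
     void *p = clib_mem_heap_alloc (h, 256);
     clib_mem_heap_free (h, p);
     clib_mem_destroy_heap (h);
*/
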
__clib_export void
clib_mem_destroy_heap (clib_mem_heap_t * h)
{
  mheap_trace_main_t *tm = &mheap_trace_main;

  /* If this is the currently traced heap, stop tracing before teardown;
     current_traced_mheap holds the heap pointer, not the mspace. */
  if (h == tm->current_traced_mheap)
    mheap_trace (h, 0);

  destroy_mspace (h->mspace);
  if (h->flags & CLIB_MEM_HEAP_F_UNMAP_ON_DESTROY)
    clib_mem_vm_unmap (h->base);
}

__clib_export __clib_flatten uword
clib_mem_get_heap_free_space (clib_mem_heap_t *h)
{
  struct dlmallinfo dlminfo = mspace_mallinfo (h->mspace);
  return dlminfo.fordblks;
}

__clib_export __clib_flatten void *
clib_mem_get_heap_base (clib_mem_heap_t *h)
{
  return h->base;
}

__clib_export __clib_flatten uword
clib_mem_get_heap_size (clib_mem_heap_t *heap)
{
  return heap->size;
}

/* Memory allocator which may call os_out_of_memory() if it fails */
static inline void *
clib_mem_heap_alloc_inline (void *heap, uword size, uword align,
			    int os_out_of_memory_on_failure)
{
  clib_mem_heap_t *h = heap ? heap : clib_mem_get_per_cpu_heap ();
  void *p;

  align = clib_max (CLIB_MEM_MIN_ALIGN, align);

  p = mspace_memalign (h->mspace, align, size);

  if (PREDICT_FALSE (0 == p))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  if (PREDICT_FALSE (h->flags & CLIB_MEM_HEAP_F_TRACED))
    mheap_get_trace_internal (h, pointer_to_uword (p), clib_mem_size (p));

  clib_mem_unpoison (p, size);
  return p;
}

/* Memory allocator which calls os_out_of_memory() when it fails */
__clib_export __clib_flatten void *
clib_mem_alloc (uword size)
{
  return clib_mem_heap_alloc_inline (0, size, CLIB_MEM_MIN_ALIGN,
				     /* os_out_of_memory */ 1);
}

__clib_export __clib_flatten void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_heap_alloc_inline (0, size, align,
				     /* os_out_of_memory */ 1);
}

/* Memory allocator which returns 0 when it fails */
__clib_export __clib_flatten void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_heap_alloc_inline (0, size, CLIB_MEM_MIN_ALIGN,
				     /* os_out_of_memory */ 0);
}

__clib_export __clib_flatten void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_heap_alloc_inline (0, size, align,
				     /* os_out_of_memory */ 0);
}

__clib_export __clib_flatten void *
clib_mem_heap_alloc (void *heap, uword size)
{
  return clib_mem_heap_alloc_inline (heap, size, CLIB_MEM_MIN_ALIGN,
				     /* os_out_of_memory */ 1);
}

__clib_export __clib_flatten void *
clib_mem_heap_alloc_aligned (void *heap, uword size, uword align)
{
  return clib_mem_heap_alloc_inline (heap, size, align,
				     /* os_out_of_memory */ 1);
}

__clib_export __clib_flatten void *
clib_mem_heap_alloc_or_null (void *heap, uword size)
{
  return clib_mem_heap_alloc_inline (heap, size, CLIB_MEM_MIN_ALIGN,
				     /* os_out_of_memory */ 0);
}

__clib_export __clib_flatten void *
clib_mem_heap_alloc_aligned_or_null (void *heap, uword size, uword align)
{
  return clib_mem_heap_alloc_inline (heap, size, align,
				     /* os_out_of_memory */ 0);
}

__clib_export __clib_flatten void *
clib_mem_heap_realloc_aligned (void *heap, void *p, uword new_size,
			       uword align)
{
  uword old_alloc_size;
  clib_mem_heap_t *h = heap ? heap : clib_mem_get_per_cpu_heap ();
  void *new;

  ASSERT (count_set_bits (align) == 1);

  old_alloc_size = p ? mspace_usable_size (p) : 0;

  if (new_size == old_alloc_size)
    return p;

  if (p && pointer_is_aligned (p, align) &&
      mspace_realloc_in_place (h->mspace, p, new_size))
    {
      clib_mem_unpoison (p, new_size);
      if (PREDICT_FALSE (h->flags & CLIB_MEM_HEAP_F_TRACED))
	{
	  mheap_put_trace_internal (h, pointer_to_uword (p), old_alloc_size);
	  mheap_get_trace_internal (h, pointer_to_uword (p),
				    clib_mem_size (p));
	}
    }
  else
    {
      new = clib_mem_heap_alloc_inline (h, new_size, align, 1);

      clib_mem_unpoison (new, new_size);
      if (old_alloc_size)
	{
	  clib_mem_unpoison (p, old_alloc_size);
	  clib_memcpy_fast (new, p, clib_min (new_size, old_alloc_size));
	  clib_mem_heap_free (h, p);
	}
      p = new;
    }

  return p;
}

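/* Behavior note with a usage sketch (illustrative): the fast path above
   resizes in place via mspace_realloc_in_place(); otherwise the data is
   copied into a fresh allocation and the old one is freed, e.g.

     u8 *buf = clib_mem_alloc (64);
     buf = clib_mem_realloc (buf, 128);	// first 64 bytes are preserved
*/
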
__clib_export __clib_flatten void *
clib_mem_heap_realloc (void *heap, void *p, uword new_size)
{
  return clib_mem_heap_realloc_aligned (heap, p, new_size, CLIB_MEM_MIN_ALIGN);
}

__clib_export __clib_flatten void *
clib_mem_realloc_aligned (void *p, uword new_size, uword align)
{
  return clib_mem_heap_realloc_aligned (0, p, new_size, align);
}

__clib_export __clib_flatten void *
clib_mem_realloc (void *p, uword new_size)
{
  return clib_mem_heap_realloc_aligned (0, p, new_size, CLIB_MEM_MIN_ALIGN);
}

__clib_export __clib_flatten uword
clib_mem_heap_is_heap_object (void *heap, void *p)
{
  clib_mem_heap_t *h = heap ? heap : clib_mem_get_per_cpu_heap ();
  return mspace_is_heap_object (h->mspace, p);
}

__clib_export __clib_flatten uword
clib_mem_is_heap_object (void *p)
{
  return clib_mem_heap_is_heap_object (0, p);
}

__clib_export __clib_flatten void
clib_mem_heap_free (void *heap, void *p)
{
  clib_mem_heap_t *h = heap ? heap : clib_mem_get_per_cpu_heap ();
  uword size = clib_mem_size (p);

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_heap_is_heap_object (h, p));

  if (PREDICT_FALSE (h->flags & CLIB_MEM_HEAP_F_TRACED))
    mheap_put_trace_internal (h, pointer_to_uword (p), size);
  clib_mem_poison (p, clib_mem_size (p));

  mspace_free (h->mspace, p);
}

__clib_export __clib_flatten void
clib_mem_free (void *p)
{
  clib_mem_heap_free (0, p);
}

__clib_export __clib_flatten uword
clib_mem_size (void *p)
{
  return mspace_usable_size (p);
}

__clib_export void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  clib_mem_unpoison (p, size);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}