/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sched.h>

#include <vppinfra/format.h>
#include <vppinfra/linux/sysfs.h>
#include <vppinfra/mem.h>
#include <vppinfra/hash.h>
#include <vppinfra/pmalloc.h>
#include <vppinfra/cpu.h>

#if __SIZEOF_POINTER__ >= 8
#define DEFAULT_RESERVED_MB 16384
#else
#define DEFAULT_RESERVED_MB 256
#endif
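
/* Note: when clib_pmalloc_init () below is called with size == 0, it
 * reserves DEFAULT_RESERVED_MB << 20 bytes of virtual address space,
 * i.e. 16 GiB on 64-bit targets and 256 MiB on 32-bit ones. */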

static inline clib_pmalloc_chunk_t *
get_chunk (clib_pmalloc_page_t * pp, u32 index)
{
  return pool_elt_at_index (pp->chunks, index);
}

static inline uword
pmalloc_size2pages (uword size, u32 log2_page_sz)
{
  return round_pow2 (size, 1ULL << log2_page_sz) >> log2_page_sz;
}
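
/* Worked example (illustrative): with 2 MB default hugepages
 * (log2_page_sz == 21), a 5 MB request is rounded up by round_pow2 ()
 * to 6 MB and then shifted right by 21, yielding 3 pages. */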

__clib_export int
clib_pmalloc_init (clib_pmalloc_main_t * pm, uword base_addr, uword size)
{
  uword base, pagesize;
  u64 *pt = 0;

  ASSERT (pm->error == 0);

  pagesize = clib_mem_get_default_hugepage_size ();
  pm->def_log2_page_sz = min_log2 (pagesize);
  pm->lookup_log2_page_sz = pm->def_log2_page_sz;

  /* check if pagemap is accessible */
  pt = clib_mem_vm_get_paddr (&pt, CLIB_MEM_PAGE_SZ_DEFAULT, 1);
  if (pt == 0 || pt[0] == 0)
    pm->flags |= CLIB_PMALLOC_F_NO_PAGEMAP;

  size = size ? size : ((u64) DEFAULT_RESERVED_MB) << 20;
  size = round_pow2 (size, pagesize);

  pm->max_pages = size >> pm->def_log2_page_sz;

  base = clib_mem_vm_reserve (base_addr, size, pm->def_log2_page_sz);

  if (base == ~0)
    {
      pm->error = clib_error_return (0, "failed to reserve %u pages",
                                     pm->max_pages);
      return -1;
    }

  pm->base = uword_to_pointer (base, void *);
  return 0;
}
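
/* Usage sketch (illustrative, not part of the original source): let the
 * allocator pick the base address and reserve the default amount of
 * address space, then check the return value:
 *
 *   static clib_pmalloc_main_t pmalloc_main;
 *
 *   if (clib_pmalloc_init (&pmalloc_main, 0, 0) != 0)
 *     clib_error_report (pmalloc_main.error);
 */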

static inline void *
alloc_chunk_from_page (clib_pmalloc_main_t * pm, clib_pmalloc_page_t * pp,
                       u32 n_blocks, u32 block_align, u32 numa_node)
{
  clib_pmalloc_chunk_t *c = 0;
  clib_pmalloc_arena_t *a;
  void *va;
  u32 off;
  u32 alloc_chunk_index;

  a = pool_elt_at_index (pm->arenas, pp->arena_index);

  if (pp->chunks == 0)
    {
      u32 i, start = 0, prev = ~0;

      for (i = 0; i < a->subpages_per_page; i++)
        {
          pool_get (pp->chunks, c);
          c->start = start;
          c->prev = prev;
          c->size = pp->n_free_blocks / a->subpages_per_page;
          start += c->size;
          if (prev == ~0)
            pp->first_chunk_index = c - pp->chunks;
          else
            pp->chunks[prev].next = c - pp->chunks;
          prev = c - pp->chunks;
        }
      c->next = ~0;
      pp->n_free_chunks = a->subpages_per_page;
    }

  if (pp->n_free_blocks < n_blocks)
    return 0;

  alloc_chunk_index = pp->first_chunk_index;

next_chunk:
  c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
  off = (block_align - (c->start & (block_align - 1))) & (block_align - 1);

  if (c->used || n_blocks + off > c->size)
    {
      if (c->next == ~0)
        return 0;
      alloc_chunk_index = c->next;
      goto next_chunk;
    }

  /* if alignment is needed, create a new empty chunk */
  if (off)
    {
      u32 offset_chunk_index;
      clib_pmalloc_chunk_t *co;
      pool_get (pp->chunks, c);
      pp->n_free_chunks++;
      offset_chunk_index = alloc_chunk_index;
      alloc_chunk_index = c - pp->chunks;

      co = pool_elt_at_index (pp->chunks, offset_chunk_index);
      c->size = co->size - off;
      c->next = co->next;
      c->start = co->start + off;
      c->prev = offset_chunk_index;
      co->size = off;
      co->next = alloc_chunk_index;
    }

  c->used = 1;
  if (c->size > n_blocks)
    {
      u32 tail_chunk_index;
      clib_pmalloc_chunk_t *ct;
      pool_get (pp->chunks, ct);
      pp->n_free_chunks++;
      tail_chunk_index = ct - pp->chunks;
      c = pool_elt_at_index (pp->chunks, alloc_chunk_index);
      ct->size = c->size - n_blocks;
      ct->next = c->next;
      ct->prev = alloc_chunk_index;
      ct->start = c->start + n_blocks;

      c->size = n_blocks;
      c->next = tail_chunk_index;
      if (ct->next != ~0)
        pool_elt_at_index (pp->chunks, ct->next)->prev = tail_chunk_index;
    }
  else if (c->next != ~0)
    pool_elt_at_index (pp->chunks, c->next)->prev = alloc_chunk_index;

  c = get_chunk (pp, alloc_chunk_index);
  va = pm->base + ((pp - pm->pages) << pm->def_log2_page_sz) +
    (c->start << PMALLOC_LOG2_BLOCK_SZ);
  hash_set (pm->chunk_index_by_va, pointer_to_uword (va), alloc_chunk_index);
  pp->n_free_blocks -= n_blocks;
  pp->n_free_chunks--;
  return va;
}
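
/* Worked example (illustrative): requesting n_blocks = 4 with
 * block_align = 4 from a free chunk starting at block 3 gives
 * off = (4 - (3 & 3)) & 3 = 1, so the chunk is split three ways: a
 * 1-block free "offset" chunk at block 3, the 4-block used chunk at
 * block 4, and a free tail chunk covering whatever remains. */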

static void
pmalloc_update_lookup_table (clib_pmalloc_main_t * pm, u32 first, u32 count)
{
  uword seek, va, pa, p;
  int fd;
  u32 elts_per_page = 1U << (pm->def_log2_page_sz - pm->lookup_log2_page_sz);

  vec_validate_aligned (pm->lookup_table, vec_len (pm->pages) *
                        elts_per_page - 1, CLIB_CACHE_LINE_BYTES);

  p = (uword) first * elts_per_page;
  if (pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP)
    {
      while (p < (uword) elts_per_page * count)
        {
          pm->lookup_table[p] = pointer_to_uword (pm->base) +
            (p << pm->lookup_log2_page_sz);
          p++;
        }
      return;
    }

  fd = open ((char *) "/proc/self/pagemap", O_RDONLY);
  while (p < (uword) elts_per_page * count)
    {
      va = pointer_to_uword (pm->base) + (p << pm->lookup_log2_page_sz);
      pa = 0;
      seek = (va >> clib_mem_get_log2_page_size ()) * sizeof (pa);
      if (fd != -1 && lseek (fd, seek, SEEK_SET) == seek &&
          read (fd, &pa, sizeof (pa)) == (sizeof (pa)) &&
          pa & (1ULL << 63) /* page present bit */ )
        {
          pa = (pa & pow2_mask (55)) << clib_mem_get_log2_page_size ();
        }
      pm->lookup_table[p] = va - pa;
      p++;
    }

  if (fd != -1)
    close (fd);
}
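
/* Note on the pagemap format (Documentation/admin-guide/mm/pagemap.rst in
 * the kernel tree): each 64-bit entry of /proc/self/pagemap carries the
 * page-present flag in bit 63 and the physical frame number in bits 0-54,
 * hence the pow2_mask (55) above.  The lookup table stores va - pa per
 * lookup page, so a physical address is later recovered simply as
 *
 *   pa = va - pm->lookup_table[lookup_page_index_of (va)];
 *
 * where lookup_page_index_of () stands in for the index computation done
 * by users of the table. */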

static inline clib_pmalloc_page_t *
pmalloc_map_pages (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
                   u32 numa_node, u32 n_pages)
{
  clib_mem_page_stats_t stats = {};
  clib_pmalloc_page_t *pp = 0;
  int rv, i, mmap_flags;
  void *va = MAP_FAILED;
  uword size = (uword) n_pages << pm->def_log2_page_sz;

  clib_error_free (pm->error);

  if (pm->max_pages <= vec_len (pm->pages))
    {
      pm->error = clib_error_return (0, "maximum number of pages reached");
      return 0;
    }

  if (a->log2_subpage_sz != clib_mem_get_log2_page_size ())
    {
      pm->error = clib_sysfs_prealloc_hugepages (numa_node,
                                                 a->log2_subpage_sz, n_pages);

      if (pm->error)
        return 0;
    }

  rv = clib_mem_set_numa_affinity (numa_node, /* force */ 1);
  if (rv == CLIB_MEM_ERROR && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to set mempolicy for "
                                          "numa node %u", numa_node);
      return 0;
    }

  mmap_flags = MAP_FIXED;

  if (a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM)
    {
      mmap_flags |= MAP_SHARED;
      a->fd = clib_mem_vm_create_fd (a->log2_subpage_sz, "%s", a->name);
      if (a->fd == -1)
        goto error;
      if ((ftruncate (a->fd, size)) == -1)
        goto error;
    }
  else
    {
      if (a->log2_subpage_sz != clib_mem_get_log2_page_size ())
        mmap_flags |= MAP_HUGETLB;

      mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
      a->fd = -1;
    }

  va = pm->base + (((uword) vec_len (pm->pages)) << pm->def_log2_page_sz);
  if (mmap (va, size, PROT_READ | PROT_WRITE, mmap_flags, a->fd, 0) ==
      MAP_FAILED)
    {
      pm->error = clib_error_return_unix (0, "failed to mmap %u pages at %p "
                                          "fd %d numa %d flags 0x%x", n_pages,
                                          va, a->fd, numa_node, mmap_flags);
      va = MAP_FAILED;
      goto error;
    }

  if (a->log2_subpage_sz != clib_mem_get_log2_page_size () &&
      mlock (va, size) != 0)
    {
      pm->error = clib_error_return_unix (0, "Unable to lock pages");
      goto error;
    }

  clib_memset (va, 0, size);

  rv = clib_mem_set_default_numa_affinity ();
  if (rv == CLIB_MEM_ERROR && numa_node != 0)
    {
      pm->error = clib_error_return_unix (0, "failed to restore mempolicy");
      goto error;
    }

  /* we tolerate move_pages failure only if the request is for numa node 0,
     to support non-numa kernels */
  clib_mem_get_page_stats (va, CLIB_MEM_PAGE_SZ_DEFAULT, 1, &stats);

  if (stats.per_numa[numa_node] != 1 &&
      !(numa_node == 0 && stats.unknown == 1))
    {
      u16 allocated_at = ~0;
      if (stats.unknown)
        {
          /* fix: this error was previously constructed but discarded */
          pm->error =
            clib_error_return (0,
                               "unable to get information about numa allocation");
          goto error;
        }

      for (u16 i = 0; i < CLIB_MAX_NUMAS; i++)
        if (stats.per_numa[i] == 1)
          allocated_at = i;

      /* fix: this error was previously constructed but discarded */
      pm->error =
        clib_error_return (0,
                           "page allocated on the wrong numa node (%u), "
                           "expected %u",
                           allocated_at, numa_node);

      goto error;
    }

  for (i = 0; i < n_pages; i++)
    {
      vec_add2 (pm->pages, pp, 1);
      pp->n_free_blocks = 1 << (pm->def_log2_page_sz - PMALLOC_LOG2_BLOCK_SZ);
      pp->index = pp - pm->pages;
      pp->arena_index = a->index;
      vec_add1 (a->page_indices, pp->index);
      a->n_pages++;
    }

  /* if the new arena is using a smaller page size, we need to rebuild the
     whole lookup table */
  if (a->log2_subpage_sz < pm->lookup_log2_page_sz)
    {
      pm->lookup_log2_page_sz = a->log2_subpage_sz;
      pmalloc_update_lookup_table (pm, vec_len (pm->pages) - n_pages,
                                   n_pages);
    }
  else
    pmalloc_update_lookup_table (pm, 0, vec_len (pm->pages));

  /* return pointer to the 1st page */
  return pp - (n_pages - 1);

error:
  if (va != MAP_FAILED)
    {
      /* unmap & re-reserve */
      munmap (va, size);
      mmap (va, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
            -1, 0);
    }
  if (a->fd != -1)
    close (a->fd);
  return 0;
}
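
/* Design note: the heap's entire address range is reserved up front by
 * clib_pmalloc_init (), and pages are committed into it with MAP_FIXED
 * mmap () calls at computed offsets from pm->base.  On failure the range
 * is unmapped and immediately re-mapped PROT_NONE, returning it to the
 * reserved state so later fixed-address mappings stay safe. */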

__clib_export void *
clib_pmalloc_create_shared_arena (clib_pmalloc_main_t * pm, char *name,
                                  uword size, u32 log2_page_sz, u32 numa_node)
{
  clib_pmalloc_arena_t *a;
  clib_pmalloc_page_t *pp;
  u32 n_pages;

  clib_error_free (pm->error);

  if (log2_page_sz == 0)
    log2_page_sz = pm->def_log2_page_sz;
  else if (log2_page_sz != pm->def_log2_page_sz &&
           log2_page_sz != clib_mem_get_log2_page_size ())
    {
      pm->error = clib_error_create ("unsupported page size (%uKB)",
                                     1 << (log2_page_sz - 10));
      return 0;
    }

  n_pages = pmalloc_size2pages (size, pm->def_log2_page_sz);

  if (n_pages + vec_len (pm->pages) > pm->max_pages)
    return 0;

  if (numa_node == CLIB_PMALLOC_NUMA_LOCAL)
    numa_node = clib_get_current_numa_node ();

  pool_get (pm->arenas, a);
  a->index = a - pm->arenas;
  a->name = format (0, "%s%c", name, 0);
  a->numa_node = numa_node;
  a->flags = CLIB_PMALLOC_ARENA_F_SHARED_MEM;
  a->log2_subpage_sz = log2_page_sz;
  a->subpages_per_page = 1U << (pm->def_log2_page_sz - log2_page_sz);

  if ((pp = pmalloc_map_pages (pm, a, numa_node, n_pages)) == 0)
    {
      vec_free (a->name);
      memset (a, 0, sizeof (*a));
      pool_put (pm->arenas, a);
      return 0;
    }

  return pm->base + ((uword) pp->index << pm->def_log2_page_sz);
}
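
/* Usage sketch (illustrative, not part of the original source; the arena
 * name is hypothetical): carve a named, shareable 64 MB region out of the
 * heap on the caller's numa node, using the default page size:
 *
 *   void *base = clib_pmalloc_create_shared_arena (pm, "packet-buffers",
 *                                                  64 << 20, 0,
 *                                                  CLIB_PMALLOC_NUMA_LOCAL);
 *   if (base == 0)
 *     clib_error_report (pm->error);
 */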

static inline void *
clib_pmalloc_alloc_inline (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
                           uword size, uword align, u32 numa_node)
{
  clib_pmalloc_page_t *pp;
  u32 n_blocks, block_align, *page_index;

  ASSERT (is_pow2 (align));

  if (numa_node == CLIB_PMALLOC_NUMA_LOCAL)
    numa_node = clib_get_current_numa_node ();

  if (a == 0)
    {
      if (size > 1ULL << pm->def_log2_page_sz)
        return 0;

      vec_validate_init_empty (pm->default_arena_for_numa_node,
                               numa_node, ~0);
      if (pm->default_arena_for_numa_node[numa_node] == ~0)
        {
          pool_get (pm->arenas, a);
          pm->default_arena_for_numa_node[numa_node] = a - pm->arenas;
          a->name = format (0, "default-numa-%u%c", numa_node, 0);
          a->numa_node = numa_node;
          a->log2_subpage_sz = pm->def_log2_page_sz;
          a->subpages_per_page = 1;
        }
      else
        a = pool_elt_at_index (pm->arenas,
                               pm->default_arena_for_numa_node[numa_node]);
    }
  else if (size > 1ULL << a->log2_subpage_sz)
    return 0;

  n_blocks = round_pow2 (size, PMALLOC_BLOCK_SZ) / PMALLOC_BLOCK_SZ;
  block_align = align >> PMALLOC_LOG2_BLOCK_SZ;

  vec_foreach (page_index, a->page_indices)
  {
    pp = vec_elt_at_index (pm->pages, *page_index);
    void *rv = alloc_chunk_from_page (pm, pp, n_blocks, block_align,
                                      numa_node);

    if (rv)
      return rv;
  }

  if ((a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM) == 0 &&
      (pp = pmalloc_map_pages (pm, a, numa_node, 1)))
    return alloc_chunk_from_page (pm, pp, n_blocks, block_align, numa_node);

  return 0;
}

__clib_export void *
clib_pmalloc_alloc_aligned_on_numa (clib_pmalloc_main_t * pm, uword size,
                                    uword align, u32 numa_node)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align, numa_node);
}

__clib_export void *
clib_pmalloc_alloc_aligned (clib_pmalloc_main_t *pm, uword size, uword align)
{
  return clib_pmalloc_alloc_inline (pm, 0, size, align,
                                    CLIB_PMALLOC_NUMA_LOCAL);
}

__clib_export void *
clib_pmalloc_alloc_from_arena (clib_pmalloc_main_t *pm, void *arena_va,
                               uword size, uword align)
{
  clib_pmalloc_arena_t *a = clib_pmalloc_get_arena (pm, arena_va);
  return clib_pmalloc_alloc_inline (pm, a, size, align, 0);
}
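
/* Usage sketch (illustrative): allocate a cache-line-aligned object from
 * the arena created above and pair it with clib_pmalloc_free ():
 *
 *   void *obj = clib_pmalloc_alloc_from_arena (pm, base, 2048,
 *                                              CLIB_CACHE_LINE_BYTES);
 *   if (obj)
 *     {
 *       ... use obj ...
 *       clib_pmalloc_free (pm, obj);
 *     }
 */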

static inline int
pmalloc_chunks_mergeable (clib_pmalloc_arena_t * a, clib_pmalloc_page_t * pp,
                          u32 ci1, u32 ci2)
{
  clib_pmalloc_chunk_t *c1, *c2;

  if (ci1 == ~0 || ci2 == ~0)
    return 0;

  c1 = get_chunk (pp, ci1);
  c2 = get_chunk (pp, ci2);

  if (c1->used || c2->used)
    return 0;

  if (c1->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ) !=
      c2->start >> (a->log2_subpage_sz - PMALLOC_LOG2_BLOCK_SZ))
    return 0;

  return 1;
}
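
/* Note: the final check compares the subpage index of each chunk's start
 * block; neighbouring free chunks are only considered mergeable when they
 * lie on the same subpage, since physical contiguity is guaranteed only
 * within a single subpage. */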

__clib_export void
clib_pmalloc_free (clib_pmalloc_main_t * pm, void *va)
{
  clib_pmalloc_page_t *pp;
  clib_pmalloc_chunk_t *c;
  clib_pmalloc_arena_t *a;
  uword *p;
  u32 chunk_index, page_index;

  p = hash_get (pm->chunk_index_by_va, pointer_to_uword (va));

  if (p == 0)
    os_panic ();

  chunk_index = p[0];
  page_index = clib_pmalloc_get_page_index (pm, va);
  hash_unset (pm->chunk_index_by_va, pointer_to_uword (va));

  pp = vec_elt_at_index (pm->pages, page_index);
  c = pool_elt_at_index (pp->chunks, chunk_index);
  a = pool_elt_at_index (pm->arenas, pp->arena_index);
  c->used = 0;
  pp->n_free_blocks += c->size;
  pp->n_free_chunks++;

  /* merge with next if free */
  if (pmalloc_chunks_mergeable (a, pp, chunk_index, c->next))
    {
      clib_pmalloc_chunk_t *next = get_chunk (pp, c->next);
      c->size += next->size;
      c->next = next->next;
      if (next->next != ~0)
        get_chunk (pp, next->next)->prev = chunk_index;
      memset (next, 0, sizeof (*next));
      pool_put (pp->chunks, next);
      pp->n_free_chunks--;
    }

  /* merge with prev if free */
  if (pmalloc_chunks_mergeable (a, pp, c->prev, chunk_index))
    {
      clib_pmalloc_chunk_t *prev = get_chunk (pp, c->prev);
      prev->size += c->size;
      prev->next = c->next;
      if (c->next != ~0)
        get_chunk (pp, c->next)->prev = c->prev;
      memset (c, 0, sizeof (*c));
      pool_put (pp->chunks, c);
      pp->n_free_chunks--;
    }
}
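
/* Worked example (illustrative): freeing the middle chunk of
 * [A: free][B: used][C: free] first merges C into B, then merges the
 * enlarged B into A, leaving a single free chunk; n_free_chunks is
 * incremented once for the free and decremented once per merge. */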

static u8 *
format_pmalloc_page (u8 * s, va_list * va)
{
  clib_pmalloc_page_t *pp = va_arg (*va, clib_pmalloc_page_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  if (pp->chunks == 0)
    return s;

  s = format (s, "free %u chunks %u free-chunks %d ",
              (pp->n_free_blocks) << PMALLOC_LOG2_BLOCK_SZ,
              pool_elts (pp->chunks), pp->n_free_chunks);

  if (verbose >= 2)
    {
      clib_pmalloc_chunk_t *c;
      c = pool_elt_at_index (pp->chunks, pp->first_chunk_index);
      s = format (s, "\n%U%12s%12s%8s%8s%8s%8s",
                  format_white_space, indent + 2,
                  "chunk offset", "size", "used", "index", "prev", "next");
      while (1)
        {
          s = format (s, "\n%U%12u%12u%8s%8d%8d%8d",
                      format_white_space, indent + 2,
                      c->start << PMALLOC_LOG2_BLOCK_SZ,
                      c->size << PMALLOC_LOG2_BLOCK_SZ,
                      c->used ? "yes" : "no",
                      c - pp->chunks, c->prev, c->next);
          if (c->next == ~0)
            break;
          c = pool_elt_at_index (pp->chunks, c->next);
        }
    }
  return s;
}

__clib_export u8 *
format_pmalloc (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);
  int verbose = va_arg (*va, int);
  u32 indent = format_get_indent (s);

  clib_pmalloc_page_t *pp;
  clib_pmalloc_arena_t *a;

  s = format (s, "used-pages %u reserved-pages %u default-page-size %U "
              "lookup-page-size %U%s", vec_len (pm->pages), pm->max_pages,
              format_log2_page_size, pm->def_log2_page_sz,
              format_log2_page_size, pm->lookup_log2_page_sz,
              pm->flags & CLIB_PMALLOC_F_NO_PAGEMAP ? " no-pagemap" : "");

  if (verbose >= 2)
    s = format (s, " va-start %p", pm->base);

  if (pm->error)
    s = format (s, "\n%Ulast-error: %U", format_white_space, indent + 2,
                format_clib_error, pm->error);

  /* *INDENT-OFF* */
  pool_foreach (a, pm->arenas)
    {
      u32 *page_index;
      s = format (s, "\n%Uarena '%s' pages %u subpage-size %U numa-node %u",
                  format_white_space, indent + 2, a->name,
                  vec_len (a->page_indices), format_log2_page_size,
                  a->log2_subpage_sz, a->numa_node);
      if (a->fd != -1)
        s = format (s, " shared fd %d", a->fd);
      if (verbose >= 1)
        vec_foreach (page_index, a->page_indices)
          {
            pp = vec_elt_at_index (pm->pages, *page_index);
            s = format (s, "\n%U%U", format_white_space, indent + 4,
                        format_pmalloc_page, pp, verbose);
          }
    }
  /* *INDENT-ON* */

  return s;
}

__clib_export u8 *
format_pmalloc_map (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);

  u32 index;
  s = format (s, "%16s %13s %8s", "virtual-addr", "physical-addr", "size");
  vec_foreach_index (index, pm->lookup_table)
  {
    uword *lookup_val, pa, va;
    lookup_val = vec_elt_at_index (pm->lookup_table, index);
    va = pointer_to_uword (pm->base) +
      ((uword) index << pm->lookup_log2_page_sz);
    pa = va - *lookup_val;
    s = format (s, "\n %16p %13p %8U", uword_to_pointer (va, u64),
                uword_to_pointer (pa, u64), format_log2_page_size,
                pm->lookup_log2_page_sz);
  }
  return s;
}
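
/* Usage sketch (illustrative): the format functions above plug into the
 * vppinfra %U conversion, e.g.
 *
 *   fformat (stdout, "%U\n", format_pmalloc, pm, 2);   // verbose dump
 *   fformat (stdout, "%U\n", format_pmalloc_map, pm);  // va -> pa table
 */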

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
