/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vppinfra/valloc.h>

/** @file
    @brief Simple first-fit virtual space allocator
*/

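/* A minimal usage sketch (illustrative only; the base address below is a
   hypothetical value, not anything this library mandates):

   @code
     clib_valloc_main_t _vam, *vam = &_vam;
     clib_valloc_chunk_t _ch, *ch = &_ch;
     uword va;

     clib_memset (ch, 0, sizeof (*ch));  // only baseva and size are read
     ch->baseva = 0x200000000ULL;        // hypothetical arena base
     ch->size = 1ULL << 20;              // 1 MB of virtual space
     clib_valloc_init (vam, ch, 1);      // 1 => protect arena with a spinlock

     va = clib_valloc_alloc (vam, 4 << 10, 0);  // 0 => return 0 on failure
     if (va)
       clib_valloc_free (vam, va);       // returns the chunk size
   @endcode
*/
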
/** Add a chunk of memory to a virtual allocation arena
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param template - clib_valloc_chunk_t * pointer to a template chunk which
    describes the virtual address range to add

    @note Only the baseva and size members of the template chunk are
    significant. It's perfectly OK for the new chunk to be discontiguous with
    previous chunks; the chunk-fusion algorithm won't merge them.
 */

__clib_export void
clib_valloc_add_chunk (clib_valloc_main_t *vam, clib_valloc_chunk_t *template)
{
  clib_valloc_chunk_t *ch, *new_ch;
  u32 index;

  ASSERT (vam->flags & CLIB_VALLOC_INITIALIZED);

  clib_spinlock_lock_if_init (&vam->lock);

  /* Add at the beginning, or at the end... */
  index = vam->first_index;

  /*
   * Make sure we're not trying to add an overlapping chunk...
   * It's worth checking, because someone will eventually do that.
   */
  if (CLIB_DEBUG > 0 && index != ~0)
    {
      while (index != ~0)
        {
          ch = pool_elt_at_index (vam->chunks, index);
          /* Chunks are half-open intervals: the template doesn't overlap
             this chunk iff it ends at or before the chunk starts, or
             starts at or after the chunk ends. */
          ASSERT (template->baseva + template->size <= ch->baseva ||
                  template->baseva >= (ch->baseva + ch->size));
          index = ch->next;
        }
      index = vam->first_index;
    }

  if (index != ~0)
    ch = pool_elt_at_index (vam->chunks, index);

  if (index == ~0 || template->baseva < ch->baseva)
    {
      pool_get (vam->chunks, new_ch);
      clib_memset (new_ch, 0, sizeof (*new_ch));

      if (index != ~0)
        {
          ch = pool_elt_at_index (vam->chunks, index);

          new_ch->next = index;
          new_ch->prev = ~0;
          ch->prev = new_ch - vam->chunks;
        }
      else
        {
          new_ch->next = new_ch->prev = ~0;
        }

      new_ch->baseva = template->baseva;
      new_ch->size = template->size;

      vam->first_index = new_ch - vam->chunks;

      hash_set (vam->chunk_index_by_baseva, new_ch->baseva, vam->first_index);
    }
  else
    {
      /* Walk to the end of the chunk chain */
      while (index != ~0)
        {
          ch = pool_elt_at_index (vam->chunks, index);
          index = ch->next;
        }
      /* we want the last chunk index */
      index = ch - vam->chunks;

      pool_get (vam->chunks, new_ch);
      clib_memset (new_ch, 0, sizeof (*new_ch));

      ch = pool_elt_at_index (vam->chunks, index);

      new_ch->next = ~0;
      new_ch->prev = index;
      ch->next = new_ch - vam->chunks;

      new_ch->baseva = template->baseva;
      new_ch->size = template->size;

      hash_set (vam->chunk_index_by_baseva, new_ch->baseva,
                new_ch - vam->chunks);
    }

  clib_spinlock_unlock_if_init (&vam->lock);
}
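
/* A hedged sketch of extending the arena with a second, discontiguous
   range (reusing the hypothetical vam from the sketch above; only the
   baseva and size members of the template are read):

   @code
     clib_valloc_chunk_t _ch2, *ch2 = &_ch2;

     clib_memset (ch2, 0, sizeof (*ch2));
     ch2->baseva = 0x300000000ULL;  // hypothetical, disjoint from the arena
     ch2->size = 2ULL << 20;        // 2 MB
     clib_valloc_add_chunk (vam, ch2);
   @endcode
*/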

/** Initialize a virtual memory allocation arena
    @param vam - clib_valloc_main_t * pointer to the arena to initialize
    @param template - clib_valloc_chunk_t * pointer to a template chunk which
    describes the initial virtual address range
    @param need_lock - int - non-zero => protect the arena with a spinlock
*/
__clib_export void
clib_valloc_init (clib_valloc_main_t * vam, clib_valloc_chunk_t * template,
                  int need_lock)
{
  ASSERT (template && template->baseva && template->size);
  clib_memset (vam, 0, sizeof (*vam));
  if (need_lock)
    clib_spinlock_init (&vam->lock);

  vam->chunk_index_by_baseva = hash_create (0, sizeof (uword));
  vam->first_index = ~0;
  vam->flags |= CLIB_VALLOC_INITIALIZED;

  clib_valloc_add_chunk (vam, template);
}
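
/* A single-threaded variant (a sketch, reusing the hypothetical template
   above): passing need_lock = 0 skips spinlock creation, so the
   *_if_init lock operations throughout this file become no-ops.

   @code
     clib_valloc_init (vam, ch, 0);  // 0 => no spinlock
   @endcode
*/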

/** Allocate virtual space
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param size - uword number of bytes to allocate
    @param os_out_of_memory_on_failure - int - 1 => panic on allocation
    failure
    @return uword - base address of the allocated space, 0 => failure
    (unambiguous, since clib_valloc_init insists on a nonzero base address)
*/
__clib_export uword
clib_valloc_alloc (clib_valloc_main_t * vam, uword size,
                   int os_out_of_memory_on_failure)
{
  clib_valloc_chunk_t *ch, *new_ch, *next_ch;
  u32 index;

  clib_spinlock_lock_if_init (&vam->lock);

  index = vam->first_index;

  while (index != ~0)
    {
      ch = pool_elt_at_index (vam->chunks, index);
      /* If the chunk is free... */
      if ((ch->flags & CLIB_VALLOC_BUSY) == 0)
        {
          /* Too small? */
          if (ch->size < size)
            goto next_chunk;
          /* Exact match? */
          if (ch->size == size)
            {
              ch->flags |= CLIB_VALLOC_BUSY;
              clib_spinlock_unlock_if_init (&vam->lock);
              return ch->baseva;
            }
          /*
           * The current free chunk is larger than necessary, split the block.
           */
          pool_get (vam->chunks, new_ch);
          /* ch might have just moved */
          ch = pool_elt_at_index (vam->chunks, index);
          clib_memset (new_ch, 0, sizeof (*new_ch));
          new_ch->next = new_ch->prev = ~0;
          new_ch->baseva = ch->baseva + size;
          new_ch->size = ch->size - size;
          ch->size = size;

          /* Insert into doubly-linked list */
          new_ch->next = ch->next;
          new_ch->prev = ch - vam->chunks;

          if (ch->next != ~0)
            {
              next_ch = pool_elt_at_index (vam->chunks, ch->next);
              next_ch->prev = new_ch - vam->chunks;
            }
          ch->next = new_ch - vam->chunks;

          hash_set (vam->chunk_index_by_baseva, new_ch->baseva,
                    new_ch - vam->chunks);

          ch->flags |= CLIB_VALLOC_BUSY;
          clib_spinlock_unlock_if_init (&vam->lock);
          return ch->baseva;
        }

    next_chunk:
      index = ch->next;
    }
  clib_spinlock_unlock_if_init (&vam->lock);

  if (os_out_of_memory_on_failure)
    os_out_of_memory ();

  return 0;
}
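
/* A sketch of allocation-failure handling (assuming the arena above):
   pass 0 for os_out_of_memory_on_failure and test the return value,
   rather than letting the library call os_out_of_memory ():

   @code
     uword va = clib_valloc_alloc (vam, 64ULL << 20, 0);  // 64 MB request
     if (va == 0)
       clib_warning ("arena exhausted");  // handle failure locally
   @endcode
*/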

/** Free virtual space
    @param vam - clib_valloc_main_t * pointer to the allocation arena
    @param baseva - uword base virtual address of the returned space
    @return uword - size of the freed virtual chunk
    @note The size is returned since we know it, in case the caller
    doesn't keep track of chunk sizes
*/
__clib_export uword
clib_valloc_free (clib_valloc_main_t * vam, uword baseva)
{
  clib_valloc_chunk_t *ch, *prev_ch, *next_ch, *n2_ch;
  u32 index;
  uword return_size = 0;
  uword *p;

  clib_spinlock_lock_if_init (&vam->lock);

  p = hash_get (vam->chunk_index_by_baseva, baseva);

  /* Check even in production images */
  if (p == 0)
    os_panic ();

  index = p[0];

  ch = pool_elt_at_index (vam->chunks, index);

  return_size = ch->size;

  ASSERT (ch->baseva == baseva);
  ASSERT ((ch->flags & CLIB_VALLOC_BUSY) != 0);

  ch->flags &= ~CLIB_VALLOC_BUSY;

  /* Combine with previous entry? */
  if (ch->prev != ~0)
    {
      prev_ch = pool_elt_at_index (vam->chunks, ch->prev);
      /*
       * The previous chunk must be free as well, and
       * contiguous with this block.
       */
      if ((prev_ch->flags & CLIB_VALLOC_BUSY) == 0
          && ((prev_ch->baseva + prev_ch->size) == ch->baseva))
        {
          hash_unset (vam->chunk_index_by_baseva, baseva);
          prev_ch->size += ch->size;
          prev_ch->next = ch->next;
          if (ch->next != ~0)
            {
              next_ch = pool_elt_at_index (vam->chunks, ch->next);
              next_ch->prev = ch->prev;
            }
          ASSERT (ch - vam->chunks != vam->first_index);
          clib_memset (ch, 0xfe, sizeof (*ch));
          pool_put (vam->chunks, ch);
          /* See about combining with the next chunk */
          ch = prev_ch;
        }
    }

  /* Combine with next entry? */
  if (ch->next != ~0)
    {
      next_ch = pool_elt_at_index (vam->chunks, ch->next);

      if ((next_ch->flags & CLIB_VALLOC_BUSY) == 0
          && ((ch->baseva + ch->size) == next_ch->baseva))
        {
          hash_unset (vam->chunk_index_by_baseva, next_ch->baseva);
          ch->size += next_ch->size;
          ch->next = next_ch->next;
          if (ch->next != ~0)
            {
              n2_ch = pool_elt_at_index (vam->chunks, next_ch->next);
              n2_ch->prev = ch - vam->chunks;
            }
          ASSERT (next_ch - vam->chunks != vam->first_index);
          clib_memset (next_ch, 0xfe, sizeof (*next_ch));
          pool_put (vam->chunks, next_ch);
        }
    }

  clib_spinlock_unlock_if_init (&vam->lock);
  return return_size;
}
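
/* A brief sketch of the coalescing above (hypothetical values, continuing
   the examples above): freeing two adjacent first-fit allocations fuses
   them back into one free chunk, so a later, larger request still fits.

   @code
     uword a = clib_valloc_alloc (vam, 4 << 10, 0);
     uword b = clib_valloc_alloc (vam, 4 << 10, 0);  // adjacent to a

     clib_valloc_free (vam, a);
     clib_valloc_free (vam, b);  // fuses with the chunk that held a

     uword c = clib_valloc_alloc (vam, 8 << 10, 0);  // reuses a+b's space
   @endcode
*/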

/** Format a virtual allocation arena (varargs)
    @param vam - clib_valloc_main_t * pointer to the arena to format
    @param verbose - int - verbosity level
    @return u8 * vector
*/
__clib_export u8 *
format_valloc (u8 *s, va_list *va)
{
  clib_valloc_main_t *vam = va_arg (*va, clib_valloc_main_t *);
  int verbose = va_arg (*va, int);
  clib_valloc_chunk_t *ch;
  u32 index;
  uword *p;

  clib_spinlock_lock_if_init (&vam->lock);

  s = format (s, "%d chunks, first index %d\n",
              pool_elts (vam->chunks), vam->first_index);

  if (verbose)
    {
      index = vam->first_index;
      while (index != ~0)
        {
          ch = pool_elt_at_index (vam->chunks, index);

          s = format (s, "[%d] base %llx size %llx (%lld) prev %d %s\n",
                      index, ch->baseva, ch->size, ch->size, ch->prev,
                      (ch->flags & CLIB_VALLOC_BUSY) ? "busy" : "free");

          p = hash_get (vam->chunk_index_by_baseva, ch->baseva);
          if (p == 0)
            {
              s = format (s, "   BUG: baseva not in hash table!\n");
            }
          else if (p[0] != index)
            {
              s = format (s, "   BUG: baseva in hash table %d not %d!\n",
                          p[0], index);
            }
          index = ch->next;
        }
    }

  clib_spinlock_unlock_if_init (&vam->lock);

  return s;
}
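
/* format_valloc follows the usual vppinfra format-function convention,
   so it is meant to be invoked via "%U". A sketch (verbose = 1 walks the
   chunk chain and cross-checks the baseva hash):

   @code
     fformat (stdout, "%U", format_valloc, vam, 1);  // 1 => verbose
   @endcode
*/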

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */