LCOV - code coverage report
Current view: top level - vppinfra - dlmalloc.c (source / functions)
Test: coverage-filtered.info
Date: 2023-07-05 22:20:52
Coverage:  Lines: 478 / 1075 (44.5 %)   Functions: 27 / 57 (47.4 %)

          Line data    Source code
       1             : /*
       2             :   This is a version (aka dlmalloc) of malloc/free/realloc written by
       3             :   Doug Lea and released to the public domain, as explained at
       4             :   http://creativecommons.org/publicdomain/zero/1.0/ Send questions,
       5             :   comments, complaints, performance data, etc to dl@cs.oswego.edu
       6             : */
       7             : 
       8             : #include <vppinfra/clib.h>
       9             : #include <vppinfra/dlmalloc.h>
      10             : 
      11             : /*------------------------------ internal #includes ---------------------- */
      12             : 
      13             : #ifdef _MSC_VER
      14             : #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
      15             : #endif /* _MSC_VER */
      16             : #if !NO_MALLOC_STATS
      17             : #include <stdio.h>       /* for printing in malloc_stats */
      18             : #endif /* NO_MALLOC_STATS */
      19             : #ifndef LACKS_ERRNO_H
      20             : #include <errno.h>       /* for MALLOC_FAILURE_ACTION */
      21             : #endif /* LACKS_ERRNO_H */
      22             : #ifdef DEBUG
      23             : #if DLM_ABORT_ON_ASSERT_FAILURE
      24             : #undef assert
      25             : #define assert(x) if(!(x)) DLM_ABORT
      26             : #else /* DLM_ABORT_ON_ASSERT_FAILURE */
      27             : #include <assert.h>
      28             : #endif /* DLM_ABORT_ON_ASSERT_FAILURE */
      29             : #else  /* DEBUG */
      30             : #ifndef assert
      31             : #define assert(x)
      32             : #endif
      33             : #define DEBUG 0
      34             : #endif /* DEBUG */
      35             : #if !defined(WIN32) && !defined(LACKS_TIME_H)
      36             : #include <time.h>        /* for magic initialization */
      37             : #endif /* WIN32 */
      38             : #ifndef LACKS_STDLIB_H
      39             : #include <stdlib.h>      /* for abort() */
      40             : #endif /* LACKS_STDLIB_H */
      41             : #ifndef LACKS_STRING_H
      42             : #include <string.h>      /* for memset etc */
      43             : #endif  /* LACKS_STRING_H */
      44             : #if USE_BUILTIN_FFS
      45             : #ifndef LACKS_STRINGS_H
      46             : #include <strings.h>     /* for ffs */
      47             : #endif /* LACKS_STRINGS_H */
      48             : #endif /* USE_BUILTIN_FFS */
      49             : #if HAVE_MMAP
      50             : #ifndef LACKS_SYS_MMAN_H
      51             : /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
      52             : #if (defined(linux) && !defined(__USE_GNU))
      53             : #define __USE_GNU 1
      54             : #include <sys/mman.h>    /* for mmap */
      55             : #undef __USE_GNU
      56             : #else
      57             : #include <sys/mman.h>    /* for mmap */
      58             : #endif /* linux */
      59             : #endif /* LACKS_SYS_MMAN_H */
      60             : #ifndef LACKS_FCNTL_H
      61             : #include <fcntl.h>
      62             : #endif /* LACKS_FCNTL_H */
      63             : #endif /* HAVE_MMAP */
      64             : #ifndef LACKS_UNISTD_H
      65             : #include <unistd.h>     /* for sbrk, sysconf */
      66             : #else /* LACKS_UNISTD_H */
      67             : #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
      68             : extern void*     sbrk(ptrdiff_t);
      69             : #endif /* FreeBSD etc */
      70             : #endif /* LACKS_UNISTD_H */
      71             : 
      72             : /* Declarations for locking */
      73             : #if USE_LOCKS
      74             : #ifndef WIN32
      75             : #if defined (__SVR4) && defined (__sun)  /* solaris */
      76             : #include <thread.h>
      77             : #elif !defined(LACKS_SCHED_H)
      78             : #include <sched.h>
      79             : #endif /* solaris or LACKS_SCHED_H */
      80             : #if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
      81             : #include <pthread.h>
      82             : #endif /* USE_RECURSIVE_LOCKS ... */
      83             : #elif defined(_MSC_VER)
      84             : #ifndef _M_AMD64
      85             : /* These are already defined on AMD64 builds */
      86             : #ifdef __cplusplus
      87             : extern "C" {
      88             : #endif /* __cplusplus */
      89             : LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
      90             : LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
      91             : #ifdef __cplusplus
      92             : }
      93             : #endif /* __cplusplus */
      94             : #endif /* _M_AMD64 */
      95             : #pragma intrinsic (_InterlockedCompareExchange)
      96             : #pragma intrinsic (_InterlockedExchange)
      97             : #define interlockedcompareexchange _InterlockedCompareExchange
      98             : #define interlockedexchange _InterlockedExchange
      99             : #elif defined(WIN32) && defined(__GNUC__)
     100             : #define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
     101             : #define interlockedexchange __sync_lock_test_and_set
     102             : #endif /* Win32 */
     103             : #else /* USE_LOCKS */
     104             : #endif /* USE_LOCKS */
     105             : 
     106             : #ifndef LOCK_AT_FORK
     107             : #define LOCK_AT_FORK 0
     108             : #endif
     109             : 
     110             : /* Declarations for bit scanning on win32 */
     111             : #if defined(_MSC_VER) && _MSC_VER>=1300
     112             : #ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
     113             : #ifdef __cplusplus
     114             : extern "C" {
     115             : #endif /* __cplusplus */
     116             : unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
     117             : unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
     118             : #ifdef __cplusplus
     119             : }
     120             : #endif /* __cplusplus */
     121             : 
     122             : #define BitScanForward _BitScanForward
     123             : #define BitScanReverse _BitScanReverse
     124             : #pragma intrinsic(_BitScanForward)
     125             : #pragma intrinsic(_BitScanReverse)
     126             : #endif /* BitScanForward */
     127             : #endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
     128             : 
     129             : #ifndef WIN32
     130             : #ifndef malloc_getpagesize
     131             : #  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
     132             : #    ifndef _SC_PAGE_SIZE
     133             : #      define _SC_PAGE_SIZE _SC_PAGESIZE
     134             : #    endif
     135             : #  endif
     136             : #  ifdef _SC_PAGE_SIZE
     137             : #    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
     138             : #  else
     139             : #    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
     140             :        extern size_t getpagesize();
     141             : #      define malloc_getpagesize getpagesize()
     142             : #    else
     143             : #      ifdef WIN32 /* use supplied emulation of getpagesize */
     144             : #        define malloc_getpagesize getpagesize()
     145             : #      else
     146             : #        ifndef LACKS_SYS_PARAM_H
     147             : #          include <sys/param.h>
     148             : #        endif
     149             : #        ifdef EXEC_PAGESIZE
     150             : #          define malloc_getpagesize EXEC_PAGESIZE
     151             : #        else
     152             : #          ifdef NBPG
     153             : #            ifndef CLSIZE
     154             : #              define malloc_getpagesize NBPG
     155             : #            else
     156             : #              define malloc_getpagesize (NBPG * CLSIZE)
     157             : #            endif
     158             : #          else
     159             : #            ifdef NBPC
     160             : #              define malloc_getpagesize NBPC
     161             : #            else
     162             : #              ifdef PAGESIZE
     163             : #                define malloc_getpagesize PAGESIZE
     164             : #              else /* just guess */
     165             : #                define malloc_getpagesize ((size_t)4096U)
     166             : #              endif
     167             : #            endif
     168             : #          endif
     169             : #        endif
     170             : #      endif
     171             : #    endif
     172             : #  endif
     173             : #endif
     174             : #endif
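
On a typical Linux build this cascade stops at the sysconf branch. As a quick standalone illustration (not part of dlmalloc) of what malloc_getpagesize expands to there:

#include <stdio.h>
#include <unistd.h>

int main (void)
{
  /* sysconf(_SC_PAGE_SIZE) is what malloc_getpagesize expands to
     when the _SC_PAGE_SIZE branch above is taken */
  long pagesize = sysconf (_SC_PAGE_SIZE);
  printf ("page size: %ld bytes\n", pagesize);
  return 0;
}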
     175             : 
     176             : /* ------------------- size_t and alignment properties -------------------- */
     177             : 
     178             : /* The byte and bit size of a size_t */
     179             : #define SIZE_T_SIZE         (sizeof(size_t))
     180             : #define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
     181             : 
     182             : /* Some constants coerced to size_t */
     183             : /* Annoying but necessary to avoid errors on some platforms */
     184             : #define SIZE_T_ZERO         ((size_t)0)
     185             : #define SIZE_T_ONE          ((size_t)1)
     186             : #define SIZE_T_TWO          ((size_t)2)
     187             : #define SIZE_T_FOUR         ((size_t)4)
     188             : #define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
     189             : #define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
     190             : #define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
     191             : #define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
     192             : 
     193             : /* The bit mask value corresponding to MALLOC_ALIGNMENT */
     194             : #define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
     195             : 
     196             : /* True if address a has acceptable alignment */
     197             : #define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
     198             : 
     199             : /* the number of bytes to offset an address to align it */
     200             : #define align_offset(A)\
     201             :  ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
     202             :   ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
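
A worked illustration of is_aligned/align_offset, as a standalone sketch assuming MALLOC_ALIGNMENT is 16 (the common 64-bit default; the constants below are stand-ins, not dlmalloc's):

#include <assert.h>
#include <stddef.h>

int main (void)
{
  size_t alignment = 16;        /* stand-in for MALLOC_ALIGNMENT */
  size_t mask = alignment - 1;  /* stand-in for CHUNK_ALIGN_MASK */
  size_t addr = 0x1008;         /* misaligned by 8 bytes */

  /* mirrors align_offset(): distance to the next aligned address */
  size_t off = ((addr & mask) == 0)
    ? 0 : ((alignment - (addr & mask)) & mask);

  assert (off == 8);
  assert (((addr + off) & mask) == 0);  /* mirrors is_aligned() */
  return 0;
}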
     203             : 
     204             : /* -------------------------- MMAP preliminaries ------------------------- */
     205             : 
     206             : /*
      207             :    If HAVE_MORECORE or HAVE_MMAP is false, we just define calls and
      208             :    checks to fail so the compiler optimizer can delete code rather
      209             :    than using so many "#if"s.
     210             : */
     211             : 
     212             : 
     213             : /* MORECORE and MMAP must return MFAIL on failure */
     214             : #define MFAIL                ((void*)(MAX_SIZE_T))
     215             : #define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
     216             : 
     217             : #if HAVE_MMAP
     218             : 
     219             : #ifndef WIN32
     220             : #define MUNMAP_DEFAULT(a, s)  munmap((a), (s))
     221             : #define MMAP_PROT            (PROT_READ|PROT_WRITE)
     222             : #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
     223             : #define MAP_ANONYMOUS        MAP_ANON
     224             : #endif /* MAP_ANON */
     225             : #ifdef MAP_ANONYMOUS
     226             : #define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
     227             : #define MMAP_DEFAULT(s)       mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
     228             : #else /* MAP_ANONYMOUS */
     229             : /*
     230             :    Nearly all versions of mmap support MAP_ANONYMOUS, so the following
     231             :    is unlikely to be needed, but is supplied just in case.
     232             : */
     233             : #define MMAP_FLAGS           (MAP_PRIVATE)
     234             : static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
     235             : #define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
     236             :            (dev_zero_fd = open("/dev/zero", O_RDWR), \
     237             :             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
     238             :             mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
     239             : #endif /* MAP_ANONYMOUS */
     240             : 
     241             : #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
     242             : 
     243             : #else /* WIN32 */
     244             : 
     245             : /* Win32 MMAP via VirtualAlloc */
     246             : static FORCEINLINE void* win32mmap(size_t size) {
     247             :   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
     248             :   return (ptr != 0)? ptr: MFAIL;
     249             : }
     250             : 
     251             : /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
     252             : static FORCEINLINE void* win32direct_mmap(size_t size) {
     253             :   void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
     254             :                            PAGE_READWRITE);
     255             :   return (ptr != 0)? ptr: MFAIL;
     256             : }
     257             : 
      258             : /* This function supports releasing coalesced segments */
     259             : static FORCEINLINE int win32munmap(void* ptr, size_t size) {
     260             :   MEMORY_BASIC_INFORMATION minfo;
     261             :   char* cptr = (char*)ptr;
     262             :   while (size) {
     263             :     if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
     264             :       return -1;
     265             :     if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
     266             :         minfo.State != MEM_COMMIT || minfo.RegionSize > size)
     267             :       return -1;
     268             :     if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
     269             :       return -1;
     270             :     cptr += minfo.RegionSize;
     271             :     size -= minfo.RegionSize;
     272             :   }
     273             :   return 0;
     274             : }
     275             : 
     276             : #define MMAP_DEFAULT(s)             win32mmap(s)
     277             : #define MUNMAP_DEFAULT(a, s)        win32munmap((a), (s))
     278             : #define DIRECT_MMAP_DEFAULT(s)      win32direct_mmap(s)
     279             : #endif /* WIN32 */
     280             : #endif /* HAVE_MMAP */
     281             : 
     282             : #if HAVE_MREMAP
     283             : #ifndef WIN32
     284             : #define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
     285             : #endif /* WIN32 */
     286             : #endif /* HAVE_MREMAP */
     287             : 
     288             : /**
     289             :  * Define CALL_MORECORE
     290             :  */
     291             : #if HAVE_MORECORE
     292             :     #ifdef MORECORE
     293             :         #define CALL_MORECORE(S)    MORECORE(S)
     294             :     #else  /* MORECORE */
     295             :         #define CALL_MORECORE(S)    MORECORE_DEFAULT(S)
     296             :     #endif /* MORECORE */
     297             : #else  /* HAVE_MORECORE */
     298             :     #define CALL_MORECORE(S)        MFAIL
     299             : #endif /* HAVE_MORECORE */
     300             : 
     301             : /**
     302             :  * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
     303             :  */
     304             : #if HAVE_MMAP
     305             :     #define USE_MMAP_BIT            (SIZE_T_ONE)
     306             : 
     307             :     #ifdef MMAP
     308             :         #define CALL_MMAP(s)        MMAP(s)
     309             :     #else /* MMAP */
     310             :         #define CALL_MMAP(s)        MMAP_DEFAULT(s)
     311             :     #endif /* MMAP */
     312             :     #ifdef MUNMAP
     313             :         #define CALL_MUNMAP(a, s)   MUNMAP((a), (s))
     314             :     #else /* MUNMAP */
     315             :         #define CALL_MUNMAP(a, s)   MUNMAP_DEFAULT((a), (s))
     316             :     #endif /* MUNMAP */
     317             :     #ifdef DIRECT_MMAP
     318             :         #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
     319             :     #else /* DIRECT_MMAP */
     320             :         #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
     321             :     #endif /* DIRECT_MMAP */
     322             : #else  /* HAVE_MMAP */
     323             :     #define USE_MMAP_BIT            (SIZE_T_ZERO)
     324             : 
     325             :     #define MMAP(s)                 MFAIL
     326             :     #define MUNMAP(a, s)            (-1)
     327             :     #define DIRECT_MMAP(s)          MFAIL
     328             :     #define CALL_DIRECT_MMAP(s)     DIRECT_MMAP(s)
     329             :     #define CALL_MMAP(s)            MMAP(s)
     330             :     #define CALL_MUNMAP(a, s)       MUNMAP((a), (s))
     331             : #endif /* HAVE_MMAP */
     332             : 
     333             : /**
     334             :  * Define CALL_MREMAP
     335             :  */
     336             : #if HAVE_MMAP && HAVE_MREMAP
     337             :     #ifdef MREMAP
     338             :         #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
     339             :     #else /* MREMAP */
     340             :         #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
     341             :     #endif /* MREMAP */
     342             : #else  /* HAVE_MMAP && HAVE_MREMAP */
     343             :     #define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
     344             : #endif /* HAVE_MMAP && HAVE_MREMAP */
     345             : 
     346             : /* mstate bit set if contiguous morecore disabled or failed */
     347             : #define USE_NONCONTIGUOUS_BIT (4U)
     348             : 
     349             : /* mstate bit set if no expansion allowed */
     350             : #define USE_NOEXPAND_BIT (8U)
     351             : 
     352             : /* trace allocations if set */
     353             : #define USE_TRACE_BIT (16U)
     354             : 
     355             : /* segment bit set in create_mspace_with_base */
     356             : #define EXTERN_BIT            (8U)
     357             : 
     358             : 
     359             : /* --------------------------- Lock preliminaries ------------------------ */
     360             : 
     361             : /*
     362             :   When locks are defined, there is one global lock, plus
     363             :   one per-mspace lock.
     364             : 
      365             :   The global lock ensures that mparams.magic and other unique
      366             :   mparams values are initialized only once. It also protects
      367             :   sequences of calls to MORECORE.  In many cases sys_alloc requires
      368             :   two calls that should not be interleaved with calls by other
      369             :   threads.  This does not protect against direct calls to MORECORE
      370             :   by other threads not using this lock, so there is still code to
      371             :   cope as best we can with interference.
     372             : 
     373             :   Per-mspace locks surround calls to malloc, free, etc.
     374             :   By default, locks are simple non-reentrant mutexes.
     375             : 
     376             :   Because lock-protected regions generally have bounded times, it is
     377             :   OK to use the supplied simple spinlocks. Spinlocks are likely to
     378             :   improve performance for lightly contended applications, but worsen
     379             :   performance under heavy contention.
     380             : 
     381             :   If USE_LOCKS is > 1, the definitions of lock routines here are
     382             :   bypassed, in which case you will need to define the type MLOCK_T,
     383             :   and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
     384             :   and TRY_LOCK.  You must also declare a
     385             :     static MLOCK_T malloc_global_mutex = { initialization values };.
     386             : 
     387             : */
     388             : 
     389             : #if !USE_LOCKS
     390             : #define USE_LOCK_BIT               (0U)
     391             : #define INITIAL_LOCK(l)            (0)
     392             : #define DESTROY_LOCK(l)            (0)
     393             : #define ACQUIRE_MALLOC_GLOBAL_LOCK()
     394             : #define RELEASE_MALLOC_GLOBAL_LOCK()
     395             : 
     396             : #else
     397             : #if USE_LOCKS > 1
     398             : /* -----------------------  User-defined locks ------------------------ */
     399             : /* Define your own lock implementation here */
     400             : /* #define INITIAL_LOCK(lk)  ... */
     401             : /* #define DESTROY_LOCK(lk)  ... */
     402             : /* #define ACQUIRE_LOCK(lk)  ... */
     403             : /* #define RELEASE_LOCK(lk)  ... */
     404             : /* #define TRY_LOCK(lk) ... */
     405             : /* static MLOCK_T malloc_global_mutex = ... */
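
As a hedged sketch, the placeholders above could be satisfied with plain pthread mutexes; any implementation meeting the contract described earlier works equally well:

#include <pthread.h>

#define MLOCK_T           pthread_mutex_t
#define INITIAL_LOCK(lk)  pthread_mutex_init (lk, NULL)
#define DESTROY_LOCK(lk)  pthread_mutex_destroy (lk)
#define ACQUIRE_LOCK(lk)  pthread_mutex_lock (lk)
#define RELEASE_LOCK(lk)  pthread_mutex_unlock (lk)
#define TRY_LOCK(lk)      (!pthread_mutex_trylock (lk))

static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;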
     406             : 
     407             : #elif USE_SPIN_LOCKS
     408             : 
     409             : /* First, define CAS_LOCK and CLEAR_LOCK on ints */
     410             : /* Note CAS_LOCK defined to return 0 on success */
     411             : 
     412             : #if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
     413             : #define CAS_LOCK(sl)     __sync_lock_test_and_set(sl, 1)
     414             : #define CLEAR_LOCK(sl)   __sync_lock_release(sl)
     415             : 
     416             : #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
     417             : /* Custom spin locks for older gcc on x86 */
     418             : static FORCEINLINE int x86_cas_lock(int *sl) {
     419             :   int ret;
     420             :   int val = 1;
     421             :   int cmp = 0;
     422             :   __asm__ __volatile__  ("lock; cmpxchgl %1, %2"
     423             :                          : "=a" (ret)
     424             :                          : "r" (val), "m" (*(sl)), "0"(cmp)
     425             :                          : "memory", "cc");
     426             :   return ret;
     427             : }
     428             : 
     429             : static FORCEINLINE void x86_clear_lock(int* sl) {
     430             :   assert(*sl != 0);
     431             :   int prev = 0;
     432             :   int ret;
     433             :   __asm__ __volatile__ ("lock; xchgl %0, %1"
     434             :                         : "=r" (ret)
     435             :                         : "m" (*(sl)), "0"(prev)
     436             :                         : "memory");
     437             : }
     438             : 
     439             : #define CAS_LOCK(sl)     x86_cas_lock(sl)
     440             : #define CLEAR_LOCK(sl)   x86_clear_lock(sl)
     441             : 
     442             : #else /* Win32 MSC */
     443             : #define CAS_LOCK(sl)     interlockedexchange(sl, (LONG)1)
     444             : #define CLEAR_LOCK(sl)   interlockedexchange (sl, (LONG)0)
     445             : 
      446             : #endif /* ... gcc spin locks ... */
     447             : 
     448             : /* How to yield for a spin lock */
     449             : #define SPINS_PER_YIELD       63
     450             : #if defined(_MSC_VER)
     451             : #define SLEEP_EX_DURATION     50 /* delay for yield/sleep */
     452             : #define SPIN_LOCK_YIELD  SleepEx(SLEEP_EX_DURATION, FALSE)
     453             : #elif defined (__SVR4) && defined (__sun) /* solaris */
     454             : #define SPIN_LOCK_YIELD   thr_yield();
     455             : #elif !defined(LACKS_SCHED_H)
     456             : #define SPIN_LOCK_YIELD   sched_yield();
     457             : #else
     458             : #define SPIN_LOCK_YIELD
     459             : #endif /* ... yield ... */
     460             : 
     461             : #if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
     462             : /* Plain spin locks use single word (embedded in malloc_states) */
     463             : __clib_nosanitize_addr
     464       12197 : static int spin_acquire_lock(int *sl) {
     465       12197 :   int spins = 0;
     466      740352 :   while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
     467      727721 :     if ((++spins & SPINS_PER_YIELD) == 0) {
     468        8943 :       SPIN_LOCK_YIELD;
     469             :     }
     470             :   }
     471       12631 :   return 0;
     472             : }
     473             : 
     474             : #define MLOCK_T               int
     475             : #define TRY_LOCK(sl)          !CAS_LOCK(sl)
     476             : #define RELEASE_LOCK(sl)      CLEAR_LOCK(sl)
     477             : #define ACQUIRE_LOCK(sl)      (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
     478             : #define INITIAL_LOCK(sl)      (*sl = 0)
     479             : #define DESTROY_LOCK(sl)      (0)
     480             : static MLOCK_T malloc_global_mutex = 0;
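
A minimal usage sketch of the macros above ('mylock' and the function are hypothetical, for illustration only):

static MLOCK_T mylock = 0;   /* hypothetical lock word, like the one embedded in malloc_state */

static void sketch_locked_section (void)
{
  ACQUIRE_LOCK (&mylock);    /* spins, yielding once every 64 failed tries */
  /* ... bounded-time critical section (e.g. a malloc or free body) ... */
  RELEASE_LOCK (&mylock);
}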
     481             : 
     482             : #else /* USE_RECURSIVE_LOCKS */
     483             : /* types for lock owners */
     484             : #ifdef WIN32
     485             : #define THREAD_ID_T           DWORD
     486             : #define CURRENT_THREAD        GetCurrentThreadId()
     487             : #define EQ_OWNER(X,Y)         ((X) == (Y))
     488             : #else
     489             : /*
     490             :   Note: the following assume that pthread_t is a type that can be
      491             :   initialized to (cast) zero. If this is not the case, you will need to
     492             :   somehow redefine these or not use spin locks.
     493             : */
     494             : #define THREAD_ID_T           pthread_t
     495             : #define CURRENT_THREAD        pthread_self()
     496             : #define EQ_OWNER(X,Y)         pthread_equal(X, Y)
     497             : #endif
     498             : 
     499             : struct malloc_recursive_lock {
     500             :   int sl;
     501             :   unsigned int c;
     502             :   THREAD_ID_T threadid;
     503             : };
     504             : 
     505             : #define MLOCK_T  struct malloc_recursive_lock
     506             : static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
     507             : 
     508             : static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
     509             :   assert(lk->sl != 0);
     510             :   if (--lk->c == 0) {
     511             :     CLEAR_LOCK(&lk->sl);
     512             :   }
     513             : }
     514             : 
     515             : static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
     516             :   THREAD_ID_T mythreadid = CURRENT_THREAD;
     517             :   int spins = 0;
     518             :   for (;;) {
     519             :     if (*((volatile int *)(&lk->sl)) == 0) {
     520             :       if (!CAS_LOCK(&lk->sl)) {
     521             :         lk->threadid = mythreadid;
     522             :         lk->c = 1;
     523             :         return 0;
     524             :       }
     525             :     }
     526             :     else if (EQ_OWNER(lk->threadid, mythreadid)) {
     527             :       ++lk->c;
     528             :       return 0;
     529             :     }
     530             :     if ((++spins & SPINS_PER_YIELD) == 0) {
     531             :       SPIN_LOCK_YIELD;
     532             :     }
     533             :   }
     534             : }
     535             : 
     536             : static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
     537             :   THREAD_ID_T mythreadid = CURRENT_THREAD;
     538             :   if (*((volatile int *)(&lk->sl)) == 0) {
     539             :     if (!CAS_LOCK(&lk->sl)) {
     540             :       lk->threadid = mythreadid;
     541             :       lk->c = 1;
     542             :       return 1;
     543             :     }
     544             :   }
     545             :   else if (EQ_OWNER(lk->threadid, mythreadid)) {
     546             :     ++lk->c;
     547             :     return 1;
     548             :   }
     549             :   return 0;
     550             : }
     551             : 
     552             : #define RELEASE_LOCK(lk)      recursive_release_lock(lk)
     553             : #define TRY_LOCK(lk)          recursive_try_lock(lk)
     554             : #define ACQUIRE_LOCK(lk)      recursive_acquire_lock(lk)
     555             : #define INITIAL_LOCK(lk)      ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
     556             : #define DESTROY_LOCK(lk)      (0)
     557             : #endif /* USE_RECURSIVE_LOCKS */
     558             : 
     559             : #elif defined(WIN32) /* Win32 critical sections */
     560             : #define MLOCK_T               CRITICAL_SECTION
     561             : #define ACQUIRE_LOCK(lk)      (EnterCriticalSection(lk), 0)
     562             : #define RELEASE_LOCK(lk)      LeaveCriticalSection(lk)
     563             : #define TRY_LOCK(lk)          TryEnterCriticalSection(lk)
     564             : #define INITIAL_LOCK(lk)      (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
     565             : #define DESTROY_LOCK(lk)      (DeleteCriticalSection(lk), 0)
     566             : #define NEED_GLOBAL_LOCK_INIT
     567             : 
     568             : static MLOCK_T malloc_global_mutex;
     569             : static volatile LONG malloc_global_mutex_status;
     570             : 
     571             : /* Use spin loop to initialize global lock */
     572             : static void init_malloc_global_mutex() {
     573             :   for (;;) {
     574             :     long stat = malloc_global_mutex_status;
     575             :     if (stat > 0)
     576             :       return;
      577             :     /* transition to < 0 while initializing, then to > 0 */
     578             :     if (stat == 0 &&
     579             :         interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
     580             :       InitializeCriticalSection(&malloc_global_mutex);
     581             :       interlockedexchange(&malloc_global_mutex_status, (LONG)1);
     582             :       return;
     583             :     }
     584             :     SleepEx(0, FALSE);
     585             :   }
     586             : }
     587             : 
     588             : #else /* pthreads-based locks */
     589             : #define MLOCK_T               pthread_mutex_t
     590             : #define ACQUIRE_LOCK(lk)      pthread_mutex_lock(lk)
     591             : #define RELEASE_LOCK(lk)      pthread_mutex_unlock(lk)
     592             : #define TRY_LOCK(lk)          (!pthread_mutex_trylock(lk))
     593             : #define INITIAL_LOCK(lk)      pthread_init_lock(lk)
     594             : #define DESTROY_LOCK(lk)      pthread_mutex_destroy(lk)
     595             : 
     596             : #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
     597             : /* Cope with old-style linux recursive lock initialization by adding */
     598             : /* skipped internal declaration from pthread.h */
     599             : extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
     600             :                                               int __kind));
     601             : #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
     602             : #define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
     603             : #endif /* USE_RECURSIVE_LOCKS ... */
     604             : 
     605             : static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
     606             : 
     607             : static int pthread_init_lock (MLOCK_T *lk) {
     608             :   pthread_mutexattr_t attr;
     609             :   if (pthread_mutexattr_init(&attr)) return 1;
     610             : #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
     611             :   if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
     612             : #endif
     613             :   if (pthread_mutex_init(lk, &attr)) return 1;
     614             :   if (pthread_mutexattr_destroy(&attr)) return 1;
     615             :   return 0;
     616             : }
     617             : 
     618             : #endif /* ... lock types ... */
     619             : 
     620             : /* Common code for all lock types */
     621             : #define USE_LOCK_BIT               (2U)
     622             : 
     623             : #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
     624             : #define ACQUIRE_MALLOC_GLOBAL_LOCK()  ACQUIRE_LOCK(&malloc_global_mutex);
     625             : #endif
     626             : 
     627             : #ifndef RELEASE_MALLOC_GLOBAL_LOCK
     628             : #define RELEASE_MALLOC_GLOBAL_LOCK()  RELEASE_LOCK(&malloc_global_mutex);
     629             : #endif
     630             : 
     631             : #endif /* USE_LOCKS */
     632             : 
     633             : /* -----------------------  Chunk representations ------------------------ */
     634             : 
     635             : /*
     636             :   (The following includes lightly edited explanations by Colin Plumb.)
     637             : 
     638             :   The malloc_chunk declaration below is misleading (but accurate and
     639             :   necessary).  It declares a "view" into memory allowing access to
     640             :   necessary fields at known offsets from a given base.
     641             : 
     642             :   Chunks of memory are maintained using a `boundary tag' method as
     643             :   originally described by Knuth.  (See the paper by Paul Wilson
     644             :   ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
     645             :   techniques.)  Sizes of free chunks are stored both in the front of
     646             :   each chunk and at the end.  This makes consolidating fragmented
     647             :   chunks into bigger chunks fast.  The head fields also hold bits
     648             :   representing whether chunks are free or in use.
     649             : 
     650             :   Here are some pictures to make it clearer.  They are "exploded" to
     651             :   show that the state of a chunk can be thought of as extending from
     652             :   the high 31 bits of the head field of its header through the
     653             :   prev_foot and PINUSE_BIT bit of the following chunk header.
     654             : 
     655             :   A chunk that's in use looks like:
     656             : 
     657             :    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     658             :            | Size of previous chunk (if P = 0)                             |
     659             :            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     660             :          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
     661             :          | Size of this chunk                                         1| +-+
     662             :    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     663             :          |                                                               |
     664             :          +-                                                             -+
     665             :          |                                                               |
     666             :          +-                                                             -+
     667             :          |                                                               :
     668             :          +-      size - sizeof(size_t) available payload bytes          -+
     669             :          :                                                               |
     670             :  chunk-> +-                                                             -+
     671             :          |                                                               |
     672             :          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     673             :        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
     674             :        | Size of next chunk (may or may not be in use)               | +-+
     675             :  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     676             : 
     677             :     And if it's free, it looks like this:
     678             : 
     679             :    chunk-> +-                                                             -+
     680             :            | User payload (must be in use, or we would have merged!)       |
     681             :            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     682             :          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
     683             :          | Size of this chunk                                         0| +-+
     684             :    mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     685             :          | Next pointer                                                  |
     686             :          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     687             :          | Prev pointer                                                  |
     688             :          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     689             :          |                                                               :
     690             :          +-      size - sizeof(struct chunk) unused bytes               -+
     691             :          :                                                               |
     692             :  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     693             :          | Size of this chunk                                            |
     694             :          +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     695             :        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
     696             :        | Size of next chunk (must be in use, or we would have merged)| +-+
     697             :  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     698             :        |                                                               :
     699             :        +- User payload                                                -+
     700             :        :                                                               |
     701             :        +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     702             :                                                                      |0|
     703             :                                                                      +-+
     704             :   Note that since we always merge adjacent free chunks, the chunks
     705             :   adjacent to a free chunk must be in use.
     706             : 
     707             :   Given a pointer to a chunk (which can be derived trivially from the
     708             :   payload pointer) we can, in O(1) time, find out whether the adjacent
     709             :   chunks are free, and if so, unlink them from the lists that they
     710             :   are on and merge them with the current chunk.
     711             : 
     712             :   Chunks always begin on even word boundaries, so the mem portion
     713             :   (which is returned to the user) is also on an even word boundary, and
     714             :   thus at least double-word aligned.
     715             : 
     716             :   The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
     717             :   chunk size (which is always a multiple of two words), is an in-use
     718             :   bit for the *previous* chunk.  If that bit is *clear*, then the
     719             :   word before the current chunk size contains the previous chunk
     720             :   size, and can be used to find the front of the previous chunk.
     721             :   The very first chunk allocated always has this bit set, preventing
     722             :   access to non-existent (or non-owned) memory. If pinuse is set for
     723             :   any given chunk, then you CANNOT determine the size of the
     724             :   previous chunk, and might even get a memory addressing fault when
     725             :   trying to do so.
     726             : 
     727             :   The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
     728             :   the chunk size redundantly records whether the current chunk is
     729             :   inuse (unless the chunk is mmapped). This redundancy enables usage
     730             :   checks within free and realloc, and reduces indirection when freeing
     731             :   and consolidating chunks.
     732             : 
     733             :   Each freshly allocated chunk must have both cinuse and pinuse set.
     734             :   That is, each allocated chunk borders either a previously allocated
     735             :   and still in-use chunk, or the base of its memory arena. This is
     736             :   ensured by making all allocations from the `lowest' part of any
     737             :   found chunk.  Further, no free chunk physically borders another one,
     738             :   so each free chunk is known to be preceded and followed by either
     739             :   inuse chunks or the ends of memory.
     740             : 
     741             :   Note that the `foot' of the current chunk is actually represented
     742             :   as the prev_foot of the NEXT chunk. This makes it easier to
     743             :   deal with alignments etc but can be very confusing when trying
     744             :   to extend or adapt this code.
     745             : 
     746             :   The exceptions to all this are
     747             : 
     748             :      1. The special chunk `top' is the top-most available chunk (i.e.,
     749             :         the one bordering the end of available memory). It is treated
     750             :         specially.  Top is never included in any bin, is used only if
     751             :         no other chunk is available, and is released back to the
     752             :         system if it is very large (see M_TRIM_THRESHOLD).  In effect,
     753             :         the top chunk is treated as larger (and thus less well
     754             :         fitting) than any other available chunk.  The top chunk
     755             :         doesn't update its trailing size field since there is no next
     756             :         contiguous chunk that would have to index off it. However,
     757             :         space is still allocated for it (TOP_FOOT_SIZE) to enable
     758             :         separation or merging when space is extended.
     759             : 
      760             :      2. Chunks allocated via mmap have both cinuse and pinuse bits
     761             :         cleared in their head fields.  Because they are allocated
     762             :         one-by-one, each must carry its own prev_foot field, which is
     763             :         also used to hold the offset this chunk has within its mmapped
     764             :         region, which is needed to preserve alignment. Each mmapped
     765             :         chunk is trailed by the first two fields of a fake next-chunk
     766             :         for sake of usage checks.
     767             : 
     768             : */
     769             : 
     770             : struct malloc_chunk {
     771             :   size_t               prev_foot;  /* Size of previous chunk (if free).  */
     772             :   size_t               head;       /* Size and inuse bits. */
     773             :   struct malloc_chunk* fd;         /* double links -- used only if free. */
     774             :   struct malloc_chunk* bk;
     775             : };
     776             : 
     777             : typedef struct malloc_chunk  mchunk;
     778             : typedef struct malloc_chunk* mchunkptr;
     779             : typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
     780             : typedef unsigned int bindex_t;         /* Described below */
     781             : typedef unsigned int binmap_t;         /* Described below */
     782             : typedef unsigned int flag_t;           /* The type of various bit flag sets */
     783             : 
     784             : /* ------------------- Chunks sizes and alignments ----------------------- */
     785             : 
     786             : #define MCHUNK_SIZE         (sizeof(mchunk))
     787             : 
     788             : #if FOOTERS
     789             : #define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
     790             : #else /* FOOTERS */
     791             : #define CHUNK_OVERHEAD      (SIZE_T_SIZE)
     792             : #endif /* FOOTERS */
     793             : 
     794             : /* MMapped chunks need a second word of overhead ... */
     795             : #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
     796             : /* ... and additional padding for fake next-chunk at foot */
     797             : #define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
     798             : 
     799             : /* The smallest size we can malloc is an aligned minimal chunk */
     800             : #define MIN_CHUNK_SIZE\
     801             :   ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
     802             : 
     803             : /* conversion from malloc headers to user pointers, and back */
     804             : #define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
     805             : #define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
     806             : /* chunk associated with aligned address A */
     807             : #define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
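
Since both conversions are a fixed two-word shift, they invert each other. A standalone check (struct chunk_hdr stands in for malloc_chunk's first two fields):

#include <assert.h>
#include <stddef.h>

struct chunk_hdr { size_t prev_foot, head; };  /* stand-in for malloc_chunk */

int main (void)
{
  struct chunk_hdr c;
  void *mem = (char *) &c + 2 * sizeof (size_t);          /* chunk2mem */
  struct chunk_hdr *p = (struct chunk_hdr *)
    ((char *) mem - 2 * sizeof (size_t));                 /* mem2chunk */
  assert (p == &c);  /* round trip recovers the chunk pointer */
  return 0;
}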
     808             : 
     809             : /* Bounds on request (not chunk) sizes. */
     810             : #define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
     811             : #define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
     812             : 
     813             : /* pad request bytes into a usable size */
     814             : #define pad_request(req) \
     815             :    (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
     816             : 
     817             : /* pad request, checking for minimum (but not maximum) */
     818             : #define request2size(req) \
     819             :   (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
     820             : 
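A worked example of the padding math, as a standalone sketch under assumed defaults (64-bit size_t, 16-byte MALLOC_ALIGNMENT, FOOTERS off, so CHUNK_OVERHEAD is 8 and MIN_CHUNK_SIZE is 32):

#include <assert.h>
#include <stddef.h>

enum { OVERHEAD = 8, MASK = 15, MIN_CHUNK = 32,
       MIN_REQ = MIN_CHUNK - OVERHEAD - 1 };

static size_t r2s (size_t req)   /* mirrors request2size() above */
{
  return (req < MIN_REQ) ? MIN_CHUNK
                         : ((req + OVERHEAD + MASK) & ~(size_t) MASK);
}

int main (void)
{
  assert (r2s (1) == 32);    /* tiny requests pad up to MIN_CHUNK_SIZE  */
  assert (r2s (24) == 32);   /* 24 + 8 overhead bytes still fit in 32   */
  assert (r2s (25) == 48);   /* 33 rounds up to the next 16-byte step   */
  return 0;
}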
     821             : 
     822             : /* ------------------ Operations on head and foot fields ----------------- */
     823             : 
     824             : /*
      825             :   The head field of a chunk is or'ed with PINUSE_BIT when the previous
      826             :   adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is
      827             :   in use, unless mmapped, in which case both bits are cleared.
     828             : 
     829             :   FLAG4_BIT is not used by this malloc, but might be useful in extensions.
     830             : */
     831             : 
     832             : #define PINUSE_BIT          (SIZE_T_ONE)
     833             : #define CINUSE_BIT          (SIZE_T_TWO)
     834             : #define FLAG4_BIT           (SIZE_T_FOUR)
     835             : #define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
     836             : #define FLAG_BITS           (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
     837             : 
     838             : /* Head value for fenceposts */
     839             : #define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
     840             : 
     841             : /* extraction of fields from head words */
     842             : #define cinuse(p)           ((p)->head & CINUSE_BIT)
     843             : #define pinuse(p)           ((p)->head & PINUSE_BIT)
     844             : #define flag4inuse(p)       ((p)->head & FLAG4_BIT)
     845             : #define is_inuse(p)         (((p)->head & INUSE_BITS) != PINUSE_BIT)
     846             : #define is_mmapped(p)       (((p)->head & INUSE_BITS) == 0)
     847             : 
     848             : #define chunksize(p)        ((p)->head & ~(FLAG_BITS))
     849             : 
     850             : #define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
     851             : #define set_flag4(p)        ((p)->head |= FLAG4_BIT)
     852             : #define clear_flag4(p)      ((p)->head &= ~FLAG4_BIT)
     853             : 
     854             : /* Treat space at ptr +/- offset as a chunk */
     855             : #define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
     856             : #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
     857             : 
     858             : /* Ptr to next or previous physical malloc_chunk. */
     859             : #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
     860             : #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
     861             : 
     862             : /* extract next chunk's pinuse bit */
     863             : #define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
     864             : 
     865             : /* Get/set size at footer */
     866             : #define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
     867             : #define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
     868             : 
     869             : /* Set size, pinuse bit, and foot */
     870             : #define set_size_and_pinuse_of_free_chunk(p, s)\
     871             :   ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
     872             : 
     873             : /* Set size, pinuse bit, foot, and clear next pinuse */
     874             : #define set_free_with_pinuse(p, s, n)\
     875             :   (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
     876             : 
     877             : /* Get the internal overhead associated with chunk p */
     878             : #define overhead_for(p)\
     879             :  (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
     880             : 
     881             : /* Return true if malloced space is not necessarily cleared */
     882             : #if MMAP_CLEARS
     883             : #define calloc_must_clear(p) (!is_mmapped(p))
     884             : #else /* MMAP_CLEARS */
     885             : #define calloc_must_clear(p) (1)
     886             : #endif /* MMAP_CLEARS */
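
The macros above are what make the O(1) neighbor checks from the chunk-representation comment possible. A hedged sketch (sketch_coalesce_sizes is illustrative, not a dlmalloc function; 'p' is assumed to be a non-mmapped chunk being freed):

static size_t sketch_coalesce_sizes (mchunkptr p)
{
  size_t psize = chunksize (p);
  mchunkptr next = chunk_plus_offset (p, psize);

  if (!pinuse (p))
    {
      /* prev_foot is valid only when the previous chunk is free */
      mchunkptr prev = chunk_minus_offset (p, p->prev_foot);
      psize += chunksize (prev);   /* size after a backward merge */
    }
  if (!cinuse (next))
    psize += chunksize (next);     /* size after a forward merge */
  return psize;                    /* total span the free chunk could cover */
}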
     887             : 
     888             : /* ---------------------- Overlaid data structures ----------------------- */
     889             : 
     890             : /*
     891             :   When chunks are not in use, they are treated as nodes of either
     892             :   lists or trees.
     893             : 
     894             :   "Small"  chunks are stored in circular doubly-linked lists, and look
     895             :   like this:
     896             : 
     897             :     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     898             :             |             Size of previous chunk                            |
     899             :             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     900             :     `head:' |             Size of chunk, in bytes                         |P|
     901             :       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     902             :             |             Forward pointer to next chunk in list             |
     903             :             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     904             :             |             Back pointer to previous chunk in list            |
     905             :             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     906             :             |             Unused space (may be 0 bytes long)                .
     907             :             .                                                               .
     908             :             .                                                               |
     909             : nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     910             :     `foot:' |             Size of chunk, in bytes                           |
     911             :             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     912             : 
     913             :   Larger chunks are kept in a form of bitwise digital trees (aka
     914             :   tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
     915             :   free chunks greater than 256 bytes, their size doesn't impose any
     916             :   constraints on user chunk sizes.  Each node looks like:
     917             : 
     918             :     chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     919             :             |             Size of previous chunk                            |
     920             :             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     921             :     `head:' |             Size of chunk, in bytes                         |P|
     922             :       mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     923             :             |             Forward pointer to next chunk of same size        |
     924             :             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     925             :             |             Back pointer to previous chunk of same size       |
     926             :             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     927             :             |             Pointer to left child (child[0])                  |
     928             :             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     929             :             |             Pointer to right child (child[1])                 |
     930             :             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     931             :             |             Pointer to parent                                 |
     932             :             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     933             :             |             bin index of this chunk                           |
     934             :             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     935             :             |             Unused space                                      .
     936             :             .                                                               |
     937             : nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     938             :     `foot:' |             Size of chunk, in bytes                           |
     939             :             +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
     940             : 
     941             :   Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
     942             :   of the same size are arranged in a circularly-linked list, with only
     943             :   the oldest chunk (the next to be used, in our FIFO ordering)
     944             :   actually in the tree.  (Tree members are distinguished by a non-null
     945             :   parent pointer.)  If a chunk with the same size as an existing node
     946             :   is inserted, it is linked off the existing node using pointers that
     947             :   work in the same way as fd/bk pointers of small chunks.
     948             : 
     949             :   Each tree contains a power of 2 sized range of chunk sizes (the
     950             :   smallest is 0x100 <= x < 0x180), which is divided in half at each
     951             :   tree level, with the chunks in the smaller half of the range (0x100
     952             :   <= x < 0x140 for the top node) in the left subtree and the larger
     953             :   half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
     954             :   done by inspecting individual bits.
     955             : 
     956             :   Using these rules, each node's left subtree contains all smaller
     957             :   sizes than its right subtree.  However, the node at the root of each
     958             :   subtree has no particular ordering relationship to either.  (The
     959             :   dividing line between the subtree sizes is based on trie relation.)
     960             :   If we remove the last chunk of a given size from the interior of the
     961             :   tree, we need to replace it with a leaf node.  The tree ordering
     962             :   rules permit a node to be replaced by any leaf below it.
     963             : 
     964             :   The smallest chunk in a tree (a common operation in a best-fit
     965             :   allocator) can be found by walking a path to the leftmost leaf in
     966             :   the tree.  Unlike a usual binary tree, where we follow left child
     967             :   pointers until we reach a null, here we follow the right child
     968             :   pointer any time the left one is null, until we reach a leaf with
     969             :   both child pointers null. The smallest chunk in the tree will be
     970             :   somewhere along that path.
     971             : 
     972             :   The worst case number of steps to add, find, or remove a node is
     973             :   bounded by the number of bits differentiating chunks within
     974             :   bins. Under current bin calculations, this ranges from 6 up to 21
     975             :   (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
     976             :   is of course much better.
     977             : */
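                     : 
                     : /*
                     :   Illustrative sketch: linking a free small chunk P into the circular
                     :   doubly-linked list headed by B, using the fd/bk fields shown in the
                     :   diagram above (the same basic motion as the insert_small_chunk macro
                     :   later in this file):
                     : 
                     :     P->fd = B->fd;       P's successor becomes B's old successor
                     :     P->bk = B;
                     :     B->fd->bk = P;       the old successor now points back to P
                     :     B->fd = P;           B points forward to P
                     : 
                     :   Unlinking P reverses this: P->bk->fd = P->fd; P->fd->bk = P->bk;
                     : */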
     978             : 
     979             : struct malloc_tree_chunk {
     980             :   /* The first four fields must be compatible with malloc_chunk */
     981             :   size_t                    prev_foot;
     982             :   size_t                    head;
     983             :   struct malloc_tree_chunk* fd;
     984             :   struct malloc_tree_chunk* bk;
     985             : 
     986             :   struct malloc_tree_chunk* child[2];
     987             :   struct malloc_tree_chunk* parent;
     988             :   bindex_t                  index;
     989             : };
     990             : 
     991             : typedef struct malloc_tree_chunk  tchunk;
     992             : typedef struct malloc_tree_chunk* tchunkptr;
     993             : typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
     994             : 
     995             : /* A little helper macro for trees */
     996             : #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
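                     : 
                     : /* Illustrative sketch of the smallest-chunk search described above: a
                     :    hypothetical helper that follows leftmost_child until both children
                     :    are null, tracking the smallest size seen along the path (assumes
                     :    t != 0 on entry). */
                     : static tchunkptr smallest_on_leftmost_path(tchunkptr t) {
                     :   tchunkptr best = t;
                     :   while (t != 0) {
                     :     if (chunksize(t) < chunksize(best))
                     :       best = t;
                     :     t = leftmost_child(t); /* right child only when left is null */
                     :   }
                     :   return best;
                     : }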
     997             : 
     998             : /* ----------------------------- Segments -------------------------------- */
     999             : 
    1000             : /*
    1001             :   Each malloc space may include non-contiguous segments, held in a
    1002             :   list headed by an embedded malloc_segment record representing the
    1003             :   top-most space. Segments also include flags holding properties of
    1004             :   the space. Large chunks that are directly allocated by mmap are not
    1005             :   included in this list. They are instead independently created and
    1006             :   destroyed without otherwise keeping track of them.
    1007             : 
    1008             :   Segment management mainly comes into play for spaces allocated by
    1009             :   MMAP.  Any call to MMAP might or might not return memory that is
    1010             :   adjacent to an existing segment.  MORECORE normally contiguously
    1011             :   extends the current space, so this space is almost always adjacent,
    1012             :   which is simpler and faster to deal with. (This is why MORECORE is
    1013             :   used preferentially to MMAP when both are available -- see
    1014             :   sys_alloc.)  When allocating using MMAP, we don't use any of the
    1015             :   hinting mechanisms (inconsistently) supported in various
    1016             :   implementations of unix mmap, or distinguish reserving from
    1017             :   committing memory. Instead, we just ask for space, and exploit
    1018             :   contiguity when we get it.  It is probably possible to do
    1019             :   better than this on some systems, but no general scheme seems
    1020             :   to be significantly better.
    1021             : 
    1022             :   Management entails a simpler variant of the consolidation scheme
    1023             :   used for chunks to reduce fragmentation -- new adjacent memory is
    1024             :   normally prepended or appended to an existing segment. However,
    1025             :   there are limitations compared to chunk consolidation that mostly
    1026             :   reflect the fact that segment processing is relatively infrequent
    1027             :     (occurring only when getting memory from the system) and that we
    1028             :   don't expect to have huge numbers of segments:
    1029             : 
    1030             :   * Segments are not indexed, so traversal requires linear scans.  (It
    1031             :     would be possible to index these, but is not worth the extra
    1032             :     overhead and complexity for most programs on most platforms.)
    1033             :   * New segments are only appended to old ones when holding top-most
    1034             :     memory; if they cannot be prepended to others, they are held in
    1035             :     different segments.
    1036             : 
    1037             :   Except for the top-most segment of an mstate, each segment record
    1038             :   is kept at the tail of its segment. Segments are added by pushing
    1039             :   segment records onto the list headed by &mstate.seg for the
    1040             :   containing mstate.
    1041             : 
    1042             :   Segment flags control allocation/merge/deallocation policies:
    1043             :   * If EXTERN_BIT set, then we did not allocate this segment,
    1044             :     and so should not try to deallocate or merge with others.
    1045             :     (This currently holds only for the initial segment passed
    1046             :     into create_mspace_with_base.)
    1047             :   * If USE_MMAP_BIT set, the segment may be merged with
    1048             :     other surrounding mmapped segments and trimmed/de-allocated
    1049             :     using munmap.
    1050             :   * If neither bit is set, then the segment was obtained using
    1051             :     MORECORE so can be merged with surrounding MORECORE'd segments
    1052             :     and deallocated/trimmed using MORECORE with negative arguments.
    1053             : */
    1054             : 
    1055             : struct malloc_segment {
    1056             :   char*        base;             /* base address */
    1057             :   size_t       size;             /* allocated size */
    1058             :   struct malloc_segment* next;   /* ptr to next segment */
    1059             :   flag_t       sflags;           /* mmap and extern flag */
    1060             : };
    1061             : 
    1062             : #define is_mmapped_segment(S)  ((S)->sflags & USE_MMAP_BIT)
    1063             : #define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
    1064             : 
    1065             : typedef struct malloc_segment  msegment;
    1066             : typedef struct malloc_segment* msegmentptr;
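                     : 
                     : /* Illustrative sketch of the linear scan noted above (segments are not
                     :    indexed): a hypothetical helper that walks the next-linked list
                     :    starting at an mstate's embedded record, e.g. count_segments(&m->seg). */
                     : static size_t count_segments(msegmentptr sp) {
                     :   size_t n = 0;
                     :   for (; sp != 0; sp = sp->next)
                     :     ++n;
                     :   return n;
                     : }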
    1067             : 
    1068             : /* ---------------------------- malloc_state ----------------------------- */
    1069             : 
    1070             : /*
    1071             :    A malloc_state holds all of the bookkeeping for a space.
    1072             :    The main fields are:
    1073             : 
    1074             :   Top
    1075             :     The topmost chunk of the currently active segment. Its size is
    1076             :     cached in topsize.  The actual size of topmost space is
    1077             :     topsize+TOP_FOOT_SIZE, which includes space reserved for adding
    1078             :     fenceposts and segment records if necessary when getting more
    1079             :     space from the system.  The size at which to autotrim top is
    1080             :     cached from mparams in trim_check, except that it is disabled if
    1081             :     an autotrim fails.
    1082             : 
    1083             :   Designated victim (dv)
    1084             :     This is the preferred chunk for servicing small requests that
    1085             :     don't have exact fits.  It is normally the chunk split off most
    1086             :     recently to service another small request.  Its size is cached in
    1087             :     dvsize. The link fields of this chunk are not maintained since it
    1088             :     is not kept in a bin.
    1089             : 
    1090             :   SmallBins
    1091             :     An array of bin headers for free chunks.  These bins hold chunks
    1092             :     with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
    1093             :     chunks of all the same size, spaced 8 bytes apart.  To simplify
    1094             :     use in double-linked lists, each bin header acts as a malloc_chunk
    1095             :     pointing to the real first node, if it exists (else pointing to
    1096             :     itself).  This avoids special-casing for headers.  But to avoid
    1097             :     waste, we allocate only the fd/bk pointers of bins, and then use
    1098             :     repositioning tricks to treat these as the fields of a chunk.
    1099             : 
    1100             :   TreeBins
    1101             :     Treebins are pointers to the roots of trees holding a range of
    1102             :     sizes. There are 2 equally spaced treebins for each power of two
    1103             :     from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
    1104             :     larger.
    1105             : 
    1106             :   Bin maps
    1107             :     There is one bit map for small bins ("smallmap") and one for
    1108             :     treebins ("treemap").  Each bin sets its bit when non-empty, and
    1109             :     clears the bit when empty.  Bit operations are then used to avoid
    1110             :     bin-by-bin searching -- nearly all "search" is done without ever
    1111             :     looking at bins that won't be selected.  The bit maps
    1112             :     conservatively use 32 bits per map word, even on a 64-bit system.
    1113             :     For a good description of some of the bit-based techniques used
    1114             :     here, see Henry S. Warren Jr's book "Hacker's Delight" (and
    1115             :     supplement at http://hackersdelight.org/). Many of these are
    1116             :     intended to reduce the branchiness of paths through malloc etc, as
    1117             :     well as to reduce the number of memory locations read or written.
    1118             : 
    1119             :   Segments
    1120             :     A list of segments headed by an embedded malloc_segment record
    1121             :     representing the initial space.
    1122             : 
    1123             :   Address check support
    1124             :     The least_addr field is the least address ever obtained from
    1125             :     MORECORE or MMAP. Attempted frees and reallocs of any address less
    1126             :     than this are trapped (unless INSECURE is defined).
    1127             : 
    1128             :   Magic tag
    1129             :     A cross-check field that should always hold the same value as mparams.magic.
    1130             : 
    1131             :   Max allowed footprint
    1132             :     The maximum allowed bytes to allocate from system (zero means no limit)
    1133             : 
    1134             :   Flags
    1135             :     Bits recording whether to use MMAP, locks, or contiguous MORECORE
    1136             : 
    1137             :   Statistics
    1138             :     Each space keeps track of current and maximum system memory
    1139             :     obtained via MORECORE or MMAP.
    1140             : 
    1141             :   Trim support
    1142             :     Fields holding the amount of unused topmost memory that should trigger
    1143             :     trimming, and a counter to force periodic scanning to release unused
    1144             :     non-topmost segments.
    1145             : 
    1146             :   Locking
    1147             :     If USE_LOCKS is defined, the "mutex" lock is acquired and released
    1148             :     around every public call using this mspace.
    1149             : 
    1150             :   Extension support
    1151             :     A void* pointer and a size_t field that can be used to help implement
    1152             :     extensions to this malloc.
    1153             : */
    1154             : 
    1155             : /* Bin types, widths and sizes */
    1156             : #define NSMALLBINS        (32U)
    1157             : #define NTREEBINS         (32U)
    1158             : #define SMALLBIN_SHIFT    (3U)
    1159             : #define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
    1160             : #define TREEBIN_SHIFT     (8U)
    1161             : #define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
    1162             : #define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
    1163             : #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
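                     : 
                     : /* Worked values for the definitions above (with SIZE_T_ONE == 1):
                     :      SMALLBIN_WIDTH == 1 << 3 == 8    spacing between small bin sizes
                     :      MIN_LARGE_SIZE == 1 << 8 == 256  smallest tree-managed chunk
                     :      MAX_SMALL_SIZE == 255            largest small chunk
                     :    so small bins cover chunk sizes below 256 in 8-byte steps, and
                     :    chunks of 256 bytes or more are kept in the tree bins. */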
    1164             : 
    1165             : struct malloc_state {
    1166             :   binmap_t   smallmap;
    1167             :   binmap_t   treemap;
    1168             :   size_t     dvsize;
    1169             :   size_t     topsize;
    1170             :   char*      least_addr;
    1171             :   mchunkptr  dv;
    1172             :   mchunkptr  top;
    1173             :   size_t     trim_check;
    1174             :   size_t     release_checks;
    1175             :   size_t     magic;
    1176             :   mchunkptr  smallbins[(NSMALLBINS+1)*2];
    1177             :   tbinptr    treebins[NTREEBINS];
    1178             :   size_t     footprint;
    1179             :   size_t     max_footprint;
    1180             :   size_t     footprint_limit; /* zero means no limit */
    1181             :   flag_t     mflags;
    1182             : #if USE_LOCKS
    1183             :   MLOCK_T    mutex;     /* locate lock among fields that rarely change */
    1184             : #endif /* USE_LOCKS */
    1185             :   msegment   seg;
    1186             :   void*      extp;      /* Unused but available for extensions */
    1187             :   size_t     exts;
    1188             : };
    1189             : 
    1190             : typedef struct malloc_state*    mstate;
    1191             : 
    1192             : /* ------------- Global malloc_state and malloc_params ------------------- */
    1193             : 
    1194             : /*
    1195             :   malloc_params holds global properties, including those that can be
    1196             :   dynamically set using mallopt. There is a single instance, mparams,
    1197             :   initialized in init_mparams. Note that the non-zeroness of "magic"
    1198             :   also serves as an initialization flag.
    1199             : */
    1200             : 
    1201             : struct malloc_params {
    1202             :   size_t magic;
    1203             :   size_t page_size;
    1204             :   size_t granularity;
    1205             :   size_t mmap_threshold;
    1206             :   size_t trim_threshold;
    1207             :   flag_t default_mflags;
    1208             : };
    1209             : 
    1210             : static struct malloc_params mparams;
    1211             : 
    1212             : /* Ensure mparams initialized */
    1213             : #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
    1214             : 
    1215             : #if !ONLY_MSPACES
    1216             : 
    1217             : /* The global malloc_state used for all non-"mspace" calls */
    1218             : static struct malloc_state _gm_;
    1219             : #define gm                 (&_gm_)
    1220             : #define is_global(M)       ((M) == &_gm_)
    1221             : 
    1222             : #endif /* !ONLY_MSPACES */
    1223             : 
    1224             : #define is_initialized(M)  ((M)->top != 0)
    1225             : 
    1226             : /* -------------------------- system alloc setup ------------------------- */
    1227             : 
    1228             : /* Operations on mflags */
    1229             : 
    1230             : #define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
    1231             : #define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
    1232             : #if USE_LOCKS
    1233             : #define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
    1234             : #else
    1235             : #define disable_lock(M)
    1236             : #endif
    1237             : 
    1238             : #define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
    1239             : #define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
    1240             : #if HAVE_MMAP
    1241             : #define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
    1242             : #else
    1243             : #define disable_mmap(M)
    1244             : #endif
    1245             : 
    1246             : #define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
    1247             : #define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
    1248             : #define use_noexpand(M) ((M)->mflags & USE_NOEXPAND_BIT)
    1249             : #define disable_expand(M) ((M)->mflags |= USE_NOEXPAND_BIT)
    1250             : #define use_trace(M) ((M)->mflags & USE_TRACE_BIT)
    1251             : #define enable_trace(M) ((M)->mflags |= USE_TRACE_BIT)
    1252             : #define disable_trace(M) ((M)->mflags &= ~USE_TRACE_BIT)
    1253             : 
    1254             : #define set_lock(M,L)\
    1255             :  ((M)->mflags = (L)?\
    1256             :   ((M)->mflags | USE_LOCK_BIT) :\
    1257             :   ((M)->mflags & ~USE_LOCK_BIT))
    1258             : 
    1259             : /* page-align a size */
    1260             : #define page_align(S)\
    1261             :  (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
    1262             : 
    1263             : /* granularity-align a size */
    1264             : #define granularity_align(S)\
    1265             :   (((S) + (mparams.granularity - SIZE_T_ONE))\
    1266             :    & ~(mparams.granularity - SIZE_T_ONE))
    1267             : 
    1268             : 
    1269             : /* For mmap, use granularity alignment on windows, else page-align */
    1270             : #ifdef WIN32
    1271             : #define mmap_align(S) granularity_align(S)
    1272             : #else
    1273             : #define mmap_align(S) page_align(S)
    1274             : #endif
    1275             : 
    1276             : /* For sys_alloc, enough padding to ensure a request can be malloc'ed on success */
    1277             : #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
    1278             : 
    1279             : #define is_page_aligned(S)\
    1280             :    (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
    1281             : #define is_granularity_aligned(S)\
    1282             :    (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
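                     : 
                     : /* Worked example, assuming mparams.page_size == 4096: page_align adds
                     :    page_size-1 and masks off the low bits, so
                     :      page_align(4096) == 4096
                     :      page_align(4097) == 8192
                     :    and is_page_aligned(8192) is nonzero. */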
    1283             : 
    1284             : /*  True if segment S holds address A */
    1285             : #define segment_holds(S, A)\
    1286             :   ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
    1287             : 
    1288             : /* Return segment holding given address */
    1289             : __clib_nosanitize_addr
    1290        1529 : static msegmentptr segment_holding(mstate m, char* addr) {
    1291        1529 :   msegmentptr sp = &m->seg;
    1292             :   for (;;) {
    1293        1529 :     if (addr >= sp->base && addr < sp->base + sp->size)
    1294        1529 :       return sp;
    1295           0 :     if ((sp = sp->next) == 0)
    1296           0 :       return 0;
    1297             :   }
    1298             : }
    1299             : 
    1300             : /* Return true if segment contains a segment link */
    1301             : __clib_nosanitize_addr
    1302           0 : static int has_segment_link(mstate m, msegmentptr ss) {
    1303           0 :   msegmentptr sp = &m->seg;
    1304             :   for (;;) {
    1305           0 :     if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
    1306           0 :       return 1;
    1307           0 :     if ((sp = sp->next) == 0)
    1308           0 :       return 0;
    1309             :   }
    1310             : }
    1311             : 
    1312             : #ifndef MORECORE_CANNOT_TRIM
    1313             : #define should_trim(M,s)  ((s) > (M)->trim_check)
    1314             : #else  /* MORECORE_CANNOT_TRIM */
    1315             : #define should_trim(M,s)  (0)
    1316             : #endif /* MORECORE_CANNOT_TRIM */
    1317             : 
    1318             : /*
    1319             :   TOP_FOOT_SIZE is padding at the end of a segment, including space
    1320             :   that may be needed to place segment records and fenceposts when new
    1321             :   noncontiguous segments are added.
    1322             : */
    1323             : #define TOP_FOOT_SIZE\
    1324             :   (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
    1325             : 
    1326             : 
    1327             : /* -------------------------------  Hooks -------------------------------- */
    1328             : 
    1329             : /*
    1330             :   PREACTION should be defined to return 0 on success, and nonzero on
    1331             :   failure. If you are not using locking, you can redefine these to do
    1332             :   anything you like.
    1333             : */
    1334             : 
    1335             : #if USE_LOCKS
    1336             : #define PREACTION(M)  ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
    1337             : #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
    1338             : #else /* USE_LOCKS */
    1339             : 
    1340             : #ifndef PREACTION
    1341             : #define PREACTION(M) (0)
    1342             : #endif  /* PREACTION */
    1343             : 
    1344             : #ifndef POSTACTION
    1345             : #define POSTACTION(M)
    1346             : #endif  /* POSTACTION */
    1347             : 
    1348             : #endif /* USE_LOCKS */
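                     : 
                     : /* A sketch of how the public entry points later in this file use these
                     :    hooks: PREACTION returns 0 once any required lock is held, and
                     :    POSTACTION releases it.
                     : 
                     :      if (!PREACTION(m)) {
                     :        ... operate on the mspace ...
                     :        POSTACTION(m);
                     :      }
                     : */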
    1349             : 
    1350             : /*
    1351             :   CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
    1352             :   USAGE_ERROR_ACTION is triggered on detected bad frees and
    1353             :   reallocs. The argument p is an address that might have triggered the
    1354             :   fault. It is ignored by the two predefined actions, but might be
    1355             :   useful in custom actions that try to help diagnose errors.
    1356             : */
    1357             : 
    1358             : #if PROCEED_ON_ERROR
    1359             : 
    1360             : /* A count of the number of corruption errors causing resets */
    1361             : int malloc_corruption_error_count;
    1362             : 
    1363             : /* default corruption action */
    1364             : static void reset_on_error(mstate m);
    1365             : 
    1366             : #define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
    1367             : #define USAGE_ERROR_ACTION(m, p)
    1368             : 
    1369             : #else /* PROCEED_ON_ERROR */
    1370             : 
    1371             : #ifndef CORRUPTION_ERROR_ACTION
    1372             : #define CORRUPTION_ERROR_ACTION(m) DLM_ABORT
    1373             : #endif /* CORRUPTION_ERROR_ACTION */
    1374             : 
    1375             : #ifndef USAGE_ERROR_ACTION
    1376             : #define USAGE_ERROR_ACTION(m,p) DLM_ABORT
    1377             : #endif /* USAGE_ERROR_ACTION */
    1378             : 
    1379             : #endif /* PROCEED_ON_ERROR */
    1380             : 
    1381             : 
    1382             : /* -------------------------- Debugging setup ---------------------------- */
    1383             : 
    1384             : #if ! DEBUG
    1385             : 
    1386             : #define check_free_chunk(M,P)
    1387             : #define check_inuse_chunk(M,P)
    1388             : #define check_malloced_chunk(M,P,N)
    1389             : #define check_mmapped_chunk(M,P)
    1390             : #define check_malloc_state(M)
    1391             : #define check_top_chunk(M,P)
    1392             : 
    1393             : #else /* DEBUG */
    1394             : #define check_free_chunk(M,P)       do_check_free_chunk(M,P)
    1395             : #define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
    1396             : #define check_top_chunk(M,P)        do_check_top_chunk(M,P)
    1397             : #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
    1398             : #define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
    1399             : #define check_malloc_state(M)       do_check_malloc_state(M)
    1400             : 
    1401             : static void   do_check_any_chunk(mstate m, mchunkptr p);
    1402             : static void   do_check_top_chunk(mstate m, mchunkptr p);
    1403             : static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
    1404             : static void   do_check_inuse_chunk(mstate m, mchunkptr p);
    1405             : static void   do_check_free_chunk(mstate m, mchunkptr p);
    1406             : static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
    1407             : static void   do_check_tree(mstate m, tchunkptr t);
    1408             : static void   do_check_treebin(mstate m, bindex_t i);
    1409             : static void   do_check_smallbin(mstate m, bindex_t i);
    1410             : static void   do_check_malloc_state(mstate m);
    1411             : static int    bin_find(mstate m, mchunkptr x);
    1412             : static size_t traverse_and_check(mstate m);
    1413             : #endif /* DEBUG */
    1414             : 
    1415             : /* ---------------------------- Indexing Bins ---------------------------- */
    1416             : 
    1417             : #define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
    1418             : #define small_index(s)      (bindex_t)((s)  >> SMALLBIN_SHIFT)
    1419             : #define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
    1420             : #define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
    1421             : 
    1422             : /* addressing by index. See above about smallbin repositioning */
    1423             : #define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
    1424             : #define treebin_at(M,i)     (&((M)->treebins[i]))
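                     : 
                     : /* A sketch of the repositioning trick mentioned in the SmallBins
                     :    comment: smallbin_at(M,i) returns &smallbins[i<<1] viewed as a
                     :    chunk, so the chunk's fd/bk fields (two words into a malloc_chunk)
                     :    land on slots (i<<1)+2 and (i<<1)+3.  This is why the array is
                     :    (NSMALLBINS+1)*2 long and why bin headers spend no space on
                     :    head/prev_foot.  An empty bin is a self-linked header:
                     : 
                     :      sbinptr b = smallbin_at(m, i);
                     :      b->fd = b->bk = b;
                     : */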
    1425             : 
    1426             : /* assign tree index for size S to variable I. Use x86 asm if possible  */
    1427             : #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
    1428             : #define compute_tree_index(S, I)\
    1429             : {\
    1430             :   unsigned int X = S >> TREEBIN_SHIFT;\
    1431             :   if (X == 0)\
    1432             :     I = 0;\
    1433             :   else if (X > 0xFFFF)\
    1434             :     I = NTREEBINS-1;\
    1435             :   else {\
    1436             :     unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
    1437             :     I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
    1438             :   }\
    1439             : }
    1440             : 
    1441             : #elif defined (__INTEL_COMPILER)
    1442             : #define compute_tree_index(S, I)\
    1443             : {\
    1444             :   size_t X = S >> TREEBIN_SHIFT;\
    1445             :   if (X == 0)\
    1446             :     I = 0;\
    1447             :   else if (X > 0xFFFF)\
    1448             :     I = NTREEBINS-1;\
    1449             :   else {\
    1450             :     unsigned int K = _bit_scan_reverse (X); \
    1451             :     I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
    1452             :   }\
    1453             : }
    1454             : 
    1455             : #elif defined(_MSC_VER) && _MSC_VER>=1300
    1456             : #define compute_tree_index(S, I)\
    1457             : {\
    1458             :   size_t X = S >> TREEBIN_SHIFT;\
    1459             :   if (X == 0)\
    1460             :     I = 0;\
    1461             :   else if (X > 0xFFFF)\
    1462             :     I = NTREEBINS-1;\
    1463             :   else {\
    1464             :     unsigned int K;\
    1465             :     _BitScanReverse((DWORD *) &K, (DWORD) X);\
    1466             :     I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
    1467             :   }\
    1468             : }
    1469             : 
    1470             : #else /* GNUC */
    1471             : #define compute_tree_index(S, I)\
    1472             : {\
    1473             :   size_t X = S >> TREEBIN_SHIFT;\
    1474             :   if (X == 0)\
    1475             :     I = 0;\
    1476             :   else if (X > 0xFFFF)\
    1477             :     I = NTREEBINS-1;\
    1478             :   else {\
    1479             :     unsigned int Y = (unsigned int)X;\
    1480             :     unsigned int N = ((Y - 0x100) >> 16) & 8;\
    1481             :     unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
    1482             :     N += K;\
    1483             :     N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
    1484             :     K = 14 - N + ((Y <<= K) >> 15);\
    1485             :     I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
    1486             :   }\
    1487             : }
    1488             : #endif /* GNUC */
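                     : 
                     : /* A sketch of the same computation as a plain function (the macro form
                     :    above is what the allocator actually uses; tree_index_for is a
                     :    hypothetical helper), with one worked value: for S == 0x300,
                     :    X == 0x300 >> 8 == 3, K == 1, so I == (1 << 1) + 1 == 3, and
                     :    minsize_for_tree_index(3), defined below, is 0x300 as expected. */
                     : static bindex_t tree_index_for(size_t S) {
                     :   bindex_t I;
                     :   compute_tree_index(S, I);
                     :   return I; /* tree_index_for(0x300) == 3 */
                     : }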
    1489             : 
    1490             : /* Bit representing maximum resolved size in a treebin at i */
    1491             : #define bit_for_tree_index(i) \
    1492             :    (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
    1493             : 
    1494             : /* Shift placing maximum resolved bit in a treebin at i as sign bit */
    1495             : #define leftshift_for_tree_index(i) \
    1496             :    ((i == NTREEBINS-1)? 0 : \
    1497             :     ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
    1498             : 
    1499             : /* The size of the smallest chunk held in bin with index i */
    1500             : #define minsize_for_tree_index(i) \
    1501             :    ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
    1502             :    (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
    1503             : 
    1504             : 
    1505             : /* ------------------------ Operations on bin maps ----------------------- */
    1506             : 
    1507             : /* bit corresponding to given index */
    1508             : #define idx2bit(i)              ((binmap_t)(1) << (i))
    1509             : 
    1510             : /* Mark/Clear bits with given index */
    1511             : #define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
    1512             : #define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
    1513             : #define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
    1514             : 
    1515             : #define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
    1516             : #define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
    1517             : #define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
    1518             : 
    1519             : /* isolate the least set bit of a bitmap */
    1520             : #define least_bit(x)         ((x) & -(x))
    1521             : 
    1522             : /* mask with all bits to left of least bit of x on */
    1523             : #define left_bits(x)         ((x<<1) | -(x<<1))
    1524             : 
    1525             : /* mask with all bits to left of or equal to least bit of x on */
    1526             : #define same_or_left_bits(x) ((x) | -(x))
    1527             : 
    1528             : /* index corresponding to given bit. Use x86 asm if possible */
    1529             : 
    1530             : #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
    1531             : #define compute_bit2idx(X, I)\
    1532             : {\
    1533             :   unsigned int J;\
    1534             :   J = __builtin_ctz(X); \
    1535             :   I = (bindex_t)J;\
    1536             : }
    1537             : 
    1538             : #elif defined (__INTEL_COMPILER)
    1539             : #define compute_bit2idx(X, I)\
    1540             : {\
    1541             :   unsigned int J;\
    1542             :   J = _bit_scan_forward (X); \
    1543             :   I = (bindex_t)J;\
    1544             : }
    1545             : 
    1546             : #elif defined(_MSC_VER) && _MSC_VER>=1300
    1547             : #define compute_bit2idx(X, I)\
    1548             : {\
    1549             :   unsigned int J;\
    1550             :   _BitScanForward((DWORD *) &J, X);\
    1551             :   I = (bindex_t)J;\
    1552             : }
    1553             : 
    1554             : #elif USE_BUILTIN_FFS
    1555             : #define compute_bit2idx(X, I) I = ffs(X)-1
    1556             : 
    1557             : #else
    1558             : #define compute_bit2idx(X, I)\
    1559             : {\
    1560             :   unsigned int Y = X - 1;\
    1561             :   unsigned int K = Y >> (16-4) & 16;\
    1562             :   unsigned int N = K;        Y >>= K;\
    1563             :   N += K = Y >> (8-3) &  8;  Y >>= K;\
    1564             :   N += K = Y >> (4-2) &  4;  Y >>= K;\
    1565             :   N += K = Y >> (2-1) &  2;  Y >>= K;\
    1566             :   N += K = Y >> (1-0) &  1;  Y >>= K;\
    1567             :   I = (bindex_t)(N + Y);\
    1568             : }
    1569             : #endif /* GNUC */
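                     : 
                     : /* A sketch combining the bitmap helpers above: find the nearest
                     :    non-empty bin at or above index i, the pattern the allocation paths
                     :    below apply to smallmap/treemap.  next_bin_index is a hypothetical
                     :    helper; callers must first check that some qualifying bit is set. */
                     : static bindex_t next_bin_index(binmap_t map, bindex_t i) {
                     :   binmap_t candidates = same_or_left_bits(idx2bit(i)) & map;
                     :   binmap_t b = least_bit(candidates); /* lowest qualifying bit */
                     :   bindex_t I;
                     :   compute_bit2idx(b, I);
                     :   return I;
                     : }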
    1570             : 
    1571             : 
    1572             : /* ----------------------- Runtime Check Support ------------------------- */
    1573             : 
    1574             : /*
    1575             :   For security, the main invariant is that malloc/free/etc never
    1576             :   writes to a static address other than malloc_state, unless static
    1577             :   malloc_state itself has been corrupted, which cannot occur via
    1578             :   malloc (because of these checks). In essence this means that we
    1579             :   believe all pointers, sizes, maps etc held in malloc_state, but
    1580             :   check all of those linked or offsetted from other embedded data
    1581             :   structures.  These checks are interspersed with main code in a way
    1582             :   that tends to minimize their run-time cost.
    1583             : 
    1584             :   When FOOTERS is defined, in addition to range checking, we also
    1585             :   verify footer fields of inuse chunks, which can be used to guarantee
    1586             :   that the mstate controlling malloc/free is intact.  This is a
    1587             :   streamlined version of the approach described by William Robertson
    1588             :   et al in "Run-time Detection of Heap-based Overflows" LISA'03
    1589             :   http://www.usenix.org/events/lisa03/tech/robertson.html The footer
    1590             :   of an inuse chunk holds the xor of its mstate and a random seed,
    1591             :   which is checked upon calls to free() and realloc().  This is
    1592             :   (probabilistically) unguessable from outside the program, but can be
    1593             :   computed by any code successfully malloc'ing any chunk, so does not
    1594             :   itself provide protection against code that has already broken
    1595             :   security through some other means.  Unlike Robertson et al, we
    1596             :   always dynamically check addresses of all offset chunks (previous,
    1597             :   next, etc). This turns out to be cheaper than relying on hashes.
    1598             : */
    1599             : 
    1600             : #if !INSECURE
    1601             : /* Check if address a is at least as high as any from MORECORE or MMAP */
    1602             : #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
    1603             : /* Check if address of next chunk n is higher than base chunk p */
    1604             : #define ok_next(p, n)    ((char*)(p) < (char*)(n))
    1605             : /* Check if p has inuse status */
    1606             : #define ok_inuse(p)     is_inuse(p)
    1607             : /* Check if p has its pinuse bit on */
    1608             : #define ok_pinuse(p)     pinuse(p)
    1609             : 
    1610             : #else /* !INSECURE */
    1611             : #define ok_address(M, a) (1)
    1612             : #define ok_next(b, n)    (1)
    1613             : #define ok_inuse(p)      (1)
    1614             : #define ok_pinuse(p)     (1)
    1615             : #endif /* !INSECURE */
    1616             : 
    1617             : #if (FOOTERS && !INSECURE)
    1618             : /* Check if (alleged) mstate m has expected magic field */
    1619             : __clib_nosanitize_addr
    1620             : static inline int
    1621   308363000 : ok_magic (const mstate m)
    1622             : {
    1623   308363000 :     return (m->magic == mparams.magic);
    1624             : }
    1625             : #else  /* (FOOTERS && !INSECURE) */
    1626             : #define ok_magic(M)      (1)
    1627             : #endif /* (FOOTERS && !INSECURE) */
    1628             : 
    1629             : /* In gcc, use __builtin_expect to minimize impact of checks */
    1630             : #if !INSECURE
    1631             : #if defined(__GNUC__) && __GNUC__ >= 3
    1632             : #define RTCHECK(e)  __builtin_expect(e, 1)
    1633             : #else /* GNUC */
    1634             : #define RTCHECK(e)  (e)
    1635             : #endif /* GNUC */
    1636             : #else /* !INSECURE */
    1637             : #define RTCHECK(e)  (1)
    1638             : #endif /* !INSECURE */
    1639             : 
    1640             : /* macros to set up inuse chunks with or without footers */
    1641             : 
    1642             : #if !FOOTERS
    1643             : 
    1644             : #define mark_inuse_foot(M,p,s)
    1645             : 
    1646             : /* Macros for setting head/foot of non-mmapped chunks */
    1647             : 
    1648             : /* Set cinuse bit and pinuse bit of next chunk */
    1649             : #define set_inuse(M,p,s)\
    1650             :   ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
    1651             :   ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
    1652             : 
    1653             : /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
    1654             : #define set_inuse_and_pinuse(M,p,s)\
    1655             :   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
    1656             :   ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
    1657             : 
    1658             : /* Set size, cinuse and pinuse bit of this chunk */
    1659             : #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
    1660             :   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
    1661             : 
    1662             : #else /* FOOTERS */
    1663             : 
    1664             : /* Set foot of inuse chunk to be xor of mstate and seed */
    1665             : #define mark_inuse_foot(M,p,s)\
    1666             :   (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
    1667             : 
    1668             : #define get_mstate_for(p)\
    1669             :   ((mstate)(((mchunkptr)((char*)(p) +\
    1670             :     (chunksize(p))))->prev_foot ^ mparams.magic))
    1671             : 
    1672             : #define set_inuse(M,p,s)\
    1673             :   ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
    1674             :   (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
    1675             :   mark_inuse_foot(M,p,s))
    1676             : 
    1677             : #define set_inuse_and_pinuse(M,p,s)\
    1678             :   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
    1679             :   (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
    1680             :  mark_inuse_foot(M,p,s))
    1681             : 
    1682             : #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
    1683             :   ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
    1684             :   mark_inuse_foot(M, p, s))
    1685             : 
    1686             : #endif /* !FOOTERS */
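                     : 
                     : /* Why the footer round-trips when FOOTERS is enabled: mark_inuse_foot
                     :    stores ((size_t)M ^ mparams.magic) in the next chunk's prev_foot,
                     :    and get_mstate_for xors that field with mparams.magic again, so
                     : 
                     :      ((size_t)M ^ magic) ^ magic == (size_t)M
                     : 
                     :    recovering the owning mstate.  Code that cannot read mparams.magic
                     :    cannot forge a valid footer, which is the probabilistic check
                     :    described in the Runtime Check Support comment above. */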
    1687             : 
    1688             : /* ---------------------------- setting mparams -------------------------- */
    1689             : 
    1690             : #if LOCK_AT_FORK
    1691             : static void pre_fork(void)         { ACQUIRE_LOCK(&(gm)->mutex); }
    1692             : static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
    1693             : static void post_fork_child(void)  { INITIAL_LOCK(&(gm)->mutex); }
    1694             : #endif /* LOCK_AT_FORK */
    1695             : 
    1696             : /* Initialize mparams */
    1697         618 : static int init_mparams(void) {
    1698             : #ifdef NEED_GLOBAL_LOCK_INIT
    1699             :   if (malloc_global_mutex_status <= 0)
    1700             :     init_malloc_global_mutex();
    1701             : #endif
    1702             : 
    1703         618 :   ACQUIRE_MALLOC_GLOBAL_LOCK();
    1704         618 :   if (mparams.magic == 0) {
    1705             :     size_t magic;
    1706             :     size_t psize;
    1707             :     size_t gsize;
    1708             : 
    1709             : #ifndef WIN32
    1710         618 :     psize = malloc_getpagesize;
    1711         618 :     gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
    1712             : #else /* WIN32 */
    1713             :     {
    1714             :       SYSTEM_INFO system_info;
    1715             :       GetSystemInfo(&system_info);
    1716             :       psize = system_info.dwPageSize;
    1717             :       gsize = ((DEFAULT_GRANULARITY != 0)?
    1718             :                DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
    1719             :     }
    1720             : #endif /* WIN32 */
    1721             : 
    1722             :     /* Sanity-check configuration:
    1723             :        size_t must be unsigned and as wide as pointer type.
    1724             :        ints must be at least 4 bytes.
    1725             :        alignment must be at least 8.
    1726             :        Alignment, min chunk size, and page size must all be powers of 2.
    1727             :     */
    1728         618 :     if ((sizeof(size_t) != sizeof(char*)) ||
    1729             :         (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
    1730             :         (sizeof(int) < 4)  ||
    1731             :         (MALLOC_ALIGNMENT < (size_t)8U) ||
    1732             :         ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
    1733             :         ((MCHUNK_SIZE      & (MCHUNK_SIZE-SIZE_T_ONE))      != 0) ||
    1734         618 :         ((gsize            & (gsize-SIZE_T_ONE))            != 0) ||
    1735         618 :         ((psize            & (psize-SIZE_T_ONE))            != 0))
    1736           0 :       DLM_ABORT;
    1737         618 :     mparams.granularity = gsize;
    1738         618 :     mparams.page_size = psize;
    1739         618 :     mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
    1740         618 :     mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
    1741             : #if MORECORE_CONTIGUOUS
    1742             :     mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
    1743             : #else  /* MORECORE_CONTIGUOUS */
    1744         618 :     mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
    1745             : #endif /* MORECORE_CONTIGUOUS */
    1746             : 
    1747             : #if !ONLY_MSPACES
    1748             :     /* Set up lock for main malloc area */
    1749             :     gm->mflags = mparams.default_mflags;
    1750             :     (void)INITIAL_LOCK(&gm->mutex);
    1751             : #endif
    1752             : #if LOCK_AT_FORK
    1753             :     pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
    1754             : #endif
    1755             : 
    1756             :     {
    1757             : #ifndef DLM_MAGIC_CONSTANT
    1758             : #if USE_DEV_RANDOM
    1759             :       int fd;
    1760             :       unsigned char buf[sizeof(size_t)];
    1761             :       /* Try to use /dev/urandom, else fall back on using time */
    1762             :       if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
    1763             :           read(fd, buf, sizeof(buf)) == sizeof(buf)) {
    1764             :         magic = *((size_t *) buf);
    1765             :         close(fd);
    1766             :       }
    1767             :       else
    1768             : #endif /* USE_DEV_RANDOM */
    1769             : #ifdef WIN32
    1770             :       magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
    1771             : #elif defined(LACKS_TIME_H)
    1772             :       magic = (size_t)&magic ^ (size_t)0x55555555U;
    1773             : #else
    1774             :       magic = (size_t)(time(0) ^ (size_t)0x55555555U);
    1775             : #endif
    1776             :       magic |= (size_t)8U;    /* ensure nonzero */
    1777             :       magic &= ~(size_t)7U;   /* improve chances of fault for bad values */
    1778             : #else
    1779         618 :       magic = DLM_MAGIC_CONSTANT;
    1780             : #endif
    1781             :       /* Until memory modes are commonly available, use a volatile write */
    1782         618 :       (*(volatile size_t *)(&(mparams.magic))) = magic;
    1783             :     }
    1784             :   }
    1785             : 
    1786         618 :   RELEASE_MALLOC_GLOBAL_LOCK();
    1787         618 :   return 1;
    1788             : }
    1789             : 
    1790             : /* support for mallopt */
    1791           0 : static int change_mparam(int param_number, int value) {
    1792             :   size_t val;
    1793           0 :   ensure_initialization();
    1794           0 :   val = (value == -1)? MAX_SIZE_T : (size_t)value;
    1795           0 :   switch(param_number) {
    1796           0 :   case M_TRIM_THRESHOLD:
    1797           0 :     mparams.trim_threshold = val;
    1798           0 :     return 1;
    1799           0 :   case M_GRANULARITY:
    1800           0 :     if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
    1801           0 :       mparams.granularity = val;
    1802           0 :       return 1;
    1803             :     }
    1804             :     else
    1805           0 :       return 0;
    1806           0 :   case M_MMAP_THRESHOLD:
    1807           0 :     mparams.mmap_threshold = val;
    1808           0 :     return 1;
    1809           0 :   default:
    1810           0 :     return 0;
    1811             :   }
    1812             : }
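                     : 
                     : /* Usage sketch for the mallopt-style tuning backed by change_mparam,
                     :    assuming a 4096-byte page:
                     : 
                     :      change_mparam(M_GRANULARITY, 64 * 1024);  returns 1: power of 2
                     :                                                 and >= page_size
                     :      change_mparam(M_GRANULARITY, 3 * 1024);   returns 0: rejected
                     :      change_mparam(M_TRIM_THRESHOLD, -1);      -1 maps to MAX_SIZE_T,
                     :                                                 disabling auto-trim
                     : */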
    1813             : 
    1814             : #if DEBUG
    1815             : /* ------------------------- Debugging Support --------------------------- */
    1816             : 
    1817             : /* Check properties of any chunk, whether free, inuse, mmapped etc  */
    1818             : static void do_check_any_chunk(mstate m, mchunkptr p) {
    1819             :   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
    1820             :   assert(ok_address(m, p));
    1821             : }
    1822             : 
    1823             : /* Check properties of top chunk */
    1824             : static void do_check_top_chunk(mstate m, mchunkptr p) {
    1825             :   msegmentptr sp = segment_holding(m, (char*)p);
    1826             :   size_t  sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
    1827             :   assert(sp != 0);
    1828             :   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
    1829             :   assert(ok_address(m, p));
    1830             :   assert(sz == m->topsize);
    1831             :   assert(sz > 0);
    1832             :   assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
    1833             :   assert(pinuse(p));
    1834             :   assert(!pinuse(chunk_plus_offset(p, sz)));
    1835             : }
    1836             : 
    1837             : /* Check properties of (inuse) mmapped chunks */
    1838             : static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
    1839             :   size_t  sz = chunksize(p);
    1840             :   size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
    1841             :   assert(is_mmapped(p));
    1842             :   assert(use_mmap(m));
    1843             :   assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
    1844             :   assert(ok_address(m, p));
    1845             :   assert(!is_small(sz));
    1846             :   assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
    1847             :   assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
    1848             :   assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
    1849             : }
    1850             : 
    1851             : /* Check properties of inuse chunks */
    1852             : static void do_check_inuse_chunk(mstate m, mchunkptr p) {
    1853             :   do_check_any_chunk(m, p);
    1854             :   assert(is_inuse(p));
    1855             :   assert(next_pinuse(p));
    1856             :   /* If not pinuse and not mmapped, previous chunk has OK offset */
    1857             :   assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
    1858             :   if (is_mmapped(p))
    1859             :     do_check_mmapped_chunk(m, p);
    1860             : }
    1861             : 
    1862             : /* Check properties of free chunks */
    1863             : static void do_check_free_chunk(mstate m, mchunkptr p) {
    1864             :   size_t sz = chunksize(p);
    1865             :   mchunkptr next = chunk_plus_offset(p, sz);
    1866             :   do_check_any_chunk(m, p);
    1867             :   assert(!is_inuse(p));
    1868             :   assert(!next_pinuse(p));
    1869             :   assert (!is_mmapped(p));
    1870             :   if (p != m->dv && p != m->top) {
    1871             :     if (sz >= MIN_CHUNK_SIZE) {
    1872             :       assert((sz & CHUNK_ALIGN_MASK) == 0);
    1873             :       assert(is_aligned(chunk2mem(p)));
    1874             :       assert(next->prev_foot == sz);
    1875             :       assert(pinuse(p));
    1876             :       assert (next == m->top || is_inuse(next));
    1877             :       assert(p->fd->bk == p);
    1878             :       assert(p->bk->fd == p);
    1879             :     }
    1880             :     else  /* markers are always of size SIZE_T_SIZE */
    1881             :       assert(sz == SIZE_T_SIZE);
    1882             :   }
    1883             : }
    1884             : 
    1885             : /* Check properties of malloced chunks at the point they are malloced */
    1886             : static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
    1887             :   if (mem != 0) {
    1888             :     mchunkptr p = mem2chunk(mem);
    1889             :     size_t sz = p->head & ~INUSE_BITS;
    1890             :     do_check_inuse_chunk(m, p);
    1891             :     assert((sz & CHUNK_ALIGN_MASK) == 0);
    1892             :     assert(sz >= MIN_CHUNK_SIZE);
    1893             :     assert(sz >= s);
    1894             :     /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
    1895             :     assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
    1896             :   }
    1897             : }
    1898             : 
    1899             : /* Check a tree and its subtrees.  */
    1900             : static void do_check_tree(mstate m, tchunkptr t) {
    1901             :   tchunkptr head = 0;
    1902             :   tchunkptr u = t;
    1903             :   bindex_t tindex = t->index;
    1904             :   size_t tsize = chunksize(t);
    1905             :   bindex_t idx;
    1906             :   compute_tree_index(tsize, idx);
    1907             :   assert(tindex == idx);
    1908             :   assert(tsize >= MIN_LARGE_SIZE);
    1909             :   assert(tsize >= minsize_for_tree_index(idx));
    1910             :   assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
    1911             : 
    1912             :   do { /* traverse through chain of same-sized nodes */
    1913             :     do_check_any_chunk(m, ((mchunkptr)u));
    1914             :     assert(u->index == tindex);
    1915             :     assert(chunksize(u) == tsize);
    1916             :     assert(!is_inuse(u));
    1917             :     assert(!next_pinuse(u));
    1918             :     assert(u->fd->bk == u);
    1919             :     assert(u->bk->fd == u);
    1920             :     if (u->parent == 0) {
    1921             :       assert(u->child[0] == 0);
    1922             :       assert(u->child[1] == 0);
    1923             :     }
    1924             :     else {
    1925             :       assert(head == 0); /* only one node on chain has parent */
    1926             :       head = u;
    1927             :       assert(u->parent != u);
    1928             :       assert(u->parent->child[0] == u ||
    1929             :              u->parent->child[1] == u ||
    1930             :              *((tbinptr*)(u->parent)) == u);
    1931             :       if (u->child[0] != 0) {
    1932             :         assert(u->child[0]->parent == u);
    1933             :         assert(u->child[0] != u);
    1934             :         do_check_tree(m, u->child[0]);
    1935             :       }
    1936             :       if (u->child[1] != 0) {
    1937             :         assert(u->child[1]->parent == u);
    1938             :         assert(u->child[1] != u);
    1939             :         do_check_tree(m, u->child[1]);
    1940             :       }
    1941             :       if (u->child[0] != 0 && u->child[1] != 0) {
    1942             :         assert(chunksize(u->child[0]) < chunksize(u->child[1]));
    1943             :       }
    1944             :     }
    1945             :     u = u->fd;
    1946             :   } while (u != t);
    1947             :   assert(head != 0);
    1948             : }
    1949             : 
    1950             : /*  Check all the chunks in a treebin.  */
    1951             : static void do_check_treebin(mstate m, bindex_t i) {
    1952             :   tbinptr* tb = treebin_at(m, i);
    1953             :   tchunkptr t = *tb;
    1954             :   int empty = (m->treemap & (1U << i)) == 0;
    1955             :   if (t == 0)
    1956             :     assert(empty);
    1957             :   if (!empty)
    1958             :     do_check_tree(m, t);
    1959             : }
    1960             : 
    1961             : /*  Check all the chunks in a smallbin.  */
    1962             : static void do_check_smallbin(mstate m, bindex_t i) {
    1963             :   sbinptr b = smallbin_at(m, i);
    1964             :   mchunkptr p = b->bk;
    1965             :   unsigned int empty = (m->smallmap & (1U << i)) == 0;
    1966             :   if (p == b)
    1967             :     assert(empty);
    1968             :   if (!empty) {
    1969             :     for (; p != b; p = p->bk) {
    1970             :       size_t size = chunksize(p);
    1971             :       mchunkptr q;
    1972             :       /* each chunk claims to be free */
    1973             :       do_check_free_chunk(m, p);
    1974             :       /* chunk belongs in bin */
    1975             :       assert(small_index(size) == i);
    1976             :       assert(p->bk == b || chunksize(p->bk) == chunksize(p));
    1977             :       /* chunk is followed by an inuse chunk */
    1978             :       q = next_chunk(p);
    1979             :       if (q->head != FENCEPOST_HEAD)
    1980             :         do_check_inuse_chunk(m, q);
    1981             :     }
    1982             :   }
    1983             : }
    1984             : 
    1985             : /* Find x in a bin. Used in other check functions. */
    1986             : static int bin_find(mstate m, mchunkptr x) {
    1987             :   size_t size = chunksize(x);
    1988             :   if (is_small(size)) {
    1989             :     bindex_t sidx = small_index(size);
    1990             :     sbinptr b = smallbin_at(m, sidx);
    1991             :     if (smallmap_is_marked(m, sidx)) {
    1992             :       mchunkptr p = b;
    1993             :       do {
    1994             :         if (p == x)
    1995             :           return 1;
    1996             :       } while ((p = p->fd) != b);
    1997             :     }
    1998             :   }
    1999             :   else {
    2000             :     bindex_t tidx;
    2001             :     compute_tree_index(size, tidx);
    2002             :     if (treemap_is_marked(m, tidx)) {
    2003             :       tchunkptr t = *treebin_at(m, tidx);
    2004             :       size_t sizebits = size << leftshift_for_tree_index(tidx);
    2005             :       while (t != 0 && chunksize(t) != size) {
    2006             :         t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
    2007             :         sizebits <<= 1;
    2008             :       }
    2009             :       if (t != 0) {
    2010             :         tchunkptr u = t;
    2011             :         do {
    2012             :           if (u == (tchunkptr)x)
    2013             :             return 1;
    2014             :         } while ((u = u->fd) != t);
    2015             :       }
    2016             :     }
    2017             :   }
    2018             :   return 0;
    2019             : }
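/* In isolation, the child-selection step of the descent loop above: the
   size is pre-shifted by leftshift_for_tree_index so that, at each level,
   its current top bit picks the left or right child.  A minimal sketch,
   assuming the 64-bit size_t of typical VPP builds (so
   SIZE_T_BITSIZE - SIZE_T_ONE == 63): */

static unsigned tree_direction_sketch (size_t sizebits) {
  return (unsigned) ((sizebits >> 63) & 1);   /* 0: child[0], 1: child[1] */
}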
    2020             : 
    2021             : /* Traverse each chunk and check it; return total */
    2022             : static size_t traverse_and_check(mstate m) {
    2023             :   size_t sum = 0;
    2024             :   if (is_initialized(m)) {
    2025             :     msegmentptr s = &m->seg;
    2026             :     sum += m->topsize + TOP_FOOT_SIZE;
    2027             :     while (s != 0) {
    2028             :       mchunkptr q = align_as_chunk(s->base);
    2029             :       mchunkptr lastq = 0;
    2030             :       assert(pinuse(q));
    2031             :       while (segment_holds(s, q) &&
    2032             :              q != m->top && q->head != FENCEPOST_HEAD) {
    2033             :         sum += chunksize(q);
    2034             :         if (is_inuse(q)) {
    2035             :           assert(!bin_find(m, q));
    2036             :           do_check_inuse_chunk(m, q);
    2037             :         }
    2038             :         else {
    2039             :           assert(q == m->dv || bin_find(m, q));
    2040             :           assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
    2041             :           do_check_free_chunk(m, q);
    2042             :         }
    2043             :         lastq = q;
    2044             :         q = next_chunk(q);
    2045             :       }
    2046             :       s = s->next;
    2047             :     }
    2048             :   }
    2049             :   return sum;
    2050             : }
    2051             : 
    2052             : 
    2053             : /* Check all properties of malloc_state. */
    2054             : static void do_check_malloc_state(mstate m) {
    2055             :   bindex_t i;
    2056             :   size_t total;
    2057             :   /* check bins */
    2058             :   for (i = 0; i < NSMALLBINS; ++i)
    2059             :     do_check_smallbin(m, i);
    2060             :   for (i = 0; i < NTREEBINS; ++i)
    2061             :     do_check_treebin(m, i);
    2062             : 
    2063             :   if (m->dvsize != 0) { /* check dv chunk */
    2064             :     do_check_any_chunk(m, m->dv);
    2065             :     assert(m->dvsize == chunksize(m->dv));
    2066             :     assert(m->dvsize >= MIN_CHUNK_SIZE);
    2067             :     assert(bin_find(m, m->dv) == 0);
    2068             :   }
    2069             : 
    2070             :   if (m->top != 0) {   /* check top chunk */
    2071             :     do_check_top_chunk(m, m->top);
    2072             :     /*assert(m->topsize == chunksize(m->top)); redundant */
    2073             :     assert(m->topsize > 0);
    2074             :     assert(bin_find(m, m->top) == 0);
    2075             :   }
    2076             : 
    2077             :   total = traverse_and_check(m);
    2078             :   assert(total <= m->footprint);
    2079             :   assert(m->footprint <= m->max_footprint);
    2080             : }
    2081             : #endif /* DEBUG */
    2082             : 
    2083             : /* ----------------------------- statistics ------------------------------ */
    2084             : 
    2085             : #if !NO_MALLINFO
    2086             : __clib_nosanitize_addr
    2087        3458 : static struct dlmallinfo internal_mallinfo(mstate m) {
    2088        3458 :   struct dlmallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    2089        3458 :   ensure_initialization();
    2090        3458 :   if (!PREACTION(m)) {
    2091             :     check_malloc_state(m);
    2092        3458 :     if (is_initialized(m)) {
    2093        3458 :       size_t nfree = SIZE_T_ONE; /* top always free */
    2094        3458 :       size_t mfree = m->topsize + TOP_FOOT_SIZE;
    2095        3458 :       size_t sum = mfree;
    2096        3458 :       msegmentptr s = &m->seg;
    2097        6916 :       while (s != 0) {
    2098        3458 :         mchunkptr q = align_as_chunk(s->base);
    2099   101295000 :         while (segment_holds(s, q) &&
    2100   101295000 :                q != m->top && q->head != FENCEPOST_HEAD) {
    2101   101292000 :           size_t sz = chunksize(q);
    2102   101292000 :           sum += sz;
    2103   101292000 :           if (!is_inuse(q)) {
    2104      920362 :             mfree += sz;
    2105      920362 :             ++nfree;
    2106             :           }
    2107   101292000 :           q = next_chunk(q);
    2108             :         }
    2109        3458 :         s = s->next;
    2110             :       }
    2111             : 
    2112        3458 :       nm.arena    = sum;
    2113        3458 :       nm.ordblks  = nfree;
    2114        3458 :       nm.hblkhd   = m->footprint - sum;
    2115        3458 :       nm.usmblks  = m->max_footprint;
    2116        3458 :       nm.uordblks = m->footprint - mfree;
    2117        3458 :       nm.fordblks = mfree;
    2118        3458 :       nm.keepcost = m->topsize;
    2119             :     }
    2120             : 
    2121        3458 :     POSTACTION(m);
    2122             :   }
    2123        3458 :   return nm;
    2124             : }
    2125             : #endif /* !NO_MALLINFO */
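/* Field arithmetic worth noting (derived from the assignments above, not
   extra dlmalloc state): arena = sum and hblkhd = footprint - sum, while
   uordblks = footprint - mfree and fordblks = mfree, so
   arena + hblkhd == uordblks + fordblks == m->footprint.  For example,
   with a hypothetical footprint of 1024 KiB, sum = 900 KiB, and
   mfree = 300 KiB: 900 + 124 == 724 + 300 == 1024. */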
    2126             : 
    2127             : #if !NO_MALLOC_STATS
    2128           0 : static void internal_malloc_stats(mstate m) {
    2129           0 :   ensure_initialization();
    2130           0 :   if (!PREACTION(m)) {
    2131           0 :     size_t maxfp = 0;
    2132           0 :     size_t fp = 0;
    2133           0 :     size_t used = 0;
    2134             :     check_malloc_state(m);
    2135           0 :     if (is_initialized(m)) {
    2136           0 :       msegmentptr s = &m->seg;
    2137           0 :       maxfp = m->max_footprint;
    2138           0 :       fp = m->footprint;
    2139           0 :       used = fp - (m->topsize + TOP_FOOT_SIZE);
    2140             : 
    2141           0 :       while (s != 0) {
    2142           0 :         mchunkptr q = align_as_chunk(s->base);
    2143           0 :         while (segment_holds(s, q) &&
    2144           0 :                q != m->top && q->head != FENCEPOST_HEAD) {
    2145           0 :           if (!is_inuse(q))
    2146           0 :             used -= chunksize(q);
    2147           0 :           q = next_chunk(q);
    2148             :         }
    2149           0 :         s = s->next;
    2150             :       }
    2151             :     }
    2152           0 :     POSTACTION(m); /* drop lock */
    2153           0 :     fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
    2154           0 :     fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
    2155           0 :     fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
    2156             :   }
    2157           0 : }
    2158             : #endif /* NO_MALLOC_STATS */
    2159             : 
    2160             : /* ----------------------- Operations on smallbins ----------------------- */
    2161             : 
    2162             : /*
    2163             :   Various forms of linking and unlinking are defined as macros, even
    2164             :   the ones for trees, which are very long but have very short typical
    2165             :   paths.  This is ugly but reduces reliance on inlining support of
    2166             :   compilers.
    2167             : */
    2168             : 
    2169             : /* Link a free chunk into a smallbin  */
    2170             : #define insert_small_chunk(M, P, S) {\
    2171             :   bindex_t I  = small_index(S);\
    2172             :   mchunkptr B = smallbin_at(M, I);\
    2173             :   mchunkptr F = B;\
    2174             :   assert(S >= MIN_CHUNK_SIZE);\
    2175             :   if (!smallmap_is_marked(M, I))\
    2176             :     mark_smallmap(M, I);\
    2177             :   else if (RTCHECK(ok_address(M, B->fd)))\
    2178             :     F = B->fd;\
    2179             :   else {\
    2180             :     CORRUPTION_ERROR_ACTION(M);\
    2181             :   }\
    2182             :   B->fd = P;\
    2183             :   F->bk = P;\
    2184             :   P->fd = F;\
    2185             :   P->bk = B;\
    2186             : }
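/* The list splice performed by insert_small_chunk, in function form for
   readability -- a sketch only, omitting the smallmap bookkeeping and the
   RTCHECK corruption handling of the macro above (after init_bins an
   empty bin has B->fd == B, so the two cases collapse): */

static void __attribute__((unused))
insert_small_sketch (mchunkptr B, mchunkptr P) {
  mchunkptr F = B->fd;   /* old first chunk (B itself if the bin is empty) */
  B->fd = P;             /* P becomes the first chunk after the bin head */
  F->bk = P;
  P->fd = F;
  P->bk = B;
}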
    2187             : 
    2188             : /* Unlink a chunk from a smallbin  */
    2189             : #define unlink_small_chunk(M, P, S) {\
    2190             :   mchunkptr F = P->fd;\
    2191             :   mchunkptr B = P->bk;\
    2192             :   bindex_t I = small_index(S);\
    2193             :   assert(P != B);\
    2194             :   assert(P != F);\
    2195             :   assert(chunksize(P) == small_index2size(I));\
    2196             :   if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
    2197             :     if (B == F) {\
    2198             :       clear_smallmap(M, I);\
    2199             :     }\
    2200             :     else if (RTCHECK(B == smallbin_at(M,I) ||\
    2201             :                      (ok_address(M, B) && B->fd == P))) {\
    2202             :       F->bk = B;\
    2203             :       B->fd = F;\
    2204             :     }\
    2205             :     else {\
    2206             :       CORRUPTION_ERROR_ACTION(M);\
    2207             :     }\
    2208             :   }\
    2209             :   else {\
    2210             :     CORRUPTION_ERROR_ACTION(M);\
    2211             :   }\
    2212             : }
    2213             : 
    2214             : /* Unlink the first chunk from a smallbin */
    2215             : #define unlink_first_small_chunk(M, B, P, I) {\
    2216             :   mchunkptr F = P->fd;\
    2217             :   assert(P != B);\
    2218             :   assert(P != F);\
    2219             :   assert(chunksize(P) == small_index2size(I));\
    2220             :   if (B == F) {\
    2221             :     clear_smallmap(M, I);\
    2222             :   }\
    2223             :   else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
    2224             :     F->bk = B;\
    2225             :     B->fd = F;\
    2226             :   }\
    2227             :   else {\
    2228             :     CORRUPTION_ERROR_ACTION(M);\
    2229             :   }\
    2230             : }
    2231             : 
    2232             : /* Replace dv node, binning the old one */
    2233             : /* Used only when dvsize known to be small */
    2234             : #define replace_dv(M, P, S) {\
    2235             :   size_t DVS = M->dvsize;\
    2236             :   assert(is_small(DVS));\
    2237             :   if (DVS != 0) {\
    2238             :     mchunkptr DV = M->dv;\
    2239             :     insert_small_chunk(M, DV, DVS);\
    2240             :   }\
    2241             :   M->dvsize = S;\
    2242             :   M->dv = P;\
    2243             : }
    2244             : 
    2245             : /* ------------------------- Operations on trees ------------------------- */
    2246             : 
    2247             : /* Insert chunk into tree */
    2248             : #define insert_large_chunk(M, X, S) {\
    2249             :   tbinptr* H;\
    2250             :   bindex_t I;\
    2251             :   compute_tree_index(S, I);\
    2252             :   H = treebin_at(M, I);\
    2253             :   X->index = I;\
    2254             :   X->child[0] = X->child[1] = 0;\
    2255             :   if (!treemap_is_marked(M, I)) {\
    2256             :     mark_treemap(M, I);\
    2257             :     *H = X;\
    2258             :     X->parent = (tchunkptr)H;\
    2259             :     X->fd = X->bk = X;\
    2260             :   }\
    2261             :   else {\
    2262             :     tchunkptr T = *H;\
    2263             :     size_t K = S << leftshift_for_tree_index(I);\
    2264             :     for (;;) {\
    2265             :       if (chunksize(T) != S) {\
    2266             :         tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
    2267             :         K <<= 1;\
    2268             :         if (*C != 0)\
    2269             :           T = *C;\
    2270             :         else if (RTCHECK(ok_address(M, C))) {\
    2271             :           *C = X;\
    2272             :           X->parent = T;\
    2273             :           X->fd = X->bk = X;\
    2274             :           break;\
    2275             :         }\
    2276             :         else {\
    2277             :           CORRUPTION_ERROR_ACTION(M);\
    2278             :           break;\
    2279             :         }\
    2280             :       }\
    2281             :       else {\
    2282             :         tchunkptr F = T->fd;\
    2283             :         if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
    2284             :           T->fd = F->bk = X;\
    2285             :           X->fd = F;\
    2286             :           X->bk = T;\
    2287             :           X->parent = 0;\
    2288             :           break;\
    2289             :         }\
    2290             :         else {\
    2291             :           CORRUPTION_ERROR_ACTION(M);\
    2292             :           break;\
    2293             :         }\
    2294             :       }\
    2295             :     }\
    2296             :   }\
    2297             : }
    2298             : 
    2299             : /*
    2300             :   Unlink steps:
    2301             : 
    2302             :   1. If x is a chained node, unlink it from its same-sized fd/bk links
    2303             :      and choose its bk node as its replacement.
    2304             :   2. If x was the last node of its size, but not a leaf node, it must
    2305             :      be replaced with a leaf node (not merely one with an open left or
    2306             :      right), to make sure that lefts and rights of descendants
    2307             :      correspond properly to bit masks.  We use the rightmost descendant
    2308             :      of x.  We could use any other leaf, but this is easy to locate and
    2309             :      tends to counteract removal of leftmosts elsewhere, and so keeps
    2310             :      paths shorter than minimally guaranteed.  This doesn't loop much
    2311             :      because on average a node in a tree is near the bottom.
    2312             :   3. If x is the base of a chain (i.e., has parent links) relink
    2313             :      x's parent and children to x's replacement (or null if none).
    2314             : */
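/* A self-contained sketch of step 2 (hypothetical node type, not the real
   tchunk layout): walk to the rightmost descendant by preferring the right
   child at each level, so the replacement is guaranteed to be a leaf. */

struct tnode_sketch { struct tnode_sketch *child[2]; };

static struct tnode_sketch *
rightmost_descendant_sketch (struct tnode_sketch *x) {
  struct tnode_sketch *r = x;          /* caller ensures x is not a leaf */
  for (;;) {
    if (r->child[1] != 0)      r = r->child[1];
    else if (r->child[0] != 0) r = r->child[0];
    else return r;                     /* a true leaf, safe to relink */
  }
}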
    2315             : 
    2316             : #define unlink_large_chunk(M, X) {\
    2317             :   tchunkptr XP = X->parent;\
    2318             :   tchunkptr R;\
    2319             :   if (X->bk != X) {\
    2320             :     tchunkptr F = X->fd;\
    2321             :     R = X->bk;\
    2322             :     if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
    2323             :       F->bk = R;\
    2324             :       R->fd = F;\
    2325             :     }\
    2326             :     else {\
    2327             :       CORRUPTION_ERROR_ACTION(M);\
    2328             :     }\
    2329             :   }\
    2330             :   else {\
    2331             :     tchunkptr* RP;\
    2332             :     if (((R = *(RP = &(X->child[1]))) != 0) ||\
    2333             :         ((R = *(RP = &(X->child[0]))) != 0)) {\
    2334             :       tchunkptr* CP;\
    2335             :       while ((*(CP = &(R->child[1])) != 0) ||\
    2336             :              (*(CP = &(R->child[0])) != 0)) {\
    2337             :         R = *(RP = CP);\
    2338             :       }\
    2339             :       if (RTCHECK(ok_address(M, RP)))\
    2340             :         *RP = 0;\
    2341             :       else {\
    2342             :         CORRUPTION_ERROR_ACTION(M);\
    2343             :       }\
    2344             :     }\
    2345             :   }\
    2346             :   if (XP != 0) {\
    2347             :     tbinptr* H = treebin_at(M, X->index);\
    2348             :     if (X == *H) {\
    2349             :       if ((*H = R) == 0) \
    2350             :         clear_treemap(M, X->index);\
    2351             :     }\
    2352             :     else if (RTCHECK(ok_address(M, XP))) {\
    2353             :       if (XP->child[0] == X) \
    2354             :         XP->child[0] = R;\
    2355             :       else \
    2356             :         XP->child[1] = R;\
    2357             :     }\
    2358             :     else\
    2359             :       CORRUPTION_ERROR_ACTION(M);\
    2360             :     if (R != 0) {\
    2361             :       if (RTCHECK(ok_address(M, R))) {\
    2362             :         tchunkptr C0, C1;\
    2363             :         R->parent = XP;\
    2364             :         if ((C0 = X->child[0]) != 0) {\
    2365             :           if (RTCHECK(ok_address(M, C0))) {\
    2366             :             R->child[0] = C0;\
    2367             :             C0->parent = R;\
    2368             :           }\
    2369             :           else\
    2370             :             CORRUPTION_ERROR_ACTION(M);\
    2371             :         }\
    2372             :         if ((C1 = X->child[1]) != 0) {\
    2373             :           if (RTCHECK(ok_address(M, C1))) {\
    2374             :             R->child[1] = C1;\
    2375             :             C1->parent = R;\
    2376             :           }\
    2377             :           else\
    2378             :             CORRUPTION_ERROR_ACTION(M);\
    2379             :         }\
    2380             :       }\
    2381             :       else\
    2382             :         CORRUPTION_ERROR_ACTION(M);\
    2383             :     }\
    2384             :   }\
    2385             : }
    2386             : 
    2387             : /* Relays to large vs small bin operations */
    2388             : 
    2389             : #define insert_chunk(M, P, S)\
    2390             :   if (is_small(S)) insert_small_chunk(M, P, S)\
    2391             :   else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
    2392             : 
    2393             : #define unlink_chunk(M, P, S)\
    2394             :   if (is_small(S)) unlink_small_chunk(M, P, S)\
    2395             :   else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
    2396             : 
    2397             : 
    2398             : /* Relays to internal calls to malloc/free from realloc, memalign, etc. */
    2399             : 
    2400             : #if ONLY_MSPACES
    2401             : #define internal_malloc(m, b) mspace_malloc(m, b)
    2402             : #define internal_free(m, mem) mspace_free(m,mem);
    2403             : #else /* ONLY_MSPACES */
    2404             : #if MSPACES
    2405             : #define internal_malloc(m, b)\
    2406             :   ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
    2407             : #define internal_free(m, mem)\
    2408             :    if (m == gm) dlfree(mem); else mspace_free(m,mem);
    2409             : #else /* MSPACES */
    2410             : #define internal_malloc(m, b) dlmalloc(b)
    2411             : #define internal_free(m, mem) dlfree(mem)
    2412             : #endif /* MSPACES */
    2413             : #endif /* ONLY_MSPACES */
    2414             : 
    2415             : /* -----------------------  Direct-mmapping chunks ----------------------- */
    2416             : 
    2417             : /*
    2418             :   Directly mmapped chunks are set up with an offset to the start of
    2419             :   the mmapped region stored in the prev_foot field of the chunk. This
    2420             :   allows reconstruction of the required argument to MUNMAP when freed,
    2421             :   and also allows adjustment of the returned chunk to meet alignment
    2422             :   requirements (especially in memalign).
    2423             : */
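/* A standalone illustration of that scheme (plain POSIX mmap/munmap rather
   than the dlmalloc chunk types, and a hypothetical header layout): the
   header in front of the user block records the block's distance from the
   mapping base and the mapping length, which is exactly what munmap needs
   when the block is freed. */

struct mm_hdr_sketch { size_t offset; size_t maplen; };

static void *mm_alloc_sketch (size_t len) {
  size_t maplen = len + sizeof (struct mm_hdr_sketch);
  char *base = mmap (0, maplen, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  struct mm_hdr_sketch *h;
  if (base == MAP_FAILED)
    return 0;
  h = (struct mm_hdr_sketch *) base;
  h->offset = sizeof (struct mm_hdr_sketch);  /* user block follows header */
  h->maplen = maplen;
  return base + h->offset;
}

static void mm_free_sketch (void *mem) {
  struct mm_hdr_sketch *h = (struct mm_hdr_sketch *) mem - 1;
  munmap ((char *) mem - h->offset, h->maplen);  /* reconstructed args */
}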
    2424             : 
    2425             : /* Malloc using mmap */
    2426           0 : static void* mmap_alloc(mstate m, size_t nb) {
    2427           0 :   size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    2428           0 :   if (m->footprint_limit != 0) {
    2429           0 :     size_t fp = m->footprint + mmsize;
    2430           0 :     if (fp <= m->footprint || fp > m->footprint_limit)
    2431           0 :       return 0;
    2432             :   }
    2433           0 :   if (mmsize > nb) {     /* Check for wrap around 0 */
    2434           0 :     char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
    2435           0 :     if (mm != CMFAIL) {
    2436           0 :       size_t offset = align_offset(chunk2mem(mm));
    2437           0 :       size_t psize = mmsize - offset - MMAP_FOOT_PAD;
    2438           0 :       mchunkptr p = (mchunkptr)(mm + offset);
    2439           0 :       p->prev_foot = offset;
    2440           0 :       p->head = psize;
    2441           0 :       mark_inuse_foot(m, p, psize);
    2442           0 :       chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
    2443           0 :       chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
    2444             : 
    2445           0 :       if (m->least_addr == 0 || mm < m->least_addr)
    2446           0 :         m->least_addr = mm;
    2447           0 :       if ((m->footprint += mmsize) > m->max_footprint)
    2448           0 :         m->max_footprint = m->footprint;
    2449             :       assert(is_aligned(chunk2mem(p)));
    2450             :       check_mmapped_chunk(m, p);
    2451           0 :       return chunk2mem(p);
    2452             :     }
    2453             :   }
    2454           0 :   return 0;
    2455             : }
    2456             : 
    2457             : /* Realloc using mmap */
    2458           0 : static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
    2459           0 :   size_t oldsize = chunksize(oldp);
    2460             :   (void)flags; /* placate people compiling -Wunused */
    2461           0 :   if (is_small(nb)) /* Can't shrink mmap regions below small size */
    2462           0 :     return 0;
    2463             :   /* Keep old chunk if big enough but not too big */
    2464           0 :   if (oldsize >= nb + SIZE_T_SIZE &&
    2465           0 :       (oldsize - nb) <= (mparams.granularity << 1))
    2466           0 :     return oldp;
    2467             :   else {
    2468           0 :     size_t offset = oldp->prev_foot;
    2469           0 :     size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
    2470           0 :     size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    2471           0 :     char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
    2472             :                                   oldmmsize, newmmsize, flags);
    2473           0 :     if (cp != CMFAIL) {
    2474           0 :       mchunkptr newp = (mchunkptr)(cp + offset);
    2475           0 :       size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
    2476           0 :       newp->head = psize;
    2477           0 :       mark_inuse_foot(m, newp, psize);
    2478           0 :       chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
    2479           0 :       chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
    2480             : 
    2481           0 :       if (cp < m->least_addr)
    2482           0 :         m->least_addr = cp;
    2483           0 :       if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
    2484           0 :         m->max_footprint = m->footprint;
    2485             :       check_mmapped_chunk(m, newp);
    2486           0 :       return newp;
    2487             :     }
    2488             :   }
    2489           0 :   return 0;
    2490             : }
    2491             : 
    2492             : 
    2493             : /* -------------------------- mspace management -------------------------- */
    2494             : 
    2495             : /* Initialize top chunk and its size */
    2496             : __clib_nosanitize_addr
    2497        4774 : static void init_top(mstate m, mchunkptr p, size_t psize) {
    2498             :   /* Ensure alignment */
    2499        4774 :   size_t offset = align_offset(chunk2mem(p));
    2500        4774 :   p = (mchunkptr)((char*)p + offset);
    2501        4774 :   psize -= offset;
    2502             : 
    2503        4774 :   m->top = p;
    2504        4774 :   m->topsize = psize;
    2505        4774 :   p->head = psize | PINUSE_BIT;
    2506             :   /* set size of fake trailing chunk holding overhead space only once */
    2507        4774 :   chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
    2508        4774 :   m->trim_check = mparams.trim_threshold; /* reset on each update */
    2509        4774 : }
    2510             : 
    2511             : /* Initialize bins for a new mstate that is otherwise zeroed out */
    2512        4774 : static void init_bins(mstate m) {
    2513             :   /* Establish circular links for smallbins */
    2514             :   bindex_t i;
    2515      157542 :   for (i = 0; i < NSMALLBINS; ++i) {
    2516      152768 :     sbinptr bin = smallbin_at(m,i);
    2517      152768 :     bin->fd = bin->bk = bin;
    2518             :   }
    2519        4774 : }
    2520             : 
    2521             : #if PROCEED_ON_ERROR
    2522             : 
    2523             : /* default corruption action */
    2524             : static void reset_on_error(mstate m) {
    2525             :   int i;
    2526             :   ++malloc_corruption_error_count;
    2527             :   /* Reinitialize fields to forget about all memory */
    2528             :   m->smallmap = m->treemap = 0;
    2529             :   m->dvsize = m->topsize = 0;
    2530             :   m->seg.base = 0;
    2531             :   m->seg.size = 0;
    2532             :   m->seg.next = 0;
    2533             :   m->top = m->dv = 0;
    2534             :   for (i = 0; i < NTREEBINS; ++i)
    2535             :     *treebin_at(m, i) = 0;
    2536             :   init_bins(m);
    2537             : }
    2538             : #endif /* PROCEED_ON_ERROR */
    2539             : 
    2540             : /* Allocate a chunk at newbase; prepend the remainder to oldbase's first chunk. */
    2541             : __clib_nosanitize_addr
    2542           0 : static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
    2543             :                            size_t nb) {
    2544           0 :   mchunkptr p = align_as_chunk(newbase);
    2545           0 :   mchunkptr oldfirst = align_as_chunk(oldbase);
    2546           0 :   size_t psize = (char*)oldfirst - (char*)p;
    2547           0 :   mchunkptr q = chunk_plus_offset(p, nb);
    2548           0 :   size_t qsize = psize - nb;
    2549           0 :   set_size_and_pinuse_of_inuse_chunk(m, p, nb);
    2550             : 
    2551             :   assert((char*)oldfirst > (char*)q);
    2552             :   assert(pinuse(oldfirst));
    2553             :   assert(qsize >= MIN_CHUNK_SIZE);
    2554             : 
    2555             :   /* consolidate remainder with first chunk of old base */
    2556           0 :   if (oldfirst == m->top) {
    2557           0 :     size_t tsize = m->topsize += qsize;
    2558           0 :     m->top = q;
    2559           0 :     q->head = tsize | PINUSE_BIT;
    2560             :     check_top_chunk(m, q);
    2561             :   }
    2562           0 :   else if (oldfirst == m->dv) {
    2563           0 :     size_t dsize = m->dvsize += qsize;
    2564           0 :     m->dv = q;
    2565           0 :     set_size_and_pinuse_of_free_chunk(q, dsize);
    2566             :   }
    2567             :   else {
    2568           0 :     if (!is_inuse(oldfirst)) {
    2569           0 :       size_t nsize = chunksize(oldfirst);
    2570           0 :       unlink_chunk(m, oldfirst, nsize);
    2571           0 :       oldfirst = chunk_plus_offset(oldfirst, nsize);
    2572           0 :       qsize += nsize;
    2573             :     }
    2574           0 :     set_free_with_pinuse(q, qsize, oldfirst);
    2575           0 :     insert_chunk(m, q, qsize);
    2576             :     check_free_chunk(m, q);
    2577             :   }
    2578             : 
    2579             :   check_malloced_chunk(m, chunk2mem(p), nb);
    2580           0 :   return chunk2mem(p);
    2581             : }
    2582             : 
    2583             : /* Add a segment to hold a new noncontiguous region */
    2584             : __clib_nosanitize_addr
    2585           0 : static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
    2586             :   /* Determine locations and sizes of segment, fenceposts, old top */
    2587           0 :   char* old_top = (char*)m->top;
    2588           0 :   msegmentptr oldsp = segment_holding(m, old_top);
    2589           0 :   char* old_end = oldsp->base + oldsp->size;
    2590           0 :   size_t ssize = pad_request(sizeof(struct malloc_segment));
    2591           0 :   char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    2592           0 :   size_t offset = align_offset(chunk2mem(rawsp));
    2593           0 :   char* asp = rawsp + offset;
    2594           0 :   char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
    2595           0 :   mchunkptr sp = (mchunkptr)csp;
    2596           0 :   msegmentptr ss = (msegmentptr)(chunk2mem(sp));
    2597           0 :   mchunkptr tnext = chunk_plus_offset(sp, ssize);
    2598           0 :   mchunkptr p = tnext;
    2599           0 :   int __attribute__((unused)) nfences = 0;
    2600             : 
    2601             :   /* reset top to new space */
    2602           0 :   init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
    2603             : 
    2604             :   /* Set up segment record */
    2605             :   assert(is_aligned(ss));
    2606           0 :   set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
    2607           0 :   *ss = m->seg; /* Push current record */
    2608           0 :   m->seg.base = tbase;
    2609           0 :   m->seg.size = tsize;
    2610           0 :   m->seg.sflags = mmapped;
    2611           0 :   m->seg.next = ss;
    2612             : 
    2613             :   /* Insert trailing fenceposts */
    2614           0 :   for (;;) {
    2615           0 :     mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
    2616           0 :     p->head = FENCEPOST_HEAD;
    2617           0 :     ++nfences;
    2618           0 :     if ((char*)(&(nextp->head)) < old_end)
    2619           0 :       p = nextp;
    2620             :     else
    2621           0 :       break;
    2622             :   }
    2623             :   assert(nfences >= 2);
    2624             : 
    2625             :   /* Insert the rest of old top into a bin as an ordinary free chunk */
    2626           0 :   if (csp != old_top) {
    2627           0 :     mchunkptr q = (mchunkptr)old_top;
    2628           0 :     size_t psize = csp - old_top;
    2629           0 :     mchunkptr tn = chunk_plus_offset(q, psize);
    2630           0 :     set_free_with_pinuse(q, psize, tn);
    2631           0 :     insert_chunk(m, q, psize);
    2632             :   }
    2633             : 
    2634             :   check_top_chunk(m, m->top);
    2635           0 : }
    2636             : 
    2637             : /* -------------------------- System allocation -------------------------- */
    2638             : 
    2639             : /* Get memory from system using MORECORE or MMAP */
    2640             : __clib_nosanitize_addr
    2641           0 : static void* sys_alloc(mstate m, size_t nb) {
    2642           0 :   char* tbase = CMFAIL;
    2643           0 :   size_t tsize = 0;
    2644           0 :   flag_t mmap_flag = 0;
    2645             :   size_t asize; /* allocation size */
    2646             : 
    2647           0 :   ensure_initialization();
    2648             : 
    2649           0 :   if (use_noexpand(m))
    2650           0 :       return 0;
    2651             : 
    2652             :   /* Directly map large chunks, but only if already initialized */
    2653           0 :   if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
    2654           0 :     void* mem = mmap_alloc(m, nb);
    2655           0 :     if (mem != 0)
    2656           0 :       return mem;
    2657             :   }
    2658             : 
    2659           0 :   asize = granularity_align(nb + SYS_ALLOC_PADDING);
    2660           0 :   if (asize <= nb)
    2661           0 :     return 0; /* wraparound */
    2662           0 :   if (m->footprint_limit != 0) {
    2663           0 :     size_t fp = m->footprint + asize;
    2664           0 :     if (fp <= m->footprint || fp > m->footprint_limit)
    2665           0 :       return 0;
    2666             :   }
    2667             : 
    2668             :   /*
    2669             :     Try getting memory in any of three ways (in most-preferred to
    2670             :     least-preferred order):
    2671             :     1. A call to MORECORE that can normally contiguously extend memory.
    2672             :        (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
    2673             :        main space is mmapped or a previous contiguous call failed)
    2674             :     2. A call to MMAP new space (disabled if not HAVE_MMAP).
    2675             :        Note that under the default settings, if MORECORE is unable to
    2676             :        fulfill a request, and HAVE_MMAP is true, then mmap is
    2677             :        used as a noncontiguous system allocator. This is a useful backup
    2678             :        strategy for systems with holes in address spaces -- in this case
    2679             :        sbrk cannot contiguously expand the heap, but mmap may be able to
    2680             :        find space.
    2681             :     3. A call to MORECORE that cannot usually contiguously extend memory.
    2682             :        (disabled if not HAVE_MORECORE)
    2683             : 
    2684             :    In all cases, we need to request enough bytes from the system to ensure
    2685             :    we can malloc nb bytes upon success, so pad with enough space for
    2686             :    top_foot, plus alignment-pad to make sure we don't lose bytes if
    2687             :    not on boundary, and round this up to a granularity unit.
    2688             :   */
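  /* Worked example of that padding and rounding (hypothetical numbers,
     not dlmalloc's actual constants): with a 64 KiB granularity and 32
     bytes of SYS_ALLOC_PADDING, a request of nb = 100000 becomes
     asize = granularity_align(100032) = 131072, i.e. two full units. */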
    2689             : 
    2690             :   if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
    2691             :     char* br = CMFAIL;
    2692             :     size_t ssize = asize; /* sbrk call size */
    2693             :     msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
    2694             :     ACQUIRE_MALLOC_GLOBAL_LOCK();
    2695             : 
    2696             :     if (ss == 0) {  /* First time through or recovery */
    2697             :       char* base = (char*)CALL_MORECORE(0);
    2698             :       if (base != CMFAIL) {
    2699             :         size_t fp;
    2700             :         /* Adjust to end on a page boundary */
    2701             :         if (!is_page_aligned(base))
    2702             :           ssize += (page_align((size_t)base) - (size_t)base);
    2703             :         fp = m->footprint + ssize; /* recheck limits */
    2704             :         if (ssize > nb && ssize < HALF_MAX_SIZE_T &&
    2705             :             (m->footprint_limit == 0 ||
    2706             :              (fp > m->footprint && fp <= m->footprint_limit)) &&
    2707             :             (br = (char*)(CALL_MORECORE(ssize))) == base) {
    2708             :           tbase = base;
    2709             :           tsize = ssize;
    2710             :         }
    2711             :       }
    2712             :     }
    2713             :     else {
    2714             :       /* Subtract out existing available top space from MORECORE request. */
    2715             :       ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
    2716             :       /* Use mem here only if it did continuously extend old space */
    2717             :       if (ssize < HALF_MAX_SIZE_T &&
    2718             :           (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {
    2719             :         tbase = br;
    2720             :         tsize = ssize;
    2721             :       }
    2722             :     }
    2723             : 
    2724             :     if (tbase == CMFAIL) {    /* Cope with partial failure */
    2725             :       if (br != CMFAIL) {    /* Try to use/extend the space we did get */
    2726             :         if (ssize < HALF_MAX_SIZE_T &&
    2727             :             ssize < nb + SYS_ALLOC_PADDING) {
    2728             :           size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);
    2729             :           if (esize < HALF_MAX_SIZE_T) {
    2730             :             char* end = (char*)CALL_MORECORE(esize);
    2731             :             if (end != CMFAIL)
    2732             :               ssize += esize;
    2733             :             else {            /* Can't use; try to release */
    2734             :               (void) CALL_MORECORE(-ssize);
    2735             :               br = CMFAIL;
    2736             :             }
    2737             :           }
    2738             :         }
    2739             :       }
    2740             :       if (br != CMFAIL) {    /* Use the space we did get */
    2741             :         tbase = br;
    2742             :         tsize = ssize;
    2743             :       }
    2744             :       else
    2745             :         disable_contiguous(m); /* Don't try contiguous path in the future */
    2746             :     }
    2747             : 
    2748             :     RELEASE_MALLOC_GLOBAL_LOCK();
    2749             :   }
    2750             : 
    2751           0 :   if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
    2752           0 :     char* mp = (char*)(CALL_MMAP(asize));
    2753           0 :     if (mp != CMFAIL) {
    2754           0 :       tbase = mp;
    2755           0 :       tsize = asize;
    2756           0 :       mmap_flag = USE_MMAP_BIT;
    2757             :     }
    2758             :   }
    2759             : 
    2760             :   if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
    2761             :     if (asize < HALF_MAX_SIZE_T) {
    2762             :       char* br = CMFAIL;
    2763             :       char* end = CMFAIL;
    2764             :       ACQUIRE_MALLOC_GLOBAL_LOCK();
    2765             :       br = (char*)(CALL_MORECORE(asize));
    2766             :       end = (char*)(CALL_MORECORE(0));
    2767             :       RELEASE_MALLOC_GLOBAL_LOCK();
    2768             :       if (br != CMFAIL && end != CMFAIL && br < end) {
    2769             :         size_t ssize = end - br;
    2770             :         if (ssize > nb + TOP_FOOT_SIZE) {
    2771             :           tbase = br;
    2772             :           tsize = ssize;
    2773             :         }
    2774             :       }
    2775             :     }
    2776             :   }
    2777             : 
    2778           0 :   if (tbase != CMFAIL) {
    2779             : 
    2780           0 :     if ((m->footprint += tsize) > m->max_footprint)
    2781           0 :       m->max_footprint = m->footprint;
    2782             : 
    2783           0 :     if (!is_initialized(m)) { /* first-time initialization */
    2784           0 :       if (m->least_addr == 0 || tbase < m->least_addr)
    2785           0 :         m->least_addr = tbase;
    2786           0 :       m->seg.base = tbase;
    2787           0 :       m->seg.size = tsize;
    2788           0 :       m->seg.sflags = mmap_flag;
    2789           0 :       m->magic = mparams.magic;
    2790           0 :       m->release_checks = MAX_RELEASE_CHECK_RATE;
    2791           0 :       init_bins(m);
    2792             : #if !ONLY_MSPACES
    2793             :       if (is_global(m))
    2794             :         init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
    2795             :       else
    2796             : #endif
    2797             :       {
    2798             :         /* Offset top by embedded malloc_state */
    2799           0 :         mchunkptr mn = next_chunk(mem2chunk(m));
    2800           0 :         init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
    2801             :       }
    2802             :     }
    2803             : 
    2804             :     else {
    2805             :       /* Try to merge with an existing segment */
    2806           0 :       msegmentptr sp = &m->seg;
    2807             :       /* Only consider most recent segment if traversal suppressed */
    2808           0 :       while (sp != 0 && tbase != sp->base + sp->size)
    2809           0 :         sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
    2810           0 :       if (sp != 0 &&
    2811           0 :           !is_extern_segment(sp) &&
    2812           0 :           (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
    2813           0 :           segment_holds(sp, m->top)) { /* append */
    2814           0 :         sp->size += tsize;
    2815           0 :         init_top(m, m->top, m->topsize + tsize);
    2816             :       }
    2817             :       else {
    2818           0 :         if (tbase < m->least_addr)
    2819           0 :           m->least_addr = tbase;
    2820           0 :         sp = &m->seg;
    2821           0 :         while (sp != 0 && sp->base != tbase + tsize)
    2822           0 :           sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
    2823           0 :         if (sp != 0 &&
    2824           0 :             !is_extern_segment(sp) &&
    2825           0 :             (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
    2826           0 :           char* oldbase = sp->base;
    2827           0 :           sp->base = tbase;
    2828           0 :           sp->size += tsize;
    2829           0 :           return prepend_alloc(m, tbase, oldbase, nb);
    2830             :         }
    2831             :         else
    2832           0 :           add_segment(m, tbase, tsize, mmap_flag);
    2833             :       }
    2834             :     }
    2835             : 
    2836           0 :     if (nb < m->topsize) { /* Allocate from new or extended top space */
    2837           0 :       size_t rsize = m->topsize -= nb;
    2838           0 :       mchunkptr p = m->top;
    2839           0 :       mchunkptr r = m->top = chunk_plus_offset(p, nb);
    2840           0 :       r->head = rsize | PINUSE_BIT;
    2841           0 :       set_size_and_pinuse_of_inuse_chunk(m, p, nb);
    2842             :       check_top_chunk(m, m->top);
    2843             :       check_malloced_chunk(m, chunk2mem(p), nb);
    2844           0 :       return chunk2mem(p);
    2845             :     }
    2846             :   }
    2847             : 
    2848           0 :   MALLOC_FAILURE_ACTION;
    2849           0 :   return 0;
    2850             : }
    2851             : 
    2852             : /* -----------------------  system deallocation -------------------------- */
    2853             : 
    2854             : /* Unmap and unlink any mmapped segments that don't contain used chunks */
    2855             : __clib_nosanitize_addr
    2856        5068 : static size_t release_unused_segments(mstate m) {
    2857        5068 :   size_t released = 0;
    2858        5068 :   int nsegs = 0;
    2859        5068 :   msegmentptr pred = &m->seg;
    2860        5068 :   msegmentptr sp = pred->next;
    2861        5068 :   while (sp != 0) {
    2862           0 :     char* base = sp->base;
    2863           0 :     size_t size = sp->size;
    2864           0 :     msegmentptr next = sp->next;
    2865           0 :     ++nsegs;
    2866           0 :     if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
    2867           0 :       mchunkptr p = align_as_chunk(base);
    2868           0 :       size_t psize = chunksize(p);
    2869             :       /* Can unmap if first chunk holds entire segment and not pinned */
    2870           0 :       if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
    2871           0 :         tchunkptr tp = (tchunkptr)p;
    2872             :         assert(segment_holds(sp, (char*)sp));
    2873           0 :         if (p == m->dv) {
    2874           0 :           m->dv = 0;
    2875           0 :           m->dvsize = 0;
    2876             :         }
    2877             :         else {
    2878           0 :           unlink_large_chunk(m, tp);
    2879             :         }
    2880           0 :         if (CALL_MUNMAP(base, size) == 0) {
    2881           0 :           released += size;
    2882           0 :           m->footprint -= size;
    2883             :           /* unlink obsoleted record */
    2884           0 :           sp = pred;
    2885           0 :           sp->next = next;
    2886             :         }
    2887             :         else { /* back out if cannot unmap */
    2888           0 :           insert_large_chunk(m, tp, psize);
    2889             :         }
    2890             :       }
    2891             :     }
    2892             :     if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
    2893             :       break;
    2894           0 :     pred = sp;
    2895           0 :     sp = next;
    2896             :   }
    2897             :   /* Reset check counter */
    2898       10136 :   m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?
    2899        5068 :                        (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);
    2900        5068 :   return released;
    2901             : }
    2902             : 
    2903             : __clib_nosanitize_addr
    2904        1529 : static int sys_trim(mstate m, size_t pad) {
    2905        1529 :   size_t released = 0;
    2906        1529 :   ensure_initialization();
    2907        1529 :   if (pad < MAX_REQUEST && is_initialized(m)) {
    2908        1529 :     pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
    2909             : 
    2910        1529 :     if (m->topsize > pad) {
    2911             :       /* Shrink top space in granularity-size units, keeping at least one */
    2912        1529 :       size_t unit = mparams.granularity;
    2913        1529 :       size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
    2914             :                       SIZE_T_ONE) * unit;
    2915        1529 :       msegmentptr sp = segment_holding(m, (char*)m->top);
    2916             : 
    2917        1529 :       if (!is_extern_segment(sp)) {
    2918           0 :         if (is_mmapped_segment(sp)) {
    2919           0 :           if (HAVE_MMAP &&
    2920           0 :               sp->size >= extra &&
    2921           0 :               !has_segment_link(m, sp)) { /* can't shrink if pinned */
    2922           0 :             size_t newsize = sp->size - extra;
    2923             :             (void)newsize; /* placate people compiling -Wunused-variable */
    2924             :             /* Prefer mremap, fall back to munmap */
    2925           0 :             if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
    2926           0 :                 (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
    2927           0 :               released = extra;
    2928             :             }
    2929             :           }
    2930             :         }
    2931             :         else if (HAVE_MORECORE) {
    2932             :           if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
    2933             :             extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
    2934             :           ACQUIRE_MALLOC_GLOBAL_LOCK();
    2935             :           {
    2936             :             /* Make sure end of memory is where we last set it. */
    2937             :             char* old_br = (char*)(CALL_MORECORE(0));
    2938             :             if (old_br == sp->base + sp->size) {
    2939             :               char* rel_br = (char*)(CALL_MORECORE(-extra));
    2940             :               char* new_br = (char*)(CALL_MORECORE(0));
    2941             :               if (rel_br != CMFAIL && new_br < old_br)
    2942             :                 released = old_br - new_br;
    2943             :             }
    2944             :           }
    2945             :           RELEASE_MALLOC_GLOBAL_LOCK();
    2946             :         }
    2947             :       }
    2948             : 
    2949        1529 :       if (released != 0) {
    2950           0 :         sp->size -= released;
    2951           0 :         m->footprint -= released;
    2952           0 :         init_top(m, m->top, m->topsize - released);
    2953             :         check_top_chunk(m, m->top);
    2954             :       }
    2955             :     }
    2956             : 
    2957             :     /* Unmap any unused mmapped segments */
    2958             :     if (HAVE_MMAP)
    2959        1529 :       released += release_unused_segments(m);
    2960             : 
    2961             :     /* On failure, disable autotrim to avoid repeated failed future calls */
    2962        1529 :     if (released == 0 && m->topsize > m->trim_check)
    2963        1529 :       m->trim_check = MAX_SIZE_T;
    2964             :   }
    2965             : 
    2966        1529 :   return (released != 0)? 1 : 0;
    2967             : }
    2968             : 
    2969             : /* Consolidate and bin a chunk. Differs from exported versions
    2970             :    of free mainly in that the chunk need not be marked as inuse.
    2971             : */
    2972             : __clib_nosanitize_addr
    2973     3270420 : static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
    2974     3270420 :   mchunkptr next = chunk_plus_offset(p, psize);
    2975     3270420 :   if (!pinuse(p)) {
    2976             :     mchunkptr prev;
    2977           0 :     size_t prevsize = p->prev_foot;
    2978           0 :     if (is_mmapped(p)) {
    2979           0 :       psize += prevsize + MMAP_FOOT_PAD;
    2980           0 :       if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
    2981           0 :         m->footprint -= psize;
    2982           0 :       return;
    2983             :     }
    2984           0 :     prev = chunk_minus_offset(p, prevsize);
    2985           0 :     psize += prevsize;
    2986           0 :     p = prev;
    2987           0 :     if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */
    2988           0 :       if (p != m->dv) {
    2989           0 :         unlink_chunk(m, p, prevsize);
    2990             :       }
    2991           0 :       else if ((next->head & INUSE_BITS) == INUSE_BITS) {
    2992           0 :         m->dvsize = psize;
    2993           0 :         set_free_with_pinuse(p, psize, next);
    2994           0 :         return;
    2995             :       }
    2996             :     }
    2997             :     else {
    2998           0 :       CORRUPTION_ERROR_ACTION(m);
    2999             :       return;
    3000             :     }
    3001             :   }
    3002     3270420 :   if (RTCHECK(ok_address(m, next))) {
    3003     3270420 :     if (!cinuse(next)) {  /* consolidate forward */
    3004     1392710 :       if (next == m->top) {
    3005       51583 :         size_t tsize = m->topsize += psize;
    3006       51583 :         m->top = p;
    3007       51583 :         p->head = tsize | PINUSE_BIT;
    3008       51583 :         if (p == m->dv) {
    3009           0 :           m->dv = 0;
    3010           0 :           m->dvsize = 0;
    3011             :         }
    3012       51583 :         return;
    3013             :       }
    3014     1341130 :       else if (next == m->dv) {
    3015      904832 :         size_t dsize = m->dvsize += psize;
    3016      904832 :         m->dv = p;
    3017      904832 :         set_size_and_pinuse_of_free_chunk(p, dsize);
    3018      904832 :         return;
    3019             :       }
    3020             :       else {
    3021      436296 :         size_t nsize = chunksize(next);
    3022      436296 :         psize += nsize;
    3023      436296 :         unlink_chunk(m, next, nsize);
    3024      436296 :         set_size_and_pinuse_of_free_chunk(p, psize);
    3025      436296 :         if (p == m->dv) {
    3026           0 :           m->dvsize = psize;
    3027           0 :           return;
    3028             :         }
    3029             :       }
    3030             :     }
    3031             :     else {
    3032     1877700 :       set_free_with_pinuse(p, psize, next);
    3033             :     }
    3034     2402250 :     insert_chunk(m, p, psize);
    3035             :   }
    3036             :   else {
    3037           0 :     CORRUPTION_ERROR_ACTION(m);
    3038             :   }
    3039             : }
    3040             : 
    3041             : /* ---------------------------- malloc --------------------------- */
    3042             : 
    3043             : /* allocate a large request from the best fitting chunk in a treebin */
    3044             : __clib_nosanitize_addr
    3045    10542500 : static void* tmalloc_large(mstate m, size_t nb) {
    3046    10542500 :   tchunkptr v = 0;
    3047    10542500 :   size_t rsize = -nb; /* Unsigned negation */
    3048             :   tchunkptr t;
    3049             :   bindex_t idx;
    3050    10542500 :   compute_tree_index(nb, idx);
    3051    10542500 :   if ((t = *treebin_at(m, idx)) != 0) {
    3052             :     /* Traverse tree for this bin looking for node with size == nb */
    3053     6357960 :     size_t sizebits = nb << leftshift_for_tree_index(idx);
    3054     6357960 :     tchunkptr rst = 0;  /* The deepest untaken right subtree */
    3055     2778960 :     for (;;) {
    3056             :       tchunkptr rt;
    3057     9136920 :       size_t trem = chunksize(t) - nb;
    3058     9136920 :       if (trem < rsize) {
    3059     3624950 :         v = t;
    3060     3624950 :         if ((rsize = trem) == 0)
    3061      812265 :           break;
    3062             :       }
    3063     8324660 :       rt = t->child[1];
    3064     8324660 :       t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
    3065     8324660 :       if (rt != 0 && rt != t)
    3066      967357 :         rst = rt;
    3067     8324660 :       if (t == 0) {
    3068     5545690 :         t = rst; /* set t to least subtree holding sizes > nb */
    3069     5545690 :         break;
    3070             :       }
    3071     2778960 :       sizebits <<= 1;
    3072             :     }
    3073             :   }
    3074    10542500 :   if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
    3075     7265100 :     binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
    3076     7265100 :     if (leftbits != 0) {
    3077             :       bindex_t i;
    3078     6042660 :       binmap_t leastbit = least_bit(leftbits);
    3079     6042660 :       compute_bit2idx(leastbit, i);
    3080     6042660 :       t = *treebin_at(m, i);
    3081             :     }
    3082             :   }
    3083             : 
    3084    20594400 :   while (t != 0) { /* find smallest of tree or subtree */
    3085    10051900 :     size_t trem = chunksize(t) - nb;
    3086    10051900 :     if (trem < rsize) {
    3087     7594720 :       rsize = trem;
    3088     7594720 :       v = t;
    3089             :     }
    3090    10051900 :     t = leftmost_child(t);
    3091             :   }
    3092             : 
    3093             :   /*  If dv is a better fit, return 0 so malloc will use it */
    3094    10542500 :   if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
    3095     8785560 :     if (RTCHECK(ok_address(m, v))) { /* split */
    3096     8785560 :       mchunkptr r = chunk_plus_offset(v, nb);
    3097             :       assert(chunksize(v) == rsize + nb);
    3098     8785560 :       if (RTCHECK(ok_next(v, r))) {
    3099     8997620 :         unlink_large_chunk(m, v);
    3100     8785560 :         if (rsize < MIN_CHUNK_SIZE)
    3101     1050480 :           set_inuse_and_pinuse(m, v, (rsize + nb));
    3102             :         else {
    3103     7735080 :           set_size_and_pinuse_of_inuse_chunk(m, v, nb);
    3104     7735080 :           set_size_and_pinuse_of_free_chunk(r, rsize);
    3105     8861200 :           insert_chunk(m, r, rsize);
    3106             :         }
    3107     8785560 :         return chunk2mem(v);
    3108             :       }
    3109             :     }
    3110           0 :     CORRUPTION_ERROR_ACTION(m);
    3111             :   }
    3112     1756970 :   return 0;
    3113             : }
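/*
   A hedged sketch of the treebin descent above: each treebin is a
   bitwise trie keyed on chunk size. sizebits places the first
   distinguishing bit of nb at the top of the word, and

     t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];

   peels off one bit per level: a 0 bit descends left, a 1 bit
   descends right (so key bits 1,0,1 visit child[1], child[0],
   child[1]), while rst remembers the deepest untaken right subtree
   so the search can fall back to the least chunk larger than nb.
*/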
    3114             : 
    3115             : /* allocate a small request from the best fitting chunk in a treebin */
    3116             : __clib_nosanitize_addr
    3117     1483920 : static void* tmalloc_small(mstate m, size_t nb) {
    3118             :   tchunkptr t, v;
    3119             :   size_t rsize;
    3120             :   bindex_t i;
    3121     1483920 :   binmap_t leastbit = least_bit(m->treemap);
    3122     1483920 :   compute_bit2idx(leastbit, i);
    3123     1483920 :   v = t = *treebin_at(m, i);
    3124     1483920 :   rsize = chunksize(t) - nb;
    3125             : 
    3126     2497660 :   while ((t = leftmost_child(t)) != 0) {
    3127     1013740 :     size_t trem = chunksize(t) - nb;
    3128     1013740 :     if (trem < rsize) {
    3129      598525 :       rsize = trem;
    3130      598525 :       v = t;
    3131             :     }
    3132             :   }
    3133             : 
    3134     1483920 :   if (RTCHECK(ok_address(m, v))) {
    3135     1483920 :     mchunkptr r = chunk_plus_offset(v, nb);
    3136             :     assert(chunksize(v) == rsize + nb);
    3137     1483920 :     if (RTCHECK(ok_next(v, r))) {
    3138     1539210 :       unlink_large_chunk(m, v);
    3139     1483920 :       if (rsize < MIN_CHUNK_SIZE)
    3140       21839 :         set_inuse_and_pinuse(m, v, (rsize + nb));
    3141             :       else {
    3142     1462080 :         set_size_and_pinuse_of_inuse_chunk(m, v, nb);
    3143     1462080 :         set_size_and_pinuse_of_free_chunk(r, rsize);
    3144     1462080 :         replace_dv(m, r, rsize);
    3145             :       }
    3146     1483920 :       return chunk2mem(v);
    3147             :     }
    3148             :   }
    3149             : 
    3150           0 :   CORRUPTION_ERROR_ACTION(m);
    3151             :   return 0;
    3152             : }
    3153             : 
    3154             : #if !ONLY_MSPACES
    3155             : 
    3156             : void* dlmalloc(size_t bytes) {
    3157             :   /*
    3158             :      Basic algorithm:
    3159             :      If a small request (< 256 bytes minus per-chunk overhead):
    3160             :        1. If one exists, use a remainderless chunk in associated smallbin.
    3161             :           (Remainderless means that there are too few excess bytes to
    3162             :           represent as a chunk.)
    3163             :        2. If it is big enough, use the dv chunk, which is normally the
    3164             :           chunk adjacent to the one used for the most recent small request.
    3165             :        3. If one exists, split the smallest available chunk in a bin,
    3166             :           saving remainder in dv.
    3167             :        4. If it is big enough, use the top chunk.
    3168             :        5. If available, get memory from system and use it
    3169             :      Otherwise, for a large request:
    3170             :        1. Find the smallest available binned chunk that fits, and use it
    3171             :           if it is better fitting than dv chunk, splitting if necessary.
    3172             :        2. If better fitting than any binned chunk, use the dv chunk.
    3173             :        3. If it is big enough, use the top chunk.
    3174             :        4. If request size >= mmap threshold, try to directly mmap this chunk.
    3175             :        5. If available, get memory from system and use it
    3176             : 
    3177             :      The ugly gotos here ensure that postaction occurs along all paths.
    3178             :   */
    3179             : 
    3180             : #if USE_LOCKS
    3181             :   ensure_initialization(); /* initialize in sys_alloc if not using locks */
    3182             : #endif
    3183             : 
    3184             :   if (!PREACTION(gm)) {
    3185             :     void* mem;
    3186             :     size_t nb;
    3187             :     if (bytes <= MAX_SMALL_REQUEST) {
    3188             :       bindex_t idx;
    3189             :       binmap_t smallbits;
    3190             :       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
    3191             :       idx = small_index(nb);
    3192             :       smallbits = gm->smallmap >> idx;
    3193             : 
    3194             :       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
    3195             :         mchunkptr b, p;
    3196             :         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
    3197             :         b = smallbin_at(gm, idx);
    3198             :         p = b->fd;
    3199             :         assert(chunksize(p) == small_index2size(idx));
    3200             :         unlink_first_small_chunk(gm, b, p, idx);
    3201             :         set_inuse_and_pinuse(gm, p, small_index2size(idx));
    3202             :         mem = chunk2mem(p);
    3203             :         check_malloced_chunk(gm, mem, nb);
    3204             :         goto postaction;
    3205             :       }
    3206             : 
    3207             :       else if (nb > gm->dvsize) {
    3208             :         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
    3209             :           mchunkptr b, p, r;
    3210             :           size_t rsize;
    3211             :           bindex_t i;
    3212             :           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
    3213             :           binmap_t leastbit = least_bit(leftbits);
    3214             :           compute_bit2idx(leastbit, i);
    3215             :           b = smallbin_at(gm, i);
    3216             :           p = b->fd;
    3217             :           assert(chunksize(p) == small_index2size(i));
    3218             :           unlink_first_small_chunk(gm, b, p, i);
    3219             :           rsize = small_index2size(i) - nb;
    3220             :           /* Fit here cannot be remainderless with 4-byte sizes */
    3221             :           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
    3222             :             set_inuse_and_pinuse(gm, p, small_index2size(i));
    3223             :           else {
    3224             :             set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
    3225             :             r = chunk_plus_offset(p, nb);
    3226             :             set_size_and_pinuse_of_free_chunk(r, rsize);
    3227             :             replace_dv(gm, r, rsize);
    3228             :           }
    3229             :           mem = chunk2mem(p);
    3230             :           check_malloced_chunk(gm, mem, nb);
    3231             :           goto postaction;
    3232             :         }
    3233             : 
    3234             :         else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
    3235             :           check_malloced_chunk(gm, mem, nb);
    3236             :           goto postaction;
    3237             :         }
    3238             :       }
    3239             :     }
    3240             :     else if (bytes >= MAX_REQUEST)
    3241             :       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys_alloc) */
    3242             :     else {
    3243             :       nb = pad_request(bytes);
    3244             :       if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
    3245             :         check_malloced_chunk(gm, mem, nb);
    3246             :         goto postaction;
    3247             :       }
    3248             :     }
    3249             : 
    3250             :     if (nb <= gm->dvsize) {
    3251             :       size_t rsize = gm->dvsize - nb;
    3252             :       mchunkptr p = gm->dv;
    3253             :       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
    3254             :         mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
    3255             :         gm->dvsize = rsize;
    3256             :         set_size_and_pinuse_of_free_chunk(r, rsize);
    3257             :         set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
    3258             :       }
    3259             :       else { /* exhaust dv */
    3260             :         size_t dvs = gm->dvsize;
    3261             :         gm->dvsize = 0;
    3262             :         gm->dv = 0;
    3263             :         set_inuse_and_pinuse(gm, p, dvs);
    3264             :       }
    3265             :       mem = chunk2mem(p);
    3266             :       check_malloced_chunk(gm, mem, nb);
    3267             :       goto postaction;
    3268             :     }
    3269             : 
    3270             :     else if (nb < gm->topsize) { /* Split top */
    3271             :       size_t rsize = gm->topsize -= nb;
    3272             :       mchunkptr p = gm->top;
    3273             :       mchunkptr r = gm->top = chunk_plus_offset(p, nb);
    3274             :       r->head = rsize | PINUSE_BIT;
    3275             :       set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
    3276             :       mem = chunk2mem(p);
    3277             :       check_top_chunk(gm, gm->top);
    3278             :       check_malloced_chunk(gm, mem, nb);
    3279             :       goto postaction;
    3280             :     }
    3281             : 
    3282             :     mem = sys_alloc(gm, nb);
    3283             : 
    3284             :   postaction:
    3285             :     POSTACTION(gm);
    3286             :     return mem;
    3287             :   }
    3288             : 
    3289             :   return 0;
    3290             : }
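/*
   A hedged worked example of the request normalization above,
   assuming 64-bit defaults (16-byte MALLOC_ALIGNMENT, 8-byte
   CHUNK_OVERHEAD without FOOTERS):

     dlmalloc(24):  nb = pad_request(24)  = (24 + 8 + 15) & ~15  = 32
     dlmalloc(100): nb = pad_request(100) = (100 + 8 + 15) & ~15 = 112

   i.e. a request pays one size_t of header and rounds up to the
   alignment granule before the bin search begins.
*/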
    3291             : 
    3292             : /* ---------------------------- free --------------------------- */
    3293             : 
    3294             : void dlfree(void* mem) {
    3295             :   /*
    3296             :      Consolidate freed chunks with preceding or succeeding bordering
    3297             :      free chunks, if they exist, and then place in a bin.  Intermixed
    3298             :      with special cases for top, dv, mmapped chunks, and usage errors.
    3299             :   */
    3300             : 
    3301             :   if (mem != 0) {
    3302             :     mchunkptr p  = mem2chunk(mem);
    3303             : #if FOOTERS
    3304             :     mstate fm = get_mstate_for(p);
    3305             :     if (!ok_magic(fm)) {
    3306             :       USAGE_ERROR_ACTION(fm, p);
    3307             :       return;
    3308             :     }
    3309             : #else /* FOOTERS */
    3310             : #define fm gm
    3311             : #endif /* FOOTERS */
    3312             :     if (!PREACTION(fm)) {
    3313             :       check_inuse_chunk(fm, p);
    3314             :       if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
    3315             :         size_t psize = chunksize(p);
    3316             :         mchunkptr next = chunk_plus_offset(p, psize);
    3317             :         if (!pinuse(p)) {
    3318             :           size_t prevsize = p->prev_foot;
    3319             :           if (is_mmapped(p)) {
    3320             :             psize += prevsize + MMAP_FOOT_PAD;
    3321             :             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
    3322             :               fm->footprint -= psize;
    3323             :             goto postaction;
    3324             :           }
    3325             :           else {
    3326             :             mchunkptr prev = chunk_minus_offset(p, prevsize);
    3327             :             psize += prevsize;
    3328             :             p = prev;
    3329             :             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
    3330             :               if (p != fm->dv) {
    3331             :                 unlink_chunk(fm, p, prevsize);
    3332             :               }
    3333             :               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
    3334             :                 fm->dvsize = psize;
    3335             :                 set_free_with_pinuse(p, psize, next);
    3336             :                 goto postaction;
    3337             :               }
    3338             :             }
    3339             :             else
    3340             :               goto erroraction;
    3341             :           }
    3342             :         }
    3343             : 
    3344             :         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
    3345             :           if (!cinuse(next)) {  /* consolidate forward */
    3346             :             if (next == fm->top) {
    3347             :               size_t tsize = fm->topsize += psize;
    3348             :               fm->top = p;
    3349             :               p->head = tsize | PINUSE_BIT;
    3350             :               if (p == fm->dv) {
    3351             :                 fm->dv = 0;
    3352             :                 fm->dvsize = 0;
    3353             :               }
    3354             :               if (should_trim(fm, tsize))
    3355             :                 sys_trim(fm, 0);
    3356             :               goto postaction;
    3357             :             }
    3358             :             else if (next == fm->dv) {
    3359             :               size_t dsize = fm->dvsize += psize;
    3360             :               fm->dv = p;
    3361             :               set_size_and_pinuse_of_free_chunk(p, dsize);
    3362             :               goto postaction;
    3363             :             }
    3364             :             else {
    3365             :               size_t nsize = chunksize(next);
    3366             :               psize += nsize;
    3367             :               unlink_chunk(fm, next, nsize);
    3368             :               set_size_and_pinuse_of_free_chunk(p, psize);
    3369             :               if (p == fm->dv) {
    3370             :                 fm->dvsize = psize;
    3371             :                 goto postaction;
    3372             :               }
    3373             :             }
    3374             :           }
    3375             :           else
    3376             :             set_free_with_pinuse(p, psize, next);
    3377             : 
    3378             :           if (is_small(psize)) {
    3379             :             insert_small_chunk(fm, p, psize);
    3380             :             check_free_chunk(fm, p);
    3381             :           }
    3382             :           else {
    3383             :             tchunkptr tp = (tchunkptr)p;
    3384             :             insert_large_chunk(fm, tp, psize);
    3385             :             check_free_chunk(fm, p);
    3386             :             if (--fm->release_checks == 0)
    3387             :               release_unused_segments(fm);
    3388             :           }
    3389             :           goto postaction;
    3390             :         }
    3391             :       }
    3392             :     erroraction:
    3393             :       USAGE_ERROR_ACTION(fm, p);
    3394             :     postaction:
    3395             :       POSTACTION(fm);
    3396             :     }
    3397             :   }
    3398             : #if !FOOTERS
    3399             : #undef fm
    3400             : #endif /* FOOTERS */
    3401             : }
    3402             : 
    3403             : void* dlcalloc(size_t n_elements, size_t elem_size) {
    3404             :   void* mem;
    3405             :   size_t req = 0;
    3406             :   if (n_elements != 0) {
    3407             :     req = n_elements * elem_size;
    3408             :     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
    3409             :         (req / n_elements != elem_size))
    3410             :       req = MAX_SIZE_T; /* force downstream failure on overflow */
    3411             :   }
    3412             :   mem = dlmalloc(req);
    3413             :   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
    3414             :     memset(mem, 0, req);
    3415             :   return mem;
    3416             : }
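/* A hedged standalone restatement (hypothetical helper, not part of
   this file) of the overflow screen above: when both factors fit in
   16 bits their product cannot overflow a 32-bit-or-wider size_t, so
   the division check is skipped on the common fast path. */
static inline size_t calloc_req_sketch(size_t n, size_t es) {
  size_t req = n * es;
  if (((n | es) & ~(size_t)0xffff) &&  /* either factor > 0xffff? */
      n != 0 && (req / n != es))       /* then verify by division */
    req = MAX_SIZE_T;                  /* force downstream failure */
  return req;
}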
    3417             : 
    3418             : #endif /* !ONLY_MSPACES */
    3419             : 
    3420             : /* ------------ Internal support for realloc, memalign, etc -------------- */
    3421             : 
    3422             : /* Try to realloc; only in-place unless can_move true */
    3423    23319900 : static __clib_nosanitize_addr mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
    3424             :                                    int can_move) {
    3425    23319900 :   mchunkptr newp = 0;
    3426    23319900 :   size_t oldsize = chunksize(p);
    3427    23319900 :   mchunkptr next = chunk_plus_offset(p, oldsize);
    3428    23319900 :   if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
    3429             :               ok_next(p, next) && ok_pinuse(next))) {
    3430    23319900 :     if (is_mmapped(p)) {
    3431           0 :       newp = mmap_resize(m, p, nb, can_move);
    3432             :     }
    3433    23319900 :     else if (oldsize >= nb) {             /* already big enough */
    3434           0 :       size_t rsize = oldsize - nb;
    3435           0 :       if (rsize >= MIN_CHUNK_SIZE) {      /* split off remainder */
    3436           0 :         mchunkptr r = chunk_plus_offset(p, nb);
    3437           0 :         set_inuse(m, p, nb);
    3438           0 :         set_inuse(m, r, rsize);
    3439           0 :         dispose_chunk(m, r, rsize);
    3440             :       }
    3441           0 :       newp = p;
    3442             :     }
    3443    23319900 :     else if (next == m->top) {  /* extend into top */
    3444      107980 :       if (oldsize + m->topsize > nb) {
    3445      107980 :         size_t newsize = oldsize + m->topsize;
    3446      107980 :         size_t newtopsize = newsize - nb;
    3447      107980 :         mchunkptr newtop = chunk_plus_offset(p, nb);
    3448      107980 :         set_inuse(m, p, nb);
    3449      107980 :         newtop->head = newtopsize |PINUSE_BIT;
    3450      107980 :         m->top = newtop;
    3451      107980 :         m->topsize = newtopsize;
    3452      107980 :         newp = p;
    3453             :       }
    3454             :     }
    3455    23211900 :     else if (next == m->dv) { /* extend into dv */
    3456     8190570 :       size_t dvs = m->dvsize;
    3457     8190570 :       if (oldsize + dvs >= nb) {
    3458     8086220 :         size_t dsize = oldsize + dvs - nb;
    3459     8086220 :         if (dsize >= MIN_CHUNK_SIZE) {
    3460     7829940 :           mchunkptr r = chunk_plus_offset(p, nb);
    3461     7829940 :           mchunkptr n = chunk_plus_offset(r, dsize);
    3462     7829940 :           set_inuse(m, p, nb);
    3463     7829940 :           set_size_and_pinuse_of_free_chunk(r, dsize);
    3464     7829940 :           clear_pinuse(n);
    3465     7829940 :           m->dvsize = dsize;
    3466     7829940 :           m->dv = r;
    3467             :         }
    3468             :         else { /* exhaust dv */
    3469      256276 :           size_t newsize = oldsize + dvs;
    3470      256276 :           set_inuse(m, p, newsize);
    3471      256276 :           m->dvsize = 0;
    3472      256276 :           m->dv = 0;
    3473             :         }
    3474     8086220 :         newp = p;
    3475             :       }
    3476             :     }
    3477    15021300 :     else if (!cinuse(next)) { /* extend into next free chunk */
    3478      971953 :       size_t nextsize = chunksize(next);
    3479      971953 :       if (oldsize + nextsize >= nb) {
    3480      444969 :         size_t rsize = oldsize + nextsize - nb;
    3481      459376 :         unlink_chunk(m, next, nextsize);
    3482      444969 :         if (rsize < MIN_CHUNK_SIZE) {
    3483      179675 :           size_t newsize = oldsize + nextsize;
    3484      179675 :           set_inuse(m, p, newsize);
    3485             :         }
    3486             :         else {
    3487      265294 :           mchunkptr r = chunk_plus_offset(p, nb);
    3488      265294 :           set_inuse(m, p, nb);
    3489      265294 :           set_inuse(m, r, rsize);
    3490      265294 :           dispose_chunk(m, r, rsize);
    3491             :         }
    3492      444969 :         newp = p;
    3493             :       }
    3494             :     }
    3495             :   }
    3496             :   else {
    3497           0 :     USAGE_ERROR_ACTION(m, chunk2mem(p));
    3498             :   }
    3499    23319900 :   return newp;
    3500             : }
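/*
   Hedged summary of the in-place strategies tried above, in order:
   mmap_resize for mmapped chunks; splitting when the chunk is
   already big enough; growing into top; growing into the dv chunk;
   absorbing a free successor. Only mmap_resize may move the chunk,
   and only when can_move is set.
*/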
    3501             : 
    3502             : __clib_nosanitize_addr
    3503     2073970 : static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
    3504     2073970 :   void* mem = 0;
    3505     2073970 :   if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
    3506           0 :     alignment = MIN_CHUNK_SIZE;
    3507     2073970 :   if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
    3508           0 :     size_t a = MALLOC_ALIGNMENT << 1;
    3509           0 :     while (a < alignment) a <<= 1;
    3510           0 :     alignment = a;
    3511             :   }
    3512     2073970 :   if (bytes >= MAX_REQUEST - alignment) {
    3513           0 :     if (m != 0)  { /* Test isn't needed but avoids compiler warning */
    3514           0 :       MALLOC_FAILURE_ACTION;
    3515             :     }
    3516             :   }
    3517             :   else {
    3518     2073970 :     size_t nb = request2size(bytes);
    3519     2073970 :     size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
    3520     2073970 :     mem = internal_malloc(m, req);
    3521     2073990 :     if (mem != 0) {
    3522     2073990 :       mchunkptr p = mem2chunk(mem);
    3523     2073990 :       if (PREACTION(m))
    3524           0 :         return 0;
    3525     2073990 :       if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */
    3526             :         /*
    3527             :           Find an aligned spot inside chunk.  Since we need to give
    3528             :           back leading space in a chunk of at least MIN_CHUNK_SIZE, if
    3529             :           the first calculation places us at a spot with less than
    3530             :           MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
    3531             :           We've allocated enough total room so that this is always
    3532             :           possible.
    3533             :         */
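        /*
          Hedged worked example, assuming 64-bit defaults (user mem
          sits 16 bytes above the chunk, MIN_CHUNK_SIZE is 32): for
          alignment = 4096 and mem = 0x100010 (so p = 0x100000),
          br = mem2chunk(0x101000) = 0x100ff0; the 0xff0-byte leader
          is >= MIN_CHUNK_SIZE, so pos = br, and chunk2mem(newp) =
          0x101000 is aligned, with the leader handed back through
          dispose_chunk.
        */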
    3534     1484570 :         char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -
    3535             :                                                        SIZE_T_ONE)) &
    3536             :                                              -alignment));
    3537     2969130 :         char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
    3538     1484570 :           br : br+alignment;
    3539     1484570 :         mchunkptr newp = (mchunkptr)pos;
    3540     1484570 :         size_t leadsize = pos - (char*)(p);
    3541     1484570 :         size_t newsize = chunksize(p) - leadsize;
    3542             : 
    3543     1484570 :         if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
    3544           0 :           newp->prev_foot = p->prev_foot + leadsize;
    3545           0 :           newp->head = newsize;
    3546             :         }
    3547             :         else { /* Otherwise, give back leader, use the rest */
    3548     1484570 :           set_inuse(m, newp, newsize);
    3549     1484570 :           set_inuse(m, p, leadsize);
    3550     1484570 :           dispose_chunk(m, p, leadsize);
    3551             :         }
    3552     1484570 :         p = newp;
    3553             :       }
    3554             : 
    3555             :       /* Give back spare room at the end */
    3556     2073990 :       if (!is_mmapped(p)) {
    3557     2073990 :         size_t size = chunksize(p);
    3558     2073990 :         if (size > nb + MIN_CHUNK_SIZE) {
    3559     1520560 :           size_t remainder_size = size - nb;
    3560     1520560 :           mchunkptr remainder = chunk_plus_offset(p, nb);
    3561     1520560 :           set_inuse(m, p, nb);
    3562     1520560 :           set_inuse(m, remainder, remainder_size);
    3563     1520560 :           dispose_chunk(m, remainder, remainder_size);
    3564             :         }
    3565             :       }
    3566             : 
    3567     2073990 :       mem = chunk2mem(p);
    3568             :       assert (chunksize(p) >= nb);
    3569             :       assert(((size_t)mem & (alignment - 1)) == 0);
    3570             :       check_inuse_chunk(m, p);
    3571     2073990 :       POSTACTION(m);
    3572             :     }
    3573             :   }
    3574     2073990 :   return mem;
    3575             : }
    3576             : 
    3577             : /*
    3578             :   Common support for independent_X routines, handling
    3579             :     all of the combinations that can result.
    3580             :   The opts arg has:
    3581             :     bit 0 set if all elements are same size (using sizes[0])
    3582             :     bit 1 set if elements should be zeroed
    3583             : */
    3584           0 : static void** ialloc(mstate m,
    3585             :                      size_t n_elements,
    3586             :                      size_t* sizes,
    3587             :                      int opts,
    3588             :                      void* chunks[]) {
    3589             : 
    3590             :   size_t    element_size;   /* chunksize of each element, if all same */
    3591             :   size_t    contents_size;  /* total size of elements */
    3592             :   size_t    array_size;     /* request size of pointer array */
    3593             :   void*     mem;            /* malloced aggregate space */
    3594             :   mchunkptr p;              /* corresponding chunk */
    3595             :   size_t    remainder_size; /* remaining bytes while splitting */
    3596             :   void**    marray;         /* either "chunks" or malloced ptr array */
    3597             :   mchunkptr array_chunk;    /* chunk for malloced ptr array */
    3598             :   flag_t    was_enabled;    /* to disable mmap */
    3599             :   size_t    size;
    3600             :   size_t    i;
    3601             : 
    3602           0 :   ensure_initialization();
    3603             :   /* compute array length, if needed */
    3604           0 :   if (chunks != 0) {
    3605           0 :     if (n_elements == 0)
    3606           0 :       return chunks; /* nothing to do */
    3607           0 :     marray = chunks;
    3608           0 :     array_size = 0;
    3609             :   }
    3610             :   else {
    3611             :     /* if empty req, must still return chunk representing empty array */
    3612           0 :     if (n_elements == 0)
    3613           0 :       return (void**)internal_malloc(m, 0);
    3614           0 :     marray = 0;
    3615           0 :     array_size = request2size(n_elements * (sizeof(void*)));
    3616             :   }
    3617             : 
    3618             :   /* compute total element size */
    3619           0 :   if (opts & 0x1) { /* all-same-size */
    3620           0 :     element_size = request2size(*sizes);
    3621           0 :     contents_size = n_elements * element_size;
    3622             :   }
    3623             :   else { /* add up all the sizes */
    3624           0 :     element_size = 0;
    3625           0 :     contents_size = 0;
    3626           0 :     for (i = 0; i != n_elements; ++i)
    3627           0 :       contents_size += request2size(sizes[i]);
    3628             :   }
    3629             : 
    3630           0 :   size = contents_size + array_size;
    3631             : 
    3632             :   /*
    3633             :      Allocate the aggregate chunk.  First disable direct-mmapping so
    3634             :      malloc won't use it, since we would not be able to later
    3635             :      free/realloc space internal to a segregated mmap region.
    3636             :   */
    3637           0 :   was_enabled = use_mmap(m);
    3638           0 :   disable_mmap(m);
    3639           0 :   mem = internal_malloc(m, size - CHUNK_OVERHEAD);
    3640           0 :   if (was_enabled)
    3641           0 :     enable_mmap(m);
    3642           0 :   if (mem == 0)
    3643           0 :     return 0;
    3644             : 
    3645           0 :   if (PREACTION(m)) return 0;
    3646           0 :   p = mem2chunk(mem);
    3647           0 :   remainder_size = chunksize(p);
    3648             : 
    3649             :   assert(!is_mmapped(p));
    3650             : 
    3651           0 :   if (opts & 0x2) {       /* optionally clear the elements */
    3652           0 :     memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
    3653             :   }
    3654             : 
    3655             :   /* If not provided, allocate the pointer array as final part of chunk */
    3656           0 :   if (marray == 0) {
    3657             :     size_t  array_chunk_size;
    3658           0 :     array_chunk = chunk_plus_offset(p, contents_size);
    3659           0 :     array_chunk_size = remainder_size - contents_size;
    3660           0 :     marray = (void**) (chunk2mem(array_chunk));
    3661           0 :     set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
    3662           0 :     remainder_size = contents_size;
    3663             :   }
    3664             : 
    3665             :   /* split out elements */
    3666           0 :   for (i = 0; ; ++i) {
    3667           0 :     marray[i] = chunk2mem(p);
    3668           0 :     if (i != n_elements-1) {
    3669           0 :       if (element_size != 0)
    3670           0 :         size = element_size;
    3671             :       else
    3672           0 :         size = request2size(sizes[i]);
    3673           0 :       remainder_size -= size;
    3674           0 :       set_size_and_pinuse_of_inuse_chunk(m, p, size);
    3675           0 :       p = chunk_plus_offset(p, size);
    3676             :     }
    3677             :     else { /* the final element absorbs any overallocation slop */
    3678           0 :       set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
    3679           0 :       break;
    3680             :     }
    3681             :   }
    3682             : 
    3683             : #if DEBUG
    3684             :   if (marray != chunks) {
    3685             :     /* final element must have exactly exhausted chunk */
    3686             :     if (element_size != 0) {
    3687             :       assert(remainder_size == element_size);
    3688             :     }
    3689             :     else {
    3690             :       assert(remainder_size == request2size(sizes[i]));
    3691             :     }
    3692             :     check_inuse_chunk(m, mem2chunk(marray));
    3693             :   }
    3694             :   for (i = 0; i != n_elements; ++i)
    3695             :     check_inuse_chunk(m, mem2chunk(marray[i]));
    3696             : 
    3697             : #endif /* DEBUG */
    3698             : 
    3699           0 :   POSTACTION(m);
    3700           0 :   return marray;
    3701             : }
    3702             : 
    3703             : /* Try to free all pointers in the given array.
    3704             :    Note: this could be made faster by delaying consolidation,
    3705             :    at the price of disabling some user integrity checks. We
    3706             :    still optimize some consolidations by combining adjacent
    3707             :    chunks before freeing, which will occur often if allocated
    3708             :    with ialloc or the array is sorted.
    3709             : */
    3710           0 : static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {
    3711           0 :   size_t unfreed = 0;
    3712           0 :   if (!PREACTION(m)) {
    3713             :     void** a;
    3714           0 :     void** fence = &(array[nelem]);
    3715           0 :     for (a = array; a != fence; ++a) {
    3716           0 :       void* mem = *a;
    3717           0 :       if (mem != 0) {
    3718           0 :         mchunkptr p = mem2chunk(mem);
    3719           0 :         size_t psize = chunksize(p);
    3720             : #if FOOTERS
    3721           0 :         if (get_mstate_for(p) != m) {
    3722           0 :           ++unfreed;
    3723           0 :           continue;
    3724             :         }
    3725             : #endif
    3726             :         check_inuse_chunk(m, p);
    3727           0 :         *a = 0;
    3728           0 :         if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {
    3729           0 :           void ** b = a + 1; /* try to merge with next chunk */
    3730           0 :           mchunkptr next = next_chunk(p);
    3731           0 :           if (b != fence && *b == chunk2mem(next)) {
    3732           0 :             size_t newsize = chunksize(next) + psize;
    3733           0 :             set_inuse(m, p, newsize);
    3734           0 :             *b = chunk2mem(p);
    3735             :           }
    3736             :           else
    3737           0 :             dispose_chunk(m, p, psize);
    3738             :         }
    3739             :         else {
    3740           0 :           CORRUPTION_ERROR_ACTION(m);
    3741             :           break;
    3742             :         }
    3743             :       }
    3744             :     }
    3745           0 :     if (should_trim(m, m->topsize))
    3746           0 :       sys_trim(m, 0);
    3747           0 :     POSTACTION(m);
    3748             :   }
    3749           0 :   return unfreed;
    3750             : }
    3751             : 
    3752             : /* Traversal */
    3753             : #if MALLOC_INSPECT_ALL
    3754             : static void internal_inspect_all(mstate m,
    3755             :                                  void(*handler)(void *start,
    3756             :                                                 void *end,
    3757             :                                                 size_t used_bytes,
    3758             :                                                 void* callback_arg),
    3759             :                                  void* arg) {
    3760             :   if (is_initialized(m)) {
    3761             :     mchunkptr top = m->top;
    3762             :     msegmentptr s;
    3763             :     for (s = &m->seg; s != 0; s = s->next) {
    3764             :       mchunkptr q = align_as_chunk(s->base);
    3765             :       while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
    3766             :         mchunkptr next = next_chunk(q);
    3767             :         size_t sz = chunksize(q);
    3768             :         size_t used;
    3769             :         void* start;
    3770             :         if (is_inuse(q)) {
    3771             :           used = sz - CHUNK_OVERHEAD; /* must not be mmapped */
    3772             :           start = chunk2mem(q);
    3773             :         }
    3774             :         else {
    3775             :           used = 0;
    3776             :           if (is_small(sz)) {     /* offset by possible bookkeeping */
    3777             :             start = (void*)((char*)q + sizeof(struct malloc_chunk));
    3778             :           }
    3779             :           else {
    3780             :             start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
    3781             :           }
    3782             :         }
    3783             :         if (start < (void*)next)  /* skip if all space is bookkeeping */
    3784             :           handler(start, next, used, arg);
    3785             :         if (q == top)
    3786             :           break;
    3787             :         q = next;
    3788             :       }
    3789             :     }
    3790             :   }
    3791             : }
    3792             : #endif /* MALLOC_INSPECT_ALL */
    3793             : 
    3794             : /* ------------------ Exported realloc, memalign, etc -------------------- */
    3795             : 
    3796             : #if !ONLY_MSPACES
    3797             : 
    3798             : void* dlrealloc(void* oldmem, size_t bytes) {
    3799             :   void* mem = 0;
    3800             :   if (oldmem == 0) {
    3801             :     mem = dlmalloc(bytes);
    3802             :   }
    3803             :   else if (bytes >= MAX_REQUEST) {
    3804             :     MALLOC_FAILURE_ACTION;
    3805             :   }
    3806             : #ifdef REALLOC_ZERO_BYTES_FREES
    3807             :   else if (bytes == 0) {
    3808             :     dlfree(oldmem);
    3809             :   }
    3810             : #endif /* REALLOC_ZERO_BYTES_FREES */
    3811             :   else {
    3812             :     size_t nb = request2size(bytes);
    3813             :     mchunkptr oldp = mem2chunk(oldmem);
    3814             : #if ! FOOTERS
    3815             :     mstate m = gm;
    3816             : #else /* FOOTERS */
    3817             :     mstate m = get_mstate_for(oldp);
    3818             :     if (!ok_magic(m)) {
    3819             :       USAGE_ERROR_ACTION(m, oldmem);
    3820             :       return 0;
    3821             :     }
    3822             : #endif /* FOOTERS */
    3823             :     if (!PREACTION(m)) {
    3824             :       mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
    3825             :       POSTACTION(m);
    3826             :       if (newp != 0) {
    3827             :         check_inuse_chunk(m, newp);
    3828             :         mem = chunk2mem(newp);
    3829             :       }
    3830             :       else {
    3831             :         mem = internal_malloc(m, bytes);
    3832             :         if (mem != 0) {
    3833             :           size_t oc = chunksize(oldp) - overhead_for(oldp);
    3834             :           memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
    3835             :           internal_free(m, oldmem);
    3836             :         }
    3837             :       }
    3838             :     }
    3839             :   }
    3840             :   return mem;
    3841             : }
    3842             : 
    3843             : void* dlrealloc_in_place(void* oldmem, size_t bytes) {
    3844             :   void* mem = 0;
    3845             :   if (oldmem != 0) {
    3846             :     if (bytes >= MAX_REQUEST) {
    3847             :       MALLOC_FAILURE_ACTION;
    3848             :     }
    3849             :     else {
    3850             :       size_t nb = request2size(bytes);
    3851             :       mchunkptr oldp = mem2chunk(oldmem);
    3852             : #if ! FOOTERS
    3853             :       mstate m = gm;
    3854             : #else /* FOOTERS */
    3855             :       mstate m = get_mstate_for(oldp);
    3856             :       if (!ok_magic(m)) {
    3857             :         USAGE_ERROR_ACTION(m, oldmem);
    3858             :         return 0;
    3859             :       }
    3860             : #endif /* FOOTERS */
    3861             :       if (!PREACTION(m)) {
    3862             :         mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
    3863             :         POSTACTION(m);
    3864             :         if (newp == oldp) {
    3865             :           check_inuse_chunk(m, newp);
    3866             :           mem = oldmem;
    3867             :         }
    3868             :       }
    3869             :     }
    3870             :   }
    3871             :   return mem;
    3872             : }
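/* A hedged usage sketch: dlrealloc_in_place never moves the chunk, so
   on success the returned pointer equals the old one; on failure the
   old block is left untouched at its original size. */
static void realloc_in_place_sketch(void) {
  void* p = dlmalloc(100);
  if (p != 0) {
    void* q = dlrealloc_in_place(p, 200);
    if (q != 0)
      assert(q == p);  /* grown without moving */
    dlfree(p);         /* p is still the valid pointer either way */
  }
}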
    3873             : 
    3874             : void* dlmemalign(size_t alignment, size_t bytes) {
    3875             :   if (alignment <= MALLOC_ALIGNMENT) {
    3876             :     return dlmalloc(bytes);
    3877             :   }
    3878             :   return internal_memalign(gm, alignment, bytes);
    3879             : }
    3880             : 
    3881             : int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
    3882             :   void* mem = 0;
    3883             :   if (alignment == MALLOC_ALIGNMENT)
    3884             :     mem = dlmalloc(bytes);
    3885             :   else {
    3886             :     size_t d = alignment / sizeof(void*);
    3887             :     size_t r = alignment % sizeof(void*);
    3888             :     if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
    3889             :       return EINVAL;
    3890             :     else if (bytes <= MAX_REQUEST - alignment) {
    3891             :       if (alignment <  MIN_CHUNK_SIZE)
    3892             :         alignment = MIN_CHUNK_SIZE;
    3893             :       mem = internal_memalign(gm, alignment, bytes);
    3894             :     }
    3895             :   }
    3896             :   if (mem == 0)
    3897             :     return ENOMEM;
    3898             :   else {
    3899             :     *pp = mem;
    3900             :     return 0;
    3901             :   }
    3902             : }
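/* A hedged usage sketch of the POSIX-style error contract above:
   alignment 48 is a multiple of sizeof(void*) but not a power of
   two, so it draws EINVAL rather than ENOMEM. */
static int posix_memalign_sketch(void) {
  void* p = 0;
  int rc = dlposix_memalign(&p, 64, 1024);  /* 0 on success */
  if (rc == 0)
    dlfree(p);
  return rc;  /* EINVAL for a bad alignment, ENOMEM on exhaustion */
}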
    3903             : 
    3904             : void* dlvalloc(size_t bytes) {
    3905             :   size_t pagesz;
    3906             :   ensure_initialization();
    3907             :   pagesz = mparams.page_size;
    3908             :   return dlmemalign(pagesz, bytes);
    3909             : }
    3910             : 
    3911             : void* dlpvalloc(size_t bytes) {
    3912             :   size_t pagesz;
    3913             :   ensure_initialization();
    3914             :   pagesz = mparams.page_size;
    3915             :   return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
    3916             : }
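/*
   Hedged arithmetic for the page rounding above, with a 4096-byte
   page: dlpvalloc(5000) requests (5000 + 4095) & ~4095 = 8192 bytes,
   while dlvalloc(5000) keeps the 5000-byte request and only
   page-aligns the returned pointer.
*/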
    3917             : 
    3918             : void** dlindependent_calloc(size_t n_elements, size_t elem_size,
    3919             :                             void* chunks[]) {
    3920             :   size_t sz = elem_size; /* serves as 1-element array */
    3921             :   return ialloc(gm, n_elements, &sz, 3, chunks);
    3922             : }
    3923             : 
    3924             : void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
    3925             :                               void* chunks[]) {
    3926             :   return ialloc(gm, n_elements, sizes, 0, chunks);
    3927             : }
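/* A hedged usage sketch (struct hdr_sketch and n are hypothetical):
   carve one contiguous block into a header plus two arrays with a
   single call; each piece may later be passed to dlfree
   independently. */
struct hdr_sketch { size_t n; };
static void comalloc_sketch(size_t n) {
  void*  parts[3];
  size_t sizes[3];
  sizes[0] = sizeof(struct hdr_sketch);
  sizes[1] = n * sizeof(int);
  sizes[2] = n * sizeof(double);
  if (dlindependent_comalloc(3, sizes, parts) != 0) {
    struct hdr_sketch* h = parts[0];
    h->n = n;
    dlfree(parts[1]);
    dlfree(parts[2]);
    dlfree(parts[0]);
  }
}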
    3928             : 
    3929             : size_t dlbulk_free(void* array[], size_t nelem) {
    3930             :   return internal_bulk_free(gm, array, nelem);
    3931             : }
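/* A hedged usage sketch: free a batch under a single lock
   acquisition; entries are nulled as they are freed, and the return
   value counts pointers that could not be freed (0 on success). */
static size_t bulk_free_sketch(void) {
  void* ptrs[4];
  int i;
  for (i = 0; i < 4; i++)
    ptrs[i] = dlmalloc(64);
  return dlbulk_free(ptrs, 4);
}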
    3932             : 
    3933             : #if MALLOC_INSPECT_ALL
    3934             : void dlmalloc_inspect_all(void(*handler)(void *start,
    3935             :                                          void *end,
    3936             :                                          size_t used_bytes,
    3937             :                                          void* callback_arg),
    3938             :                           void* arg) {
    3939             :   ensure_initialization();
    3940             :   if (!PREACTION(gm)) {
    3941             :     internal_inspect_all(gm, handler, arg);
    3942             :     POSTACTION(gm);
    3943             :   }
    3944             : }
    3945             : #endif /* MALLOC_INSPECT_ALL */
    3946             : 
    3947             : int dlmalloc_trim(size_t pad) {
    3948             :   int result = 0;
    3949             :   ensure_initialization();
    3950             :   if (!PREACTION(gm)) {
    3951             :     result = sys_trim(gm, pad);
    3952             :     POSTACTION(gm);
    3953             :   }
    3954             :   return result;
    3955             : }
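/* A hedged usage sketch: trim top-of-heap slack but keep ~1 MiB
   resident for near-future requests. */
static void trim_sketch(void) {
  int released = dlmalloc_trim((size_t)1 << 20);
  (void)released;  /* 1 only if memory actually went back to the OS */
}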
    3956             : 
    3957             : size_t dlmalloc_footprint(void) {
    3958             :   return gm->footprint;
    3959             : }
    3960             : 
    3961             : size_t dlmalloc_max_footprint(void) {
    3962             :   return gm->max_footprint;
    3963             : }
    3964             : 
    3965             : size_t dlmalloc_footprint_limit(void) {
    3966             :   size_t maf = gm->footprint_limit;
    3967             :   return maf == 0 ? MAX_SIZE_T : maf;
    3968             : }
    3969             : 
    3970             : size_t dlmalloc_set_footprint_limit(size_t bytes) {
    3971             :   size_t result;  /* invert sense of 0 */
    3972             :   if (bytes == 0)
    3973             :     result = granularity_align(1); /* Use minimal size */
    3974             :   else if (bytes == MAX_SIZE_T)
    3975             :     result = 0;                    /* disable */
    3976             :   else
    3977             :     result = granularity_align(bytes);
    3978             :   return gm->footprint_limit = result;
    3979             : }
    3980             : 
    3981             : #if !NO_MALLINFO
    3982             : struct dlmallinfo dlmallinfo(void) {
    3983             :   return internal_mallinfo(gm);
    3984             : }
    3985             : #endif /* NO_MALLINFO */
    3986             : 
    3987             : #if !NO_MALLOC_STATS
    3988             : void dlmalloc_stats(void) {
    3989             :   internal_malloc_stats(gm);
    3990             : }
    3991             : #endif /* NO_MALLOC_STATS */
    3992             : 
    3993             : int dlmallopt(int param_number, int value) {
    3994             :   return change_mparam(param_number, value);
    3995             : }
    3996             : 
    3997             : size_t dlmalloc_usable_size(void* mem) {
    3998             :   if (mem != 0) {
    3999             :     mchunkptr p = mem2chunk(mem);
    4000             :     if (is_inuse(p))
    4001             :       return chunksize(p) - overhead_for(p);
    4002             :   }
    4003             :   return 0;
    4004             : }
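/* A hedged usage sketch: the usable size can exceed the request
   because of padding and bin granularity, and every usable byte may
   be written safely. */
static void usable_size_sketch(void) {
  void* p = dlmalloc(10);
  if (p != 0) {
    size_t n = dlmalloc_usable_size(p);  /* n >= 10 */
    memset(p, 0, n);                     /* all n bytes are valid */
    dlfree(p);
  }
}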
    4005             : 
    4006             : #endif /* !ONLY_MSPACES */
    4007             : 
    4008             : /* ----------------------------- user mspaces ---------------------------- */
    4009             : 
    4010             : #if MSPACES
    4011             : 
    4012        4774 : static mstate init_user_mstate(char* tbase, size_t tsize) {
    4013        4774 :   size_t msize = pad_request(sizeof(struct malloc_state));
    4014             :   mchunkptr mn;
    4015        4774 :   mchunkptr msp = align_as_chunk(tbase);
    4016        4774 :   mstate m = (mstate)(chunk2mem(msp));
    4017        4774 :   memset(m, 0, msize);
    4018        4774 :   (void)INITIAL_LOCK(&m->mutex);
    4019        4774 :   msp->head = (msize|INUSE_BITS);
    4020        4774 :   m->seg.base = m->least_addr = tbase;
    4021        4774 :   m->seg.size = m->footprint = m->max_footprint = tsize;
    4022        4774 :   m->magic = mparams.magic;
    4023        4774 :   m->release_checks = MAX_RELEASE_CHECK_RATE;
    4024        4774 :   m->mflags = mparams.default_mflags;
    4025        4774 :   m->extp = 0;
    4026        4774 :   m->exts = 0;
    4027        4774 :   disable_contiguous(m);
    4028        4774 :   init_bins(m);
    4029        4774 :   mn = next_chunk(mem2chunk(m));
    4030        4774 :   init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
    4031             :   check_top_chunk(m, m->top);
    4032        4774 :   return m;
    4033             : }
    4034             : 
    4035           0 : mspace create_mspace(size_t capacity, int locked) {
    4036           0 :   mstate m = 0;
    4037             :   size_t msize;
    4038           0 :   ensure_initialization();
    4039           0 :   msize = pad_request(sizeof(struct malloc_state));
    4040           0 :   if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
    4041           0 :     size_t rs = ((capacity == 0)? mparams.granularity :
    4042           0 :                  (capacity + TOP_FOOT_SIZE + msize));
    4043           0 :     size_t tsize = granularity_align(rs);
    4044           0 :     char* tbase = (char*)(CALL_MMAP(tsize));
    4045           0 :     if (tbase != CMFAIL) {
    4046           0 :       m = init_user_mstate(tbase, tsize);
    4047           0 :       m->seg.sflags = USE_MMAP_BIT;
    4048           0 :       set_lock(m, locked);
    4049             :     }
    4050             :   }
    4051           0 :   return (mspace)m;
    4052             : }
    4053             : 
    4054        4774 : mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
    4055        4774 :   mstate m = 0;
    4056             :   size_t msize;
    4057        4774 :   ensure_initialization();
    4058        4774 :   msize = pad_request(sizeof(struct malloc_state));
    4059        4774 :   if (capacity > msize + TOP_FOOT_SIZE &&
    4060        4774 :       capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
    4061        4774 :     m = init_user_mstate((char*)base, capacity);
    4062        4774 :     m->seg.sflags = EXTERN_BIT;
    4063        4774 :     set_lock(m, locked);
    4064             :   }
    4065        4774 :   return (mspace)m;
    4066             : }
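/*
  Editorial sketch, not part of the measured source: one plausible way a
  caller might use create_mspace_with_base() over caller-owned memory.
  The name heap_buf and the sizes are illustrative assumptions. Note the
  capacity checks above: the buffer must exceed msize + TOP_FOOT_SIZE,
  and because the segment carries EXTERN_BIT, destroy_mspace() will not
  unmap it.

      static char heap_buf[1 << 20];          // caller-owned backing store
      mspace ms = create_mspace_with_base (heap_buf, sizeof (heap_buf), 0);
      if (ms != 0)
        {
          void *p = mspace_malloc (ms, 128);  // allocate from heap_buf
          mspace_free (ms, p);
          destroy_mspace (ms);                // returns 0: EXTERN_BIT seg stays mapped
        }
*/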
    4067             : 
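/*
  Editorial note: "tracking" large chunks means keeping them inside the
  mspace's own segments rather than in private mmap regions, so enabling
  tracking disables mmap for this mspace. The return value reports the
  prior state: 1 when mmap was already off, i.e. tracking was already on.
*/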
    4068           0 : int mspace_track_large_chunks(mspace msp, int enable) {
    4069           0 :   int ret = 0;
    4070           0 :   mstate ms = (mstate)msp;
    4071           0 :   if (!PREACTION(ms)) {
    4072           0 :     if (!use_mmap(ms)) {
    4073           0 :       ret = 1;
    4074             :     }
    4075           0 :     if (!enable) {
    4076           0 :       enable_mmap(ms);
    4077             :     } else {
    4078           0 :       disable_mmap(ms);
    4079             :     }
    4080           0 :     POSTACTION(ms);
    4081             :   }
    4082           0 :   return ret;
    4083             : }
    4084             : 
    4085             : __clib_nosanitize_addr
    4086        1470 : size_t destroy_mspace(mspace msp) {
    4087        1470 :   size_t freed = 0;
    4088        1470 :   mstate ms = (mstate)msp;
    4089        1470 :   if (ok_magic(ms)) {
    4090        1470 :     msegmentptr sp = &ms->seg;
    4091             :     (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */
    4092        2940 :     while (sp != 0) {
    4093        1470 :       char* base = sp->base;
    4094        1470 :       size_t size = sp->size;
    4095        1470 :       flag_t flag = sp->sflags;
    4096             :       (void)base; /* placate people compiling -Wunused-variable */
    4097        1470 :       sp = sp->next;
    4098        1470 :       if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
    4099           0 :           CALL_MUNMAP(base, size) == 0)
    4100           0 :         freed += size;
    4101             :     }
    4102             :   }
    4103             :   else {
    4104           0 :     USAGE_ERROR_ACTION(ms,ms);
    4105             :   }
    4106        1470 :   return freed;
    4107             : }
    4108             : 
    4109           0 : void mspace_get_address_and_size (mspace msp, char **addrp, size_t *sizep)
    4110             : {
    4111             :   mstate ms;
    4112             :   msegment *this_seg;
    4113             : 
    4114           0 :   ms = (mstate)msp;
    4115           0 :   this_seg = &ms->seg;
    4116             : 
    4117           0 :   *addrp = this_seg->base;
    4118           0 :   *sizep = this_seg->size;
    4119           0 : }
    4120             : 
    4121             : __clib_nosanitize_addr
    4122    64261400 : int mspace_is_heap_object (mspace msp, void *p)
    4123             : {
    4124             :   msegment *this_seg;
    4125             :   char *pp, *base;
    4126             :   mstate ms;
    4127             : 
    4128    64261400 :   ms = (mstate)msp;
    4129             : 
    4130    64261400 :   this_seg = &ms->seg;
    4131    64261400 :   pp = (char *) p;
    4132             : 
    4133    64261400 :   while (this_seg)
    4134             :     {
    4135    64261400 :       base = this_seg->base;
    4136    64261400 :       if (pp >= base && pp < (base + this_seg->size))
    4137    64261400 :         return 1;
    4138           0 :       this_seg = this_seg->next;
    4139             :     }
    4140             : 
    4141           0 :   if (pp > ms->least_addr && pp <= ms->least_addr + ms->footprint)
    4142           0 :     return 1;
    4143             : 
    4144           0 :   return 0;
    4145             : }
    4146             : 
    4147             : __clib_nosanitize_addr
    4148        4774 : void *mspace_least_addr (mspace msp)
    4149             : {
    4150        4774 :   mstate ms = (mstate) msp;
    4151        4774 :   return (void *) ms->least_addr;
    4152             : }
    4153             : 
    4154        4774 : void mspace_disable_expand (mspace msp)
    4155             : {
    4156        4774 :   mstate ms = (mstate)msp;
    4157             : 
    4158        4774 :   disable_expand (ms);
    4159        4774 : }
    4160             : 
    4161             : __clib_nosanitize_addr
    4162           0 : int mspace_enable_disable_trace (mspace msp, int enable)
    4163             : {
    4164           0 :   mstate ms = (mstate)msp;
    4165           0 :   int was_enabled = 0;
    4166             : 
    4167           0 :   if (use_trace(ms))
    4168           0 :     was_enabled = 1;
    4169             : 
    4170           0 :   if (enable)
    4171           0 :     enable_trace (ms);
    4172             :   else
    4173           0 :     disable_trace (ms);
    4174             : 
    4175           0 :   return (was_enabled);
    4176             : }
    4177             : 
    4178             : __clib_nosanitize_addr
    4179           0 : int mspace_is_traced (mspace msp)
    4180             : {
    4181           0 :   mstate ms = (mstate)msp;
    4182             : 
    4183           0 :   if (use_trace(ms))
    4184           0 :     return 1;
    4185           0 :   return 0;
    4186             : }
    4187             : 
    4188             : __clib_nosanitize_addr
    4189           0 : void* mspace_get_aligned (mspace msp,
    4190             :                           unsigned long n_user_data_bytes,
    4191             :                           unsigned long align,
    4192             :                           unsigned long align_offset) {
    4193             :   char *rv;
    4194             :   unsigned long searchp;
    4195             :   unsigned *wwp;                /* "where's Waldo" pointer */
    4196           0 :   mstate ms = (mstate)msp;
    4197             : 
    4198             :   /*
    4199             :    * Allocate space for the "Where's Waldo?" pointer
    4200             :    * at the base of the dlmalloc object
    4201             :    */
    4202           0 :   n_user_data_bytes += sizeof(unsigned);
    4203           0 :   align = align < MALLOC_ALIGNMENT ? MALLOC_ALIGNMENT : align;
    4204             : 
    4205             :   /*
    4206             :    * Alignment requests greater than 4K must be at offset zero,
    4207             :    * and must be freed using mspace_put_no_offset - or never freed -
    4208             :    * since the "Where's Waldo?" pointer would waste too much space.
    4209             :    *
    4210             :    * Waldo is the address of the chunk of memory returned by mspace_malloc,
    4211             :    * which we need later to call mspace_free (a layout sketch follows
    4212             :    * this function).
    4212             :    */
    4213           0 :   if (align > 4<<10 || align_offset == ~0UL) {
    4214           0 :     n_user_data_bytes -= sizeof(unsigned);
    4215             :     assert(align_offset == 0);
    4216           0 :     rv = internal_memalign(ms, (size_t)align, n_user_data_bytes);
    4217             : 
    4218             :     /* Trace the allocation */
    4219           0 :     if (rv && use_trace(ms)) {
    4220           0 :       mchunkptr p  = mem2chunk(rv);
    4221           0 :       size_t psize = chunksize(p);
    4222           0 :       mheap_get_trace ((unsigned long)rv, psize);
    4223             :     }
    4224           0 :     return rv;
    4225             :   }
    4226             : 
    4227           0 :   align = clib_max (align, MALLOC_ALIGNMENT);
    4228           0 :   align = max_pow2 (align);
    4229             : 
    4230             :   /* Correct align offset to be smaller than alignment. */
    4231           0 :   align_offset &= (align - 1);
    4232             : 
    4233           0 :   n_user_data_bytes += align;
    4234           0 :   rv = mspace_malloc (msp, n_user_data_bytes);
    4235             : 
    4236           0 :   if (rv == 0)
    4237           0 :       return rv;
    4238             : 
    4239             :   /* Honor the alignment request */
    4240           0 :   searchp = (unsigned long)(rv + sizeof (unsigned));
    4241             : 
    4242             : #if 0  /* this is the idea... */
    4243             :   while ((searchp + align_offset) % align)
    4244             :     searchp++;
    4245             : #endif
    4246             : 
    4247             :   {
    4248             :     unsigned long where_now, delta;
    4249             : 
    4250           0 :     where_now = (searchp + align_offset) % align;
    4251           0 :     delta = align - where_now;
    4252             : 
    4253           0 :     searchp += delta;
    4254             :   }
    4255             : 
    4256           0 :   wwp = (unsigned *)(searchp - sizeof(unsigned));
    4257           0 :   *wwp = (searchp - (((unsigned long) rv) + sizeof (*wwp)));
    4258             :   assert (*wwp < align);
    4259             : 
    4260           0 :   if (use_trace(ms)) {
    4261           0 :     mchunkptr p  = mem2chunk(rv);
    4262           0 :     size_t psize = chunksize(p);
    4263           0 :     mheap_get_trace (searchp, psize);
    4264             :   }
    4265           0 :   return (void *) searchp;
    4266             : }
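/*
  Editorial sketch of the layout produced above (derived from the code,
  with illustrative spacing):

      rv (from mspace_malloc)              searchp (returned to caller)
      |                                    |
      v                                    v
      +-------- delta padding -----+------+---------------------------+
      |                            | wwp  | user data                 |
      +----------------------------+------+---------------------------+
                                     ^
                                     *wwp == searchp - (rv + sizeof *wwp)

  mspace_put() below reverses this: it steps back one unsigned from the
  caller's pointer, subtracts *wwp, and frees the recovered object.
*/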
    4267             : 
    4268             : __clib_nosanitize_addr
    4269           0 : void mspace_put (mspace msp, void *p_arg)
    4270             : {
    4271             :   char *object_header;
    4272             :   unsigned *wwp;
    4273           0 :   mstate ms = (mstate)msp;
    4274             : 
    4275             :   /* Find the object header delta */
    4276           0 :   wwp = (unsigned *)p_arg;
    4277           0 :   wwp --;
    4278             : 
    4279             :   /* Recover the dlmalloc object pointer */
    4280           0 :   object_header = (char *)wwp;
    4281           0 :   object_header -= *wwp;
    4282             : 
    4283             :   /* Tracing (if enabled) */
    4284           0 :   if (use_trace(ms))
    4285             :     {
    4286           0 :       mchunkptr p  = mem2chunk(object_header);
    4287           0 :       size_t psize = chunksize(p);
    4288             : 
    4289           0 :       mheap_put_trace ((unsigned long)p_arg, psize);
    4290             :     }
    4291             : 
    4292             : #if CLIB_DEBUG > 0 && !defined(CLIB_SANITIZE_ADDR)
    4293             :   /* Poison the object */
    4294             :   {
    4295           0 :     size_t psize = mspace_usable_size (object_header);
    4296           0 :     memset (object_header, 0x13, psize);
    4297             :   }
    4298             : #endif
    4299             : 
    4300             :   /* And free it... */
    4301           0 :   mspace_free (msp, object_header);
    4302           0 : }
    4303             : 
    4304           0 : void mspace_put_no_offset (mspace msp, void *p_arg)
    4305             : {
    4306           0 :   mstate ms = (mstate)msp;
    4307             : 
    4308           0 :   if (use_trace(ms))
    4309             :     {
    4310           0 :       mchunkptr p  = mem2chunk(p_arg);
    4311           0 :       size_t psize = chunksize(p);
    4312             : 
    4313           0 :       mheap_put_trace ((unsigned long)p_arg, psize);
    4314             :     }
    4315           0 :   mspace_free (msp, p_arg);
    4316           0 : }
    4317             : 
    4318             : __clib_nosanitize_addr
    4319           0 : size_t mspace_usable_size_with_delta (const void *p)
    4320             : {
    4321             :   size_t usable_size;
    4322             :   char *object_header;
    4323             :   unsigned *wwp;
    4324             : 
    4325             :   /* Find the object header delta */
    4326           0 :   wwp = (unsigned *)p;
    4327           0 :   wwp --;
    4328             : 
    4329             :   /* Recover the dlmalloc object pointer */
    4330           0 :   object_header = (char *)wwp;
    4331           0 :   object_header -= *wwp;
    4332             : 
    4333           0 :   usable_size = mspace_usable_size (object_header);
    4334             :   /* account for the offset and the size of the offset... */
    4335           0 :   usable_size -= (*wwp + sizeof (*wwp));
    4336           0 :   return usable_size;
    4337             : }
    4338             : 
    4339             : /*
    4340             :   mspace versions of routines are near-clones of the global
    4341             :   versions. This is not so nice but better than the alternatives.
    4342             : */
    4343             : 
    4344             : __clib_nosanitize_addr
    4345   110471000 : void* mspace_malloc(mspace msp, size_t bytes) {
    4346   110471000 :   mstate ms = (mstate)msp;
    4347   110471000 :   if (!ok_magic(ms)) {
    4348           0 :     USAGE_ERROR_ACTION(ms,ms);
    4349             :     return 0;
    4350             :   }
    4351   110473000 :   if (!PREACTION(ms)) {
    4352             :     void* mem;
    4353             :     size_t nb;
    4354   110473000 :     if (bytes <= MAX_SMALL_REQUEST) {
    4355             :       bindex_t idx;
    4356             :       binmap_t smallbits;
    4357    99610000 :       nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
    4358    99610000 :       idx = small_index(nb);
    4359    99610000 :       smallbits = ms->smallmap >> idx;
    4360             : 
    4361    99610000 :       if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
    4362             :         mchunkptr b, p;
    4363    25334900 :         idx += ~smallbits & 1;       /* Uses next bin if idx empty */
    4364    25334900 :         b = smallbin_at(ms, idx);
    4365    25334900 :         p = b->fd;
    4366             :         assert(chunksize(p) == small_index2size(idx));
    4367    25334900 :         unlink_first_small_chunk(ms, b, p, idx);
    4368    25334900 :         set_inuse_and_pinuse(ms, p, small_index2size(idx));
    4369    25334900 :         mem = chunk2mem(p);
    4370             :         check_malloced_chunk(ms, mem, nb);
    4371    25334900 :         goto postaction;
    4372             :       }
    4373             : 
    4374    74275100 :       else if (nb > ms->dvsize) {
    4375     4745540 :         if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
    4376             :           mchunkptr b, p, r;
    4377             :           size_t rsize;
    4378             :           bindex_t i;
    4379     2700020 :           binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
    4380     2700020 :           binmap_t leastbit = least_bit(leftbits);
    4381     2700020 :           compute_bit2idx(leastbit, i);
    4382     2700020 :           b = smallbin_at(ms, i);
    4383     2700020 :           p = b->fd;
    4384             :           assert(chunksize(p) == small_index2size(i));
    4385     2700020 :           unlink_first_small_chunk(ms, b, p, i);
    4386     2700020 :           rsize = small_index2size(i) - nb;
    4387             :           /* Fit here cannot be remainderless if 4byte sizes */
    4388     2700020 :           if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
    4389     1072310 :             set_inuse_and_pinuse(ms, p, small_index2size(i));
    4390             :           else {
    4391     1627710 :             set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    4392     1627710 :             r = chunk_plus_offset(p, nb);
    4393     1627710 :             set_size_and_pinuse_of_free_chunk(r, rsize);
    4394     1627710 :             replace_dv(ms, r, rsize);
    4395             :           }
    4396     2700020 :           mem = chunk2mem(p);
    4397             :           check_malloced_chunk(ms, mem, nb);
    4398     2700020 :           goto postaction;
    4399             :         }
    4400             : 
    4401     2045520 :         else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
    4402             :           check_malloced_chunk(ms, mem, nb);
    4403     1483920 :           goto postaction;
    4404             :         }
    4405             :       }
    4406             :     }
    4407    10862900 :     else if (bytes >= MAX_REQUEST)
    4408           0 :       nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    4409             :     else {
    4410    10862900 :       nb = pad_request(bytes);
    4411    10862900 :       if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
    4412             :         check_malloced_chunk(ms, mem, nb);
    4413     8785560 :         goto postaction;
    4414             :       }
    4415             :     }
    4416             : 
    4417    72168500 :     if (nb <= ms->dvsize) {
    4418    71340500 :       size_t rsize = ms->dvsize - nb;
    4419    71340500 :       mchunkptr p = ms->dv;
    4420    71340500 :       if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
    4421    69760000 :         mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
    4422    69760000 :         ms->dvsize = rsize;
    4423    69760000 :         set_size_and_pinuse_of_free_chunk(r, rsize);
    4424    69760000 :         set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    4425             :       }
    4426             :       else { /* exhaust dv */
    4427     1580540 :         size_t dvs = ms->dvsize;
    4428     1580540 :         ms->dvsize = 0;
    4429     1580540 :         ms->dv = 0;
    4430     1580540 :         set_inuse_and_pinuse(ms, p, dvs);
    4431             :       }
    4432    71340500 :       mem = chunk2mem(p);
    4433             :       check_malloced_chunk(ms, mem, nb);
    4434    71340500 :       goto postaction;
    4435             :     }
    4436             : 
    4437      827989 :     else if (nb < ms->topsize) { /* Split top */
    4438      827989 :       size_t rsize = ms->topsize -= nb;
    4439      827989 :       mchunkptr p = ms->top;
    4440      827989 :       mchunkptr r = ms->top = chunk_plus_offset(p, nb);
    4441      827989 :       r->head = rsize | PINUSE_BIT;
    4442      827989 :       set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
    4443      827989 :       mem = chunk2mem(p);
    4444             :       check_top_chunk(ms, ms->top);
    4445             :       check_malloced_chunk(ms, mem, nb);
    4446      827989 :       goto postaction;
    4447             :     }
    4448             : 
    4449           0 :     mem = sys_alloc(ms, nb);
    4450             : 
    4451   110473000 :   postaction:
    4452   110473000 :     POSTACTION(ms);
    4453   110473000 :     return mem;
    4454             :   }
    4455             : 
    4456           0 :   return 0;
    4457             : }
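/*
  Editorial summary of the search order above (a restatement of the code,
  not new behavior). For bytes <= MAX_SMALL_REQUEST, mspace_malloc tries:
    1. a remainderless fit from the matching (or next) smallbin;
    2. when nb > dvsize, the next nonempty smallbin, else tmalloc_small()
       on the tree bins;
    3. the designated victim chunk dv, splitting off the remainder when
       it is at least MIN_CHUNK_SIZE;
    4. splitting the top chunk;
    5. sys_alloc() for fresh system memory.
  Larger requests try tmalloc_large() first, then fall through to the
  same dv / top / sys_alloc sequence.
*/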
    4458             : 
    4459             : __clib_nosanitize_addr
    4460    64099700 : void mspace_free(mspace msp, void* mem) {
    4461    64099700 :   if (mem != 0) {
    4462    64099700 :     mchunkptr p  = mem2chunk(mem);
    4463             : #if FOOTERS
    4464    64099700 :     mstate fm = get_mstate_for(p);
    4465             :     (void)msp; /* placate people compiling -Wunused */
    4466             : #else /* FOOTERS */
    4467             :     mstate fm = (mstate)msp;
    4468             : #endif /* FOOTERS */
    4469    64099700 :     if (!ok_magic(fm)) {
    4470           0 :       USAGE_ERROR_ACTION(fm, p);
    4471             :       return;
    4472             :     }
    4473    64099700 :     if (!PREACTION(fm)) {
    4474             :       check_inuse_chunk(fm, p);
    4475    64100400 :       if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
    4476    64100400 :         size_t psize = chunksize(p);
    4477    64100400 :         mchunkptr next = chunk_plus_offset(p, psize);
    4478    64100400 :         if (!pinuse(p)) {
    4479    15920300 :           size_t prevsize = p->prev_foot;
    4480    15920300 :           if (is_mmapped(p)) {
    4481           0 :             psize += prevsize + MMAP_FOOT_PAD;
    4482           0 :             if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
    4483           0 :               fm->footprint -= psize;
    4484           0 :             goto postaction;
    4485             :           }
    4486             :           else {
    4487    15920300 :             mchunkptr prev = chunk_minus_offset(p, prevsize);
    4488    15920300 :             psize += prevsize;
    4489    15920300 :             p = prev;
    4490    15920300 :             if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
    4491    15920300 :               if (p != fm->dv) {
    4492    16014400 :                 unlink_chunk(fm, p, prevsize);
    4493             :               }
    4494       12834 :               else if ((next->head & INUSE_BITS) == INUSE_BITS) {
    4495        8460 :                 fm->dvsize = psize;
    4496        8460 :                 set_free_with_pinuse(p, psize, next);
    4497        8460 :                 goto postaction;
    4498             :               }
    4499             :             }
    4500             :             else
    4501           0 :               goto erroraction;
    4502             :           }
    4503             :         }
    4504             : 
    4505    64091900 :         if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
    4506    64091900 :           if (!cinuse(next)) {  /* consolidate forward */
    4507    19966900 :             if (next == fm->top) {
    4508       76301 :               size_t tsize = fm->topsize += psize;
    4509       76301 :               fm->top = p;
    4510       76301 :               p->head = tsize | PINUSE_BIT;
    4511       76301 :               if (p == fm->dv) {
    4512         195 :                 fm->dv = 0;
    4513         195 :                 fm->dvsize = 0;
    4514             :               }
    4515       76301 :               if (should_trim(fm, tsize))
    4516        1529 :                 sys_trim(fm, 0);
    4517       76301 :               goto postaction;
    4518             :             }
    4519    19890600 :             else if (next == fm->dv) {
    4520     6600440 :               size_t dsize = fm->dvsize += psize;
    4521     6600440 :               fm->dv = p;
    4522     6600440 :               set_size_and_pinuse_of_free_chunk(p, dsize);
    4523     6600440 :               goto postaction;
    4524             :             }
    4525             :             else {
    4526    13290200 :               size_t nsize = chunksize(next);
    4527    13290200 :               psize += nsize;
    4528    13423100 :               unlink_chunk(fm, next, nsize);
    4529    13290200 :               set_size_and_pinuse_of_free_chunk(p, psize);
    4530    13290200 :               if (p == fm->dv) {
    4531        4179 :                 fm->dvsize = psize;
    4532        4179 :                 goto postaction;
    4533             :               }
    4534             :             }
    4535             :           }
    4536             :           else
    4537    44125000 :             set_free_with_pinuse(p, psize, next);
    4538             : 
    4539    57411000 :           if (is_small(psize)) {
    4540    41739300 :             insert_small_chunk(fm, p, psize);
    4541             :             check_free_chunk(fm, p);
    4542             :           }
    4543             :           else {
    4544    15671700 :             tchunkptr tp = (tchunkptr)p;
    4545    25729600 :             insert_large_chunk(fm, tp, psize);
    4546             :             check_free_chunk(fm, p);
    4547    15671700 :             if (--fm->release_checks == 0)
    4548        3539 :               release_unused_segments(fm);
    4549             :           }
    4550    57411000 :           goto postaction;
    4551             :         }
    4552             :       }
    4553           0 :     erroraction:
    4554           0 :       USAGE_ERROR_ACTION(fm, p);
    4555    64100400 :     postaction:
    4556    64100400 :       POSTACTION(fm);
    4557             :     }
    4558             :   }
    4559             : }
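/*
  Editorial summary of the consolidation above: a freed chunk first
  absorbs its lower neighbor when pinuse(p) is clear (unlinking that
  neighbor from its bin, or simply growing dv in place), then absorbs
  its upper neighbor -- merging into top or dv when the neighbor is one
  of those -- and the surviving chunk is finally binned by size
  (smallbin vs. tree bin). Tree-bin frees occasionally trigger
  release_unused_segments() via the release_checks countdown.
*/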
    4560             : 
    4561           0 : void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
    4562             :   void* mem;
    4563           0 :   size_t req = 0;
    4564           0 :   mstate ms = (mstate)msp;
    4565           0 :   if (!ok_magic(ms)) {
    4566           0 :     USAGE_ERROR_ACTION(ms,ms);
    4567             :     return 0;
    4568             :   }
    4569           0 :   if (n_elements != 0) {
    4570           0 :     req = n_elements * elem_size;
    4571           0 :     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
    4572           0 :         (req / n_elements != elem_size))
    4573           0 :       req = MAX_SIZE_T; /* force downstream failure on overflow */
    4574             :   }
    4575           0 :   mem = internal_malloc(ms, req);
    4576           0 :   if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
    4577           0 :     memset(mem, 0, req);
    4578           0 :   return mem;
    4579             : }
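/*
  Editorial note on the overflow test above: n_elements * elem_size can
  only overflow when at least one operand needs more than 16 bits, so
  the cheap mask gates the costlier division-based check. A hedged
  standalone restatement (helper name is illustrative, not part of this
  file):

      // 1 when n * s would overflow size_t; assumes n != 0
      static int mul_would_overflow (size_t n, size_t s)
      {
        if (((n | s) & ~(size_t) 0xffff) == 0)
          return 0;                 // both fit in 16 bits, product fits
        return (n * s) / n != s;    // wraparound detected by division
      }
*/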
    4580             : 
    4581           0 : void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
    4582           0 :   void* mem = 0;
    4583           0 :   if (oldmem == 0) {
    4584           0 :     mem = mspace_malloc(msp, bytes);
    4585             :   }
    4586           0 :   else if (bytes >= MAX_REQUEST) {
    4587           0 :     MALLOC_FAILURE_ACTION;
    4588             :   }
    4589             : #ifdef REALLOC_ZERO_BYTES_FREES
    4590             :   else if (bytes == 0) {
    4591             :     mspace_free(msp, oldmem);
    4592             :   }
    4593             : #endif /* REALLOC_ZERO_BYTES_FREES */
    4594             :   else {
    4595           0 :     size_t nb = request2size(bytes);
    4596           0 :     mchunkptr oldp = mem2chunk(oldmem);
    4597             : #if ! FOOTERS
    4598             :     mstate m = (mstate)msp;
    4599             : #else /* FOOTERS */
    4600           0 :     mstate m = get_mstate_for(oldp);
    4601           0 :     if (!ok_magic(m)) {
    4602           0 :       USAGE_ERROR_ACTION(m, oldmem);
    4603             :       return 0;
    4604             :     }
    4605             : #endif /* FOOTERS */
    4606           0 :     if (!PREACTION(m)) {
    4607           0 :       mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
    4608           0 :       POSTACTION(m);
    4609           0 :       if (newp != 0) {
    4610             :         check_inuse_chunk(m, newp);
    4611           0 :         mem = chunk2mem(newp);
    4612             :       }
    4613             :       else {
    4614           0 :         mem = mspace_malloc(m, bytes);
    4615           0 :         if (mem != 0) {
    4616           0 :           size_t oc = chunksize(oldp) - overhead_for(oldp);
    4617           0 :           memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
    4618           0 :           mspace_free(m, oldmem);
    4619             :         }
    4620             :       }
    4621             :     }
    4622             :   }
    4623           0 :   return mem;
    4624             : }
    4625             : 
    4626             : __clib_nosanitize_addr
    4627    23318400 : void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
    4628    23318400 :   void* mem = 0;
    4629    23318400 :   if (oldmem != 0) {
    4630    23318400 :     if (bytes >= MAX_REQUEST) {
    4631           0 :       MALLOC_FAILURE_ACTION;
    4632             :     }
    4633             :     else {
    4634    23318400 :       size_t nb = request2size(bytes);
    4635    23318400 :       mchunkptr oldp = mem2chunk(oldmem);
    4636             : #if ! FOOTERS
    4637             :       mstate m = (mstate)msp;
    4638             : #else /* FOOTERS */
    4639    23318400 :       mstate m = get_mstate_for(oldp);
    4640             :       (void)msp; /* placate people compiling -Wunused */
    4641    23318400 :       if (!ok_magic(m)) {
    4642           0 :         USAGE_ERROR_ACTION(m, oldmem);
    4643             :         return 0;
    4644             :       }
    4645             : #endif /* FOOTERS */
    4646    23318400 :       if (!PREACTION(m)) {
    4647    23318400 :         mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
    4648    23319900 :         POSTACTION(m);
    4649    23319900 :         if (newp == oldp) {
    4650             :           check_inuse_chunk(m, newp);
    4651     8639170 :           mem = oldmem;
    4652             :         }
    4653             :       }
    4654             :     }
    4655             :   }
    4656    23319900 :   return mem;
    4657             : }
    4658             : 
    4659             : __clib_nosanitize_addr
    4660   110471000 : void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
    4661   110471000 :   mstate ms = (mstate)msp;
    4662   110471000 :   if (!ok_magic(ms)) {
    4663           0 :     USAGE_ERROR_ACTION(ms,ms);
    4664             :     return 0;
    4665             :   }
    4666   110471000 :   if (alignment <= MALLOC_ALIGNMENT)
    4667   108397000 :     return mspace_malloc(msp, bytes);
    4668     2073970 :   return internal_memalign(ms, alignment, bytes);
    4669             : }
    4670             : 
    4671           0 : void** mspace_independent_calloc(mspace msp, size_t n_elements,
    4672             :                                  size_t elem_size, void* chunks[]) {
    4673           0 :   size_t sz = elem_size; /* serves as 1-element array */
    4674           0 :   mstate ms = (mstate)msp;
    4675           0 :   if (!ok_magic(ms)) {
    4676           0 :     USAGE_ERROR_ACTION(ms,ms);
    4677             :     return 0;
    4678             :   }
    4679           0 :   return ialloc(ms, n_elements, &sz, 3, chunks);
    4680             : }
    4681             : 
    4682           0 : void** mspace_independent_comalloc(mspace msp, size_t n_elements,
    4683             :                                    size_t sizes[], void* chunks[]) {
    4684           0 :   mstate ms = (mstate)msp;
    4685           0 :   if (!ok_magic(ms)) {
    4686           0 :     USAGE_ERROR_ACTION(ms,ms);
    4687             :     return 0;
    4688             :   }
    4689           0 :   return ialloc(ms, n_elements, sizes, 0, chunks);
    4690             : }
    4691             : 
    4692           0 : size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
    4693           0 :   return internal_bulk_free((mstate)msp, array, nelem);
    4694             : }
    4695             : 
    4696             : #if MALLOC_INSPECT_ALL
    4697             : void mspace_inspect_all(mspace msp,
    4698             :                         void(*handler)(void *start,
    4699             :                                        void *end,
    4700             :                                        size_t used_bytes,
    4701             :                                        void* callback_arg),
    4702             :                         void* arg) {
    4703             :   mstate ms = (mstate)msp;
    4704             :   if (ok_magic(ms)) {
    4705             :     if (!PREACTION(ms)) {
    4706             :       internal_inspect_all(ms, handler, arg);
    4707             :       POSTACTION(ms);
    4708             :     }
    4709             :   }
    4710             :   else {
    4711             :     USAGE_ERROR_ACTION(ms,ms);
    4712             :   }
    4713             : }
    4714             : #endif /* MALLOC_INSPECT_ALL */
    4715             : 
    4716           0 : int mspace_trim(mspace msp, size_t pad) {
    4717           0 :   int result = 0;
    4718           0 :   mstate ms = (mstate)msp;
    4719           0 :   if (ok_magic(ms)) {
    4720           0 :     if (!PREACTION(ms)) {
    4721           0 :       result = sys_trim(ms, pad);
    4722           0 :       POSTACTION(ms);
    4723             :     }
    4724             :   }
    4725             :   else {
    4726           0 :     USAGE_ERROR_ACTION(ms,ms);
    4727             :   }
    4728           0 :   return result;
    4729             : }
    4730             : 
    4731             : #if !NO_MALLOC_STATS
    4732           0 : void mspace_malloc_stats(mspace msp) {
    4733           0 :   mstate ms = (mstate)msp;
    4734           0 :   if (ok_magic(ms)) {
    4735           0 :     internal_malloc_stats(ms);
    4736             :   }
    4737             :   else {
    4738           0 :     USAGE_ERROR_ACTION(ms,ms);
    4739             :   }
    4740           0 : }
    4741             : #endif /* NO_MALLOC_STATS */
    4742             : 
    4743        4774 : size_t mspace_footprint(mspace msp) {
    4744        4774 :   size_t result = 0;
    4745        4774 :   mstate ms = (mstate)msp;
    4746        4774 :   if (ok_magic(ms)) {
    4747        4774 :     result = ms->footprint;
    4748             :   }
    4749             :   else {
    4750           0 :     USAGE_ERROR_ACTION(ms,ms);
    4751             :   }
    4752        4774 :   return result;
    4753             : }
    4754             : 
    4755           0 : size_t mspace_max_footprint(mspace msp) {
    4756           0 :   size_t result = 0;
    4757           0 :   mstate ms = (mstate)msp;
    4758           0 :   if (ok_magic(ms)) {
    4759           0 :     result = ms->max_footprint;
    4760             :   }
    4761             :   else {
    4762           0 :     USAGE_ERROR_ACTION(ms,ms);
    4763             :   }
    4764           0 :   return result;
    4765             : }
    4766             : 
    4767           0 : size_t mspace_footprint_limit(mspace msp) {
    4768           0 :   size_t result = 0;
    4769           0 :   mstate ms = (mstate)msp;
    4770           0 :   if (ok_magic(ms)) {
    4771           0 :     size_t maf = ms->footprint_limit;
    4772           0 :     result = (maf == 0) ? MAX_SIZE_T : maf;
    4773             :   }
    4774             :   else {
    4775           0 :     USAGE_ERROR_ACTION(ms,ms);
    4776             :   }
    4777           0 :   return result;
    4778             : }
    4779             : 
    4780           0 : size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
    4781           0 :   size_t result = 0;
    4782           0 :   mstate ms = (mstate)msp;
    4783           0 :   if (ok_magic(ms)) {
    4784           0 :     if (bytes == 0)
    4785           0 :       result = granularity_align(1); /* Use minimal size */
    4786           0 :     else if (bytes == MAX_SIZE_T)
    4787           0 :       result = 0;                    /* disable */
    4788             :     else
    4789           0 :       result = granularity_align(bytes);
    4790           0 :     ms->footprint_limit = result;
    4791             :   }
    4792             :   else {
    4793           0 :     USAGE_ERROR_ACTION(ms,ms);
    4794             :   }
    4795           0 :   return result;
    4796             : }
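/*
  Editorial note: with the else-if above, bytes == 0 requests the minimal
  one-granule limit, bytes == MAX_SIZE_T disables the limit (stored as 0,
  reported back as MAX_SIZE_T by mspace_footprint_limit), and any other
  value is rounded up to the allocation granularity.
*/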
    4797             : 
    4798             : #if !NO_MALLINFO
    4799             : __clib_nosanitize_addr
    4800        3458 : struct dlmallinfo mspace_mallinfo(mspace msp) {
    4801        3458 :   mstate ms = (mstate)msp;
    4802        3458 :   if (!ok_magic(ms)) {
    4803           0 :     USAGE_ERROR_ACTION(ms,ms);
    4804             :   }
    4805        3458 :   return internal_mallinfo(ms);
    4806             : }
    4807             : #endif /* NO_MALLINFO */
    4808             : 
    4809             : __clib_nosanitize_addr
    4810  1601870000 : size_t mspace_usable_size(const void* mem) {
    4811  1601870000 :   if (mem != 0) {
    4812  1601880000 :     mchunkptr p = mem2chunk(mem);
    4813  1601880000 :     if (is_inuse(p))
    4814  1601890000 :       return chunksize(p) - overhead_for(p);
    4815             :   }
    4816           0 :   return 0;
    4817             : }
    4818             : 
    4819           0 : int mspace_mallopt(int param_number, int value) {
    4820           0 :   return change_mparam(param_number, value);
    4821             : }
    4822             : 
    4823             : #endif /* MSPACES */
    4824             : 
    4825             : 
    4826             : /* -------------------- Alternative MORECORE functions ------------------- */
    4827             : 
    4828             : /*
    4829             :   Guidelines for creating a custom version of MORECORE:
    4830             : 
    4831             :   * For best performance, MORECORE should allocate in multiples of pagesize.
    4832             :   * MORECORE may allocate more memory than requested. (Or even less,
    4833             :       but this will usually result in a malloc failure.)
    4834             :   * MORECORE must not allocate memory when given argument zero, but
    4835             :       instead return one past the end address of memory from previous
    4836             :       nonzero call.
    4837             :   * For best performance, consecutive calls to MORECORE with positive
    4838             :       arguments should return increasing addresses, indicating that
    4839             :       space has been contiguously extended.
    4840             :   * Even though consecutive calls to MORECORE need not return contiguous
    4841             :       addresses, it must be OK for malloc'ed chunks to span multiple
    4842             :       regions in those cases where they do happen to be contiguous.
    4843             :   * MORECORE need not handle negative arguments -- it may instead
    4844             :       just return MFAIL when given negative arguments.
    4845             :       Negative arguments are always multiples of pagesize. MORECORE
    4846             :       must not misinterpret negative args as large positive unsigned
    4847             :       args. You can suppress all such calls from even occurring by defining
    4848             :       MORECORE_CANNOT_TRIM.
    4849             : 
    4850             :   As an example alternative MORECORE, here is a custom allocator
    4851             :   kindly contributed for pre-OSX macOS.  It uses virtually but not
    4852             :   necessarily physically contiguous non-paged memory (locked in,
    4853             :   present and won't get swapped out).  You can use it by uncommenting
    4854             :   this section, adding some #includes, and setting up the appropriate
    4855             :   defines above:
    4856             : 
    4857             :       #define MORECORE osMoreCore
    4858             : 
    4859             :   There is also a shutdown routine that should somehow be called for
    4860             :   cleanup upon program exit.
    4861             : 
    4862             :   #define MAX_POOL_ENTRIES 100
    4863             :   #define MINIMUM_MORECORE_SIZE  (64 * 1024U)
    4864             :   static int next_os_pool;
    4865             :   void *our_os_pools[MAX_POOL_ENTRIES];
    4866             : 
    4867             :   void *osMoreCore(int size)
    4868             :   {
    4869             :     void *ptr = 0;
    4870             :     static void *sbrk_top = 0;
    4871             : 
    4872             :     if (size > 0)
    4873             :     {
    4874             :       if (size < MINIMUM_MORECORE_SIZE)
    4875             :          size = MINIMUM_MORECORE_SIZE;
    4876             :       if (CurrentExecutionLevel() == kTaskLevel)
    4877             :          ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
    4878             :       if (ptr == 0)
    4879             :       {
    4880             :         return (void *) MFAIL;
    4881             :       }
    4882             :       // save ptrs so they can be freed during cleanup
    4883             :       our_os_pools[next_os_pool] = ptr;
    4884             :       next_os_pool++;
    4885             :       ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
    4886             :       sbrk_top = (char *) ptr + size;
    4887             :       return ptr;
    4888             :     }
    4889             :     else if (size < 0)
    4890             :     {
    4891             :       // we don't currently support shrink behavior
    4892             :       return (void *) MFAIL;
    4893             :     }
    4894             :     else
    4895             :     {
    4896             :       return sbrk_top;
    4897             :     }
    4898             :   }
    4899             : 
    4900             :   // cleanup any allocated memory pools
    4901             :   // called as last thing before shutting down driver
    4902             : 
    4903             :   void osCleanupMem(void)
    4904             :   {
    4905             :     void **ptr;
    4906             : 
    4907             :     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
    4908             :       if (*ptr)
    4909             :       {
    4910             :          PoolDeallocate(*ptr);
    4911             :          *ptr = 0;
    4912             :       }
    4913             :   }
    4914             : 
    4915             : */
    4916             : 
    4917             : 
    4918             : /* -----------------------------------------------------------------------
    4919             : History:
    4920             :     v2.8.6 Wed Aug 29 06:57:58 2012  Doug Lea
    4921             :       * fix bad comparison in dlposix_memalign
    4922             :       * don't reuse adjusted asize in sys_alloc
    4923             :       * add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion
    4924             :       * reduce compiler warnings -- thanks to all who reported/suggested these
    4925             : 
    4926             :     v2.8.5 Sun May 22 10:26:02 2011  Doug Lea  (dl at gee)
    4927             :       * Always perform unlink checks unless INSECURE
    4928             :       * Add posix_memalign.
    4929             :       * Improve realloc to expand in more cases; expose realloc_in_place.
    4930             :         Thanks to Peter Buhr for the suggestion.
    4931             :       * Add footprint_limit, inspect_all, bulk_free. Thanks
    4932             :         to Barry Hayes and others for the suggestions.
    4933             :       * Internal refactorings to avoid calls while holding locks
    4934             :       * Use non-reentrant locks by default. Thanks to Roland McGrath
    4935             :         for the suggestion.
    4936             :       * Small fixes to mspace_destroy, reset_on_error.
    4937             :       * Various configuration extensions/changes. Thanks
    4938             :          to all who contributed these.
    4939             : 
    4940             :     V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu)
    4941             :       * Update Creative Commons URL
    4942             : 
    4943             :     V2.8.4 Wed May 27 09:56:23 2009  Doug Lea  (dl at gee)
    4944             :       * Use zeros instead of prev foot for is_mmapped
    4945             :       * Add mspace_track_large_chunks; thanks to Jean Brouwers
    4946             :       * Fix set_inuse in internal_realloc; thanks to Jean Brouwers
    4947             :       * Fix insufficient sys_alloc padding when using 16byte alignment
    4948             :       * Fix bad error check in mspace_footprint
    4949             :       * Adaptations for ptmalloc; thanks to Wolfram Gloger.
    4950             :       * Reentrant spin locks; thanks to Earl Chew and others
    4951             :       * Win32 improvements; thanks to Niall Douglas and Earl Chew
    4952             :       * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
    4953             :       * Extension hook in malloc_state
    4954             :       * Various small adjustments to reduce warnings on some compilers
    4955             :       * Various configuration extensions/changes for more platforms. Thanks
    4956             :          to all who contributed these.
    4957             : 
    4958             :     V2.8.3 Thu Sep 22 11:16:32 2005  Doug Lea  (dl at gee)
    4959             :       * Add max_footprint functions
    4960             :       * Ensure all appropriate literals are size_t
    4961             :       * Fix conditional compilation problem for some #define settings
    4962             :       * Avoid concatenating segments with the one provided
    4963             :         in create_mspace_with_base
    4964             :       * Rename some variables to avoid compiler shadowing warnings
    4965             :       * Use explicit lock initialization.
    4966             :       * Better handling of sbrk interference.
    4967             :       * Simplify and fix segment insertion, trimming and mspace_destroy
    4968             :       * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
    4969             :       * Thanks especially to Dennis Flanagan for help on these.
    4970             : 
    4971             :     V2.8.2 Sun Jun 12 16:01:10 2005  Doug Lea  (dl at gee)
    4972             :       * Fix memalign brace error.
    4973             : 
    4974             :     V2.8.1 Wed Jun  8 16:11:46 2005  Doug Lea  (dl at gee)
    4975             :       * Fix improper #endif nesting in C++
    4976             :       * Add explicit casts needed for C++
    4977             : 
    4978             :     V2.8.0 Mon May 30 14:09:02 2005  Doug Lea  (dl at gee)
    4979             :       * Use trees for large bins
    4980             :       * Support mspaces
    4981             :       * Use segments to unify sbrk-based and mmap-based system allocation,
    4982             :         removing need for emulation on most platforms without sbrk.
    4983             :       * Default safety checks
    4984             :       * Optional footer checks. Thanks to William Robertson for the idea.
    4985             :       * Internal code refactoring
    4986             :       * Incorporate suggestions and platform-specific changes.
    4987             :         Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
    4988             :         Aaron Bachmann,  Emery Berger, and others.
    4989             :       * Speed up non-fastbin processing enough to remove fastbins.
    4990             :       * Remove useless cfree() to avoid conflicts with other apps.
    4991             :       * Remove internal memcpy, memset. Compilers handle builtins better.
    4992             :       * Remove some options that no one ever used and rename others.
    4993             : 
    4994             :     V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
    4995             :       * Fix malloc_state bitmap array misdeclaration
    4996             : 
    4997             :     V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
    4998             :       * Allow tuning of FIRST_SORTED_BIN_SIZE
    4999             :       * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
    5000             :       * Better detection and support for non-contiguousness of MORECORE.
    5001             :         Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
    5002             :       * Bypass most of malloc if no frees. Thanks to Emery Berger.
    5003             :       * Fix freeing of old top non-contiguous chunk in sysmalloc.
    5004             :       * Raised default trim and map thresholds to 256K.
    5005             :       * Fix mmap-related #defines. Thanks to Lubos Lunak.
    5006             :       * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
    5007             :       * Branch-free bin calculation
    5008             :       * Default trim and mmap thresholds now 256K.
    5009             : 
    5010             :     V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
    5011             :       * Introduce independent_comalloc and independent_calloc.
    5012             :         Thanks to Michael Pachos for motivation and help.
    5013             :       * Make optional .h file available
    5014             :       * Allow > 2GB requests on 32bit systems.
    5015             :       * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
    5016             :         Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
    5017             :         and Anonymous.
    5018             :       * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
    5019             :         helping test this.)
    5020             :       * memalign: check alignment arg
    5021             :       * realloc: don't try to shift chunks backwards, since this
    5022             :         leads to  more fragmentation in some programs and doesn't
    5023             :         seem to help in any others.
    5024             :       * Collect all cases in malloc requiring system memory into sysmalloc
    5025             :       * Use mmap as backup to sbrk
    5026             :       * Place all internal state in malloc_state
    5027             :       * Introduce fastbins (although similar to 2.5.1)
    5028             :       * Many minor tunings and cosmetic improvements
    5029             :       * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
    5030             :       * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
    5031             :         Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
    5032             :       * Include errno.h to support default failure action.
    5033             : 
    5034             :     V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
    5035             :       * return null for negative arguments
    5036             :       * Added several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
    5037             :          * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
    5038             :           (e.g. WIN32 platforms)
    5039             :          * Cleanup header file inclusion for WIN32 platforms
    5040             :          * Cleanup code to avoid Microsoft Visual C++ compiler complaints
    5041             :          * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
    5042             :            memory allocation routines
    5043             :          * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
    5044             :          * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
    5045             :            usage of 'assert' in non-WIN32 code
    5046             :          * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
    5047             :            avoid infinite loop
    5048             :       * Always call 'fREe()' rather than 'free()'
    5049             : 
    5050             :     V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
    5051             :       * Fixed ordering problem with boundary-stamping
    5052             : 
    5053             :     V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
    5054             :       * Added pvalloc, as recommended by H.J. Liu
    5055             :       * Added 64bit pointer support mainly from Wolfram Gloger
    5056             :       * Added anonymously donated WIN32 sbrk emulation
    5057             :       * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
    5058             :       * malloc_extend_top: fix mask error that caused wastage after
    5059             :         foreign sbrks
    5060             :       * Add linux mremap support code from HJ Liu
    5061             : 
    5062             :     V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
    5063             :       * Integrated most documentation with the code.
    5064             :       * Add support for mmap, with help from
    5065             :         Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
    5066             :       * Use last_remainder in more cases.
    5067             :       * Pack bins using idea from  colin@nyx10.cs.du.edu
    5068             :       * Use ordered bins instead of best-fit threshold
    5069             :       * Eliminate block-local decls to simplify tracing and debugging.
    5070             :       * Support another case of realloc via move into top
    5071             :       * Fix error occurring when initial sbrk_base not word-aligned.
    5072             :       * Rely on page size for units instead of SBRK_UNIT to
    5073             :         avoid surprises about sbrk alignment conventions.
    5074             :       * Add mallinfo, mallopt. Thanks to Raymond Nijssen
    5075             :         (raymond@es.ele.tue.nl) for the suggestion.
    5076             :       * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
    5077             :       * More precautions for cases where other routines call sbrk,
    5078             :         courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
    5079             :       * Added macros etc., allowing use in linux libc from
    5080             :         H.J. Lu (hjl@gnu.ai.mit.edu)
    5081             :       * Inverted this history list
    5082             : 
    5083             :     V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
    5084             :       * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
    5085             :       * Removed all preallocation code since under current scheme
    5086             :         the work required to undo bad preallocations exceeds
    5087             :         the work saved in good cases for most test programs.
    5088             :       * No longer use return list or unconsolidated bins since
    5089             :         no scheme using them consistently outperforms those that don't
    5090             :         given above changes.
    5091             :       * Use best fit for very large chunks to prevent some worst-cases.
    5092             :       * Added some support for debugging
    5093             : 
    5094             :     V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
    5095             :       * Removed footers when chunks are in use. Thanks to
    5096             :         Paul Wilson (wilson@cs.texas.edu) for the suggestion.
    5097             : 
    5098             :     V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
    5099             :       * Added malloc_trim, with help from Wolfram Gloger
    5100             :         (wmglo@Dent.MED.Uni-Muenchen.DE).
    5101             : 
    5102             :     V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
    5103             : 
    5104             :     V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
    5105             :       * realloc: try to expand in both directions
    5106             :       * malloc: swap order of clean-bin strategy;
    5107             :       * realloc: only conditionally expand backwards
    5108             :       * Try not to scavenge used bins
    5109             :       * Use bin counts as a guide to preallocation
    5110             :       * Occasionally bin return list chunks in first scan
    5111             :       * Add a few optimizations from colin@nyx10.cs.du.edu
    5112             : 
    5113             :     V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
    5114             :       * faster bin computation & slightly different binning
    5115             :       * merged all consolidations to one part of malloc proper
    5116             :          (eliminating old malloc_find_space & malloc_clean_bin)
    5117             :       * Scan 2 returns chunks (not just 1)
    5118             :       * Propagate failure in realloc if malloc returns 0
    5119             :       * Add stuff to allow compilation on non-ANSI compilers
    5120             :           from kpv@research.att.com
    5121             : 
    5122             :     V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
    5123             :       * removed potential for odd address access in prev_chunk
    5124             :       * removed dependency on getpagesize.h
    5125             :       * misc cosmetics and a bit more internal documentation
    5126             :       * anticosmetics: mangled names in macros to evade debugger strangeness
    5127             :       * tested on sparc, hp-700, dec-mips, rs6000
    5128             :           with gcc & native cc (hp, dec only) allowing
    5129             :           Detlefs & Zorn comparison study (in SIGPLAN Notices.)
    5130             : 
    5131             :     Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
    5132             :       * Based loosely on libg++-1.2X malloc. (It retains some of the overall
    5133             :          structure of old version,  but most details differ.)
    5134             : 
    5135             : */

Generated by: LCOV version 1.14