#include "libc/nexgen32e/rdtsc.h"
#include "libc/dce.h"
#include "libc/assert.h"
#include "libc/rand/rand.h"
#include "libc/runtime/sysconf.h"
#include "libc/sysv/consts/map.h"
#include "libc/sysv/consts/prot.h"
#include "libc/runtime/runtime.h"
#include "libc/errno.h"
#include "libc/stdio/stdio.h"
#include "libc/intrin/kprintf.h"
#include "third_party/dlmalloc/vespene.internal.h"
#include "libc/calls/calls.h"
#include "libc/bits/weaken.h"
#include "libc/mem/mem.h"
// clang-format off

#define FOOTERS 0
#define MSPACES 0

#define HAVE_MMAP 1
#define HAVE_MREMAP 0
#define HAVE_MORECORE 0
#define USE_LOCKS 1
#define MORECORE_CONTIGUOUS 0
#define MALLOC_INSPECT_ALL 1

#if IsTiny()
#define INSECURE 1
#define PROCEED_ON_ERROR 1
#define ABORT_ON_ASSERT_FAILURE 0
#endif

#if IsModeDbg()
#define DEBUG 1
#endif

#define LACKS_UNISTD_H
#define LACKS_FCNTL_H
#define LACKS_SYS_PARAM_H
#define LACKS_SYS_MMAN_H
#define LACKS_STRINGS_H
#define LACKS_STRING_H
#define LACKS_SYS_TYPES_H
#define LACKS_ERRNO_H
#define LACKS_STDLIB_H
#define LACKS_SCHED_H
#define LACKS_TIME_H

/* Version identifier to allow people to support multiple versions */
#ifndef DLMALLOC_VERSION
#define DLMALLOC_VERSION 20806
#endif /* DLMALLOC_VERSION */

#ifndef DLMALLOC_EXPORT
#define DLMALLOC_EXPORT extern
#endif

/* The maximum possible size_t value has all bits set */
#define MAX_SIZE_T           (~(size_t)0)

#ifndef USE_LOCKS /* ensure true if spin or recursive locks set */
#define USE_LOCKS  ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
                    (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
#endif /* USE_LOCKS */

#if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */
#if ((defined(__GNUC__) &&                                              \
      ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) ||      \
       defined(__i386__) || defined(__x86_64__))) ||                    \
     (defined(_MSC_VER) && _MSC_VER>=1310))
#ifndef USE_SPIN_LOCKS
#define USE_SPIN_LOCKS 1
#endif /* USE_SPIN_LOCKS */
#elif USE_SPIN_LOCKS
#error "USE_SPIN_LOCKS defined without implementation"
#endif /* ... locks available... */
#elif !defined(USE_SPIN_LOCKS)
#define USE_SPIN_LOCKS 0
#endif /* USE_LOCKS */

#ifndef ONLY_MSPACES
#define ONLY_MSPACES 0
#endif /* ONLY_MSPACES */
#ifndef MSPACES
#if ONLY_MSPACES
#define MSPACES 1
#else /* ONLY_MSPACES */
#define MSPACES 0
#endif /* ONLY_MSPACES */
#endif /* MSPACES */
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))
#endif /* MALLOC_ALIGNMENT */
#ifndef FOOTERS
#define FOOTERS 0
#endif /* FOOTERS */
#ifndef ABORT
#define ABORT abort()
#endif /* ABORT */
#ifndef ABORT_ON_ASSERT_FAILURE
#define ABORT_ON_ASSERT_FAILURE 1
#endif /* ABORT_ON_ASSERT_FAILURE */
#ifndef PROCEED_ON_ERROR
#define PROCEED_ON_ERROR 0
#endif /* PROCEED_ON_ERROR */

#ifndef INSECURE
#define INSECURE 0
#endif /* INSECURE */
#ifndef MALLOC_INSPECT_ALL
#define MALLOC_INSPECT_ALL 0
#endif /* MALLOC_INSPECT_ALL */
#ifndef HAVE_MMAP
#define HAVE_MMAP 1
#endif /* HAVE_MMAP */
#ifndef MMAP_CLEARS
#define MMAP_CLEARS 1
#endif /* MMAP_CLEARS */
#ifndef HAVE_MREMAP
#ifdef linux
#define HAVE_MREMAP 1
#define _GNU_SOURCE /* Turns on mremap() definition */
#else /* linux */
#define HAVE_MREMAP 0
#endif /* linux */
#endif /* HAVE_MREMAP */
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION  errno = ENOMEM;
#endif /* MALLOC_FAILURE_ACTION */
#ifndef HAVE_MORECORE
#if ONLY_MSPACES
#define HAVE_MORECORE 0
#else /* ONLY_MSPACES */
#define HAVE_MORECORE 1
#endif /* ONLY_MSPACES */
#endif /* HAVE_MORECORE */
#if !HAVE_MORECORE
#define MORECORE_CONTIGUOUS 0
#else /* !HAVE_MORECORE */
#define MORECORE_DEFAULT sbrk
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif /* MORECORE_CONTIGUOUS */
#endif /* HAVE_MORECORE */
#ifndef DEFAULT_GRANULARITY
#if (MORECORE_CONTIGUOUS || defined(WIN32))
#define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
#else /* MORECORE_CONTIGUOUS */
#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
#endif /* MORECORE_CONTIGUOUS */
#endif /* DEFAULT_GRANULARITY */
#ifndef DEFAULT_TRIM_THRESHOLD
#ifndef MORECORE_CANNOT_TRIM
#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
#else /* MORECORE_CANNOT_TRIM */
#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
#endif /* MORECORE_CANNOT_TRIM */
#endif /* DEFAULT_TRIM_THRESHOLD */
#ifndef DEFAULT_MMAP_THRESHOLD
#if HAVE_MMAP
#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
#else /* HAVE_MMAP */
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
#endif /* HAVE_MMAP */
#endif /* DEFAULT_MMAP_THRESHOLD */
#ifndef MAX_RELEASE_CHECK_RATE
#if HAVE_MMAP
#define MAX_RELEASE_CHECK_RATE 4095
#else
#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
#endif /* HAVE_MMAP */
#endif /* MAX_RELEASE_CHECK_RATE */
#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 0
#endif /* USE_BUILTIN_FFS */
#ifndef USE_DEV_RANDOM
#define USE_DEV_RANDOM 0
#endif /* USE_DEV_RANDOM */
#ifndef NO_MALLINFO
#define NO_MALLINFO 0
#endif /* NO_MALLINFO */
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif /* MALLINFO_FIELD_TYPE */
#ifndef NO_MALLOC_STATS
#define NO_MALLOC_STATS 0
#endif /* NO_MALLOC_STATS */
#ifndef NO_SEGMENT_TRAVERSAL
#define NO_SEGMENT_TRAVERSAL 0
#endif /* NO_SEGMENT_TRAVERSAL */

/*
  mallopt tuning options.  SVID/XPG defines four standard parameter
  numbers for mallopt, normally defined in malloc.h.  None of these
  are used in this malloc, so setting them has no effect. But this
  malloc does support the following options.
*/

#define M_TRIM_THRESHOLD     (-1)
#define M_GRANULARITY        (-2)
#define M_MMAP_THRESHOLD     (-3)

/* ------------------------ Mallinfo declarations ------------------------ */

/*
  Try to persuade compilers to inline. The most critical functions for
  inlining are defined as macros, so these aren't used for them.
*/

#define FORCEINLINE forceinline
#define NOINLINE dontinline

#if !ONLY_MSPACES

/* ------------------- Declarations of public routines ------------------- */

/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or
  null if no space is available, in which case errno is set to ENOMEM
  on ANSI C systems.

  If n is zero, malloc returns a minimum-sized chunk. (The minimum
  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
  systems.)  Note that size_t is an unsigned type, so calls with
  arguments that would be negative if signed are interpreted as
  requests for huge amounts of space, which will often fail. The
  maximum supported value of n differs across systems, but is in all
  cases less than the maximum representable value of a size_t.
*/
DLMALLOC_EXPORT void* dlmalloc(size_t);

/*
  free(void* p)
  Releases the chunk of memory pointed to by p, that had been previously
  allocated using malloc or a related routine such as realloc.
  It has no effect if p is null. If p was not malloced or already
  freed, free(p) will by default cause the current program to abort.
*/
DLMALLOC_EXPORT void dlfree(void*);

/*
  calloc(size_t n_elements, size_t element_size);
  Returns a pointer to n_elements * element_size bytes, with all locations
  set to zero.
*/
DLMALLOC_EXPORT void* dlcalloc(size_t, size_t);

/*
  realloc(void* p, size_t n)
  Returns a pointer to a chunk of size n that contains the same data
  as does chunk p up to the minimum of (n, p's size) bytes, or null
  if no space is available.

  The returned pointer may or may not be the same as p. The algorithm
  prefers extending p in most cases when possible, otherwise it
  employs the equivalent of a malloc-copy-free sequence.

  If p is null, realloc is equivalent to malloc.

  If space is not available, realloc returns null, errno is set (if on
  ANSI) and p is NOT freed.

  If n is for fewer bytes than already held by p, the newly unused
  space is lopped off and freed if possible.  realloc with a size
  argument of zero (re)allocates a minimum-sized chunk.

  The old unix realloc convention of allowing the last-free'd chunk
  to be used as an argument to realloc is not supported.
*/
DLMALLOC_EXPORT void* dlrealloc(void*, size_t);
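
/*
  Editor's note (illustrative sketch, not part of the original docs):
  because a failed realloc leaves p allocated, the common safe pattern
  is to assign through a temporary first:

    void* tmp = dlrealloc(buf, n);
    if (tmp == 0) {
      // allocation failed: buf is still valid and still owned here
    } else {
      buf = tmp;
    }
*/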

/*
  realloc_in_place(void* p, size_t n)
  Resizes the space allocated for p to size n, only if this can be
  done without moving p (i.e., only if there is adjacent space
  available if n is greater than p's current allocated size, or n is
  less than or equal to p's size). This may be used instead of plain
  realloc if an alternative allocation strategy is needed upon failure
  to expand space; for example, reallocation of a buffer that must be
  memory-aligned or cleared. You can use realloc_in_place to trigger
  these alternatives only when needed.

  Returns p if successful; otherwise null.
*/
DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t);
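
/*
  Editor's note (illustrative sketch, not part of the original docs;
  `oldn' and the 64-byte alignment are hypothetical): try to grow in
  place, and fall back to a fresh aligned allocation only on failure:

    if (dlrealloc_in_place(p, n) == 0) {
      void* q = dlmemalign(64, n);
      if (q) { memcpy(q, p, oldn); dlfree(p); p = q; }
    }
*/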

/*
  memalign(size_t alignment, size_t n);
  Returns a pointer to a newly allocated chunk of n bytes, aligned
  in accord with the alignment argument.

  The alignment argument should be a power of two. If the argument is
  not a power of two, the nearest greater power is used.
  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.
*/
DLMALLOC_EXPORT void* dlmemalign(size_t, size_t);

/*
  int posix_memalign(void** pp, size_t alignment, size_t n);
  Allocates a chunk of n bytes, aligned in accord with the alignment
  argument. Differs from memalign only in that it (1) assigns the
  allocated memory to *pp rather than returning it, (2) fails and
  returns EINVAL if the alignment is not a power of two, and (3) fails
  and returns ENOMEM if memory cannot be allocated.
*/
DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t);
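
/*
  Editor's note (illustrative sketch, not part of the original docs):
  unlike malloc, the error comes back as the return value, so check it
  rather than errno:

    void* p;
    int rc = dlposix_memalign(&p, 4096, 8192);  // page-aligned 8 KiB
    if (rc == EINVAL) { /-* alignment was not a power of two *-/ }
    else if (rc == ENOMEM) { /-* out of memory *-/ }
*/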

/*
  valloc(size_t n);
  Equivalent to memalign(pagesize, n), where pagesize is the page
  size of the system. If the pagesize is unknown, 4096 is used.
*/
DLMALLOC_EXPORT void* dlvalloc(size_t);

/*
  mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters.  The format is to provide a
  (parameter-number, parameter-value) pair.  mallopt then sets the
  corresponding parameter to the argument value if it can (i.e., so
  long as the value is meaningful), and returns 1 if successful else
  0.  To work around the fact that mallopt is specified to use int,
  not size_t parameters, the value -1 is specially treated as the
  maximum unsigned size_t value.

  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h.  None of these are used in this malloc,
  so setting them has no effect. But this malloc also supports other
  options in mallopt. See below for details.  Briefly, supported
  parameters are as follows (listed defaults are for "typical"
  configurations).

  Symbol            param #  default    allowed param values
  M_TRIM_THRESHOLD     -1   2*1024*1024   any   (-1 disables)
  M_GRANULARITY        -2     page size   any power of 2 >= page size
  M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
*/
DLMALLOC_EXPORT int dlmallopt(int, int);
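
/*
  Editor's note (illustrative sketch, not part of the original docs):

    dlmallopt(M_GRANULARITY, 64 * 1024);  // grow the heap in 64 KiB steps
    dlmallopt(M_TRIM_THRESHOLD, -1);      // -1 maps to MAX_SIZE_T: never trim
*/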

/*
  malloc_footprint();
  Returns the number of bytes obtained from the system.  The total
  number of bytes allocated by malloc, realloc etc., is less than this
  value. Unlike mallinfo, this function returns only a precomputed
  result, so can be called frequently to monitor memory consumption.
  Even if locks are otherwise defined, this function does not use them,
  so results might not be up to date.
*/
DLMALLOC_EXPORT size_t dlmalloc_footprint(void);

/*
  malloc_max_footprint();
  Returns the maximum number of bytes obtained from the system. This
  value will be greater than current footprint if deallocated space
  has been reclaimed by the system. The peak number of bytes allocated
  by malloc, realloc etc., is less than this value. Unlike mallinfo,
  this function returns only a precomputed result, so can be called
  frequently to monitor memory consumption.  Even if locks are
  otherwise defined, this function does not use them, so results might
  not be up to date.
*/
DLMALLOC_EXPORT size_t dlmalloc_max_footprint(void);

/*
  malloc_footprint_limit();
  Returns the number of bytes that the heap is allowed to obtain from
  the system, returning the last value returned by
  malloc_set_footprint_limit, or the maximum size_t value if
  never set. The returned value reflects a permission. There is no
  guarantee that this number of bytes can actually be obtained from
  the system.
*/
DLMALLOC_EXPORT size_t dlmalloc_footprint_limit(void);

/*
  malloc_set_footprint_limit();
  Sets the maximum number of bytes to obtain from the system, causing
  failure returns from malloc and related functions upon attempts to
  exceed this value. The argument value may be subject to page
  rounding to an enforceable limit; this actual value is returned.
  Using an argument of the maximum possible size_t effectively
  disables checks. If the argument is less than or equal to the
  current malloc_footprint, then all future allocations that require
  additional system memory will fail. However, invocation cannot
  retroactively deallocate existing used memory.
*/
DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes);
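
/*
  Editor's note (illustrative sketch, not part of the original docs):
  capping the heap at roughly 256 MiB; the enforced (page-rounded)
  limit is whatever the call returns:

    size_t limit = dlmalloc_set_footprint_limit((size_t)256 << 20);
    // thereafter dlmalloc_footprint() <= limit until the limit changes
*/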

#if MALLOC_INSPECT_ALL
/*
  malloc_inspect_all(void(*handler)(void *start,
                                    void *end,
                                    size_t used_bytes,
                                    void* callback_arg),
                     void* arg);
  Traverses the heap and calls the given handler for each managed
  region, skipping all bytes that are (or may be) used for bookkeeping
  purposes.  Traversal does not include chunks that have been
  directly memory mapped. Each reported region begins at the start
  address, and continues up to but not including the end address.  The
  first used_bytes of the region contain allocated data. If
  used_bytes is zero, the region is unallocated. The handler is
  invoked with the given callback argument. If locks are defined, they
  are held during the entire traversal. It is a bad idea to invoke
  other malloc functions from within the handler.

  For example, to count the number of in-use chunks with size greater
  than 1000, you could write:
    static int count = 0;
    void count_chunks(void* start, void* end, size_t used, void* arg) {
      if (used >= 1000) ++count;
    }
  then:
    malloc_inspect_all(count_chunks, NULL);

  malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
*/
DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void*, size_t, void*),
                                          void* arg);

#endif /* MALLOC_INSPECT_ALL */

/*
  mallinfo()
  Returns (by copy) a struct containing various summary statistics:

  arena:     current total non-mmapped bytes allocated from system
  ordblks:   the number of free chunks
  smblks:    always zero.
  hblks:     current number of mmapped regions
  hblkhd:    total bytes held in mmapped regions
  usmblks:   the maximum total allocated space. This will be greater
               than current total if trimming has occurred.
  fsmblks:   always zero
  uordblks:  current total allocated space (normal or mmapped)
  fordblks:  total free space
  keepcost:  the maximum number of bytes that could ideally be released
               back to system via malloc_trim. ("ideally" means that
               it ignores page restrictions etc.)

  Because these fields are ints, but internal bookkeeping may
  be kept as longs, the reported values may wrap around zero and
  thus be inaccurate.
*/
DLMALLOC_EXPORT struct mallinfo dlmallinfo(void);
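
/*
  Editor's note (illustrative sketch, not part of the original docs):
  the fields most often compared are allocated vs. free space:

    struct mallinfo mi = dlmallinfo();
    size_t in_use = mi.uordblks;   // current total allocated space
    size_t idle   = mi.fordblks;   // total free space
*/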

/*
  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);

  independent_calloc is similar to calloc, but instead of returning a
  single cleared space, it returns an array of pointers to n_elements
  independent elements that can hold contents of size elem_size, each
  of which starts out cleared, and can be independently freed,
  realloc'ed etc. The elements are guaranteed to be adjacently
  allocated (this is not guaranteed to occur with multiple callocs or
  mallocs), which may also improve cache locality in some
  applications.

  The "chunks" argument is optional (i.e., may be null, which is
  probably the most typical usage). If it is null, the returned array
  is itself dynamically allocated and should also be freed when it is
  no longer needed. Otherwise, the chunks array must be of at least
  n_elements in length. It is filled in with the pointers to the
  chunks.

  In either case, independent_calloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and "chunks"
  is null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be freed when it is no longer needed. This can be
  done all at once using bulk_free.

  independent_calloc simplifies and speeds up implementations of many
  kinds of pools.  It may also be useful when constructing large data
  structures that initially have a fixed number of fixed-sized nodes,
  but the number is not known at compile time, and some of the nodes
  may later need to be freed. For example:

    struct Node { int item; struct Node* next; };

    struct Node* build_list() {
      struct Node** pool;
      int i;
      int n = read_number_of_nodes_needed();
      if (n <= 0) return 0;
      pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
      if (pool == 0) die();
      // organize into a linked list...
      struct Node* first = pool[0];
      for (i = 0; i < n-1; ++i)
        pool[i]->next = pool[i+1];
      free(pool);     // Can now free the array (or not, if it is needed later)
      return first;
    }
*/
DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**);

/*
  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);

  independent_comalloc allocates, all at once, a set of n_elements
  chunks with sizes indicated in the "sizes" array.  It returns
  an array of pointers to these elements, each of which can be
  independently freed, realloc'ed etc. The elements are guaranteed to
  be adjacently allocated (this is not guaranteed to occur with
  multiple callocs or mallocs), which may also improve cache locality
  in some applications.

  The "chunks" argument is optional (i.e., may be null). If it is null
  the returned array is itself dynamically allocated and should also
  be freed when it is no longer needed. Otherwise, the chunks array
  must be of at least n_elements in length. It is filled in with the
  pointers to the chunks.

  In either case, independent_comalloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and chunks is
  null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be freed when it is no longer needed. This can be
  done all at once using bulk_free.

  independent_comalloc differs from independent_calloc in that each
  element may have a different size, and also that it does not
  automatically clear elements.

  independent_comalloc can be used to speed up allocation in cases
  where several structs or objects must always be allocated at the
  same time.  For example:

    struct Head { ... }
    struct Foot { ... }

    void send_message(char* msg) {
      int msglen = strlen(msg);
      size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
      void* chunks[3];
      if (independent_comalloc(3, sizes, chunks) == 0)
        die();
      struct Head* head = (struct Head*)(chunks[0]);
      char*        body = (char*)(chunks[1]);
      struct Foot* foot = (struct Foot*)(chunks[2]);
      // ...
    }

  In general though, independent_comalloc is worth using only for
  larger values of n_elements. For small values, you probably won't
  detect enough difference from series of malloc calls to bother.

  Overuse of independent_comalloc can increase overall memory usage,
  since it cannot reuse existing noncontiguous small chunks that
  might be available for some of the elements.
*/
DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**);

/*
  bulk_free(void* array[], size_t n_elements)
  Frees and clears (sets to null) each non-null pointer in the given
  array.  This is likely to be faster than freeing them one-by-one.
  If footers are used, pointers that have been allocated in different
  mspaces are not freed or cleared, and the count of all such pointers
  is returned.  For large arrays of pointers with poor locality, it
  may be worthwhile to sort this array before calling bulk_free.
*/
DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements);
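
/*
  Editor's note (illustrative sketch, not part of the original docs):
  pairing dlindependent_calloc with dlbulk_free tears a pool down as
  cheaply as it was built:

    void** pool = dlindependent_calloc(n, sizeof(struct Node), 0);
    if (pool) {
      // ... use pool[0..n-1] ...
      dlbulk_free(pool, n);  // frees the elements, nulling each entry
      dlfree(pool);          // then free the pointer array itself
    }
*/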

/*
  pvalloc(size_t n);
  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
*/
DLMALLOC_EXPORT void* dlpvalloc(size_t);

/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative arguments
  to sbrk) if there is unused memory at the `high' end of the malloc
  pool or in unused MMAP segments. You can call this after freeing
  large blocks of memory to potentially reduce the system-level memory
  requirements of a program. However, it cannot guarantee to reduce
  memory. Under some allocation patterns, some large free blocks of
  memory will be locked between two used chunks, so they cannot be
  given back to the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero, only
  the minimum amount of memory to maintain internal data structures
  will be left. Non-zero arguments can be supplied to maintain enough
  trailing space to service future expected allocations without having
  to re-obtain memory from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
*/
DLMALLOC_EXPORT int dlmalloc_trim(size_t);

/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed. Note that this is the number of bytes allocated, not the
  number requested. It will be larger than the number requested
  because of alignment and bookkeeping overhead. Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.
*/
DLMALLOC_EXPORT void dlmalloc_stats(void);

/*
  malloc_usable_size(void* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects. This is not a particularly great
  programming practice. malloc_usable_size can be more useful in
  debugging and assertions, for example:

    p = malloc(n);
    assert(malloc_usable_size(p) >= 256);
*/
size_t dlmalloc_usable_size(void*);

#endif /* ONLY_MSPACES */

#if MSPACES

/*
  mspace is an opaque type representing an independent
  region of space that supports mspace_malloc, etc.
*/
typedef void* mspace;

/*
  create_mspace creates and returns a new independent space with the
  given initial capacity, or, if 0, the default granularity size.  It
  returns null if there is no system memory available to create the
  space.  If argument locked is non-zero, the space uses a separate
  lock to control access. The capacity of the space will grow
  dynamically as needed to service mspace_malloc requests.  You can
  control the sizes of incremental increases of this space by
  compiling with a different DEFAULT_GRANULARITY or dynamically
  setting with mallopt(M_GRANULARITY, value).
*/
DLMALLOC_EXPORT mspace create_mspace(size_t capacity, int locked);
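
/*
  Editor's note (illustrative sketch, not part of the original docs):
  the usual mspace lifecycle, with a private lock for threaded use:

    mspace ms = create_mspace(0, 1);   // default capacity, locked
    void* p = mspace_malloc(ms, 128);
    mspace_free(ms, p);
    destroy_mspace(ms);                // releases the whole space
*/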

/*
  destroy_mspace destroys the given space, and attempts to return all
  of its memory back to the system, returning the total number of
  bytes freed. After destruction, the results of access to all memory
  used by the space become undefined.
*/
DLMALLOC_EXPORT size_t destroy_mspace(mspace msp);

/*
  create_mspace_with_base uses the memory supplied as the initial base
  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  space is used for bookkeeping, so the capacity must be at least this
  large. (Otherwise 0 is returned.) When this initial space is
  exhausted, additional memory will be obtained from the system.
  Destroying this space will deallocate all additionally allocated
  space (if possible) but not the initial base.
*/
DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked);
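
/*
  Editor's note (illustrative sketch, not part of the original docs):
  seeding an mspace from a static arena; early allocations come out of
  `arena' before the space falls back to the system:

    static char arena[64 * 1024];
    mspace ms = create_mspace_with_base(arena, sizeof(arena), 0);
*/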

/*
  mspace_track_large_chunks controls whether requests for large chunks
  are allocated in their own untracked mmapped regions, separate from
  others in this mspace. By default large chunks are not tracked,
  which reduces fragmentation. However, such chunks are not
  necessarily released to the system upon destroy_mspace.  Enabling
  tracking by setting to true may increase fragmentation, but avoids
  leakage when relying on destroy_mspace to release all memory
  allocated using this space.  The function returns the previous
  setting.
*/
DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable);

/*
  mspace_malloc behaves as malloc, but operates within
  the given space.
*/
DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes);

/*
  mspace_free behaves as free, but operates within
  the given space.

  If compiled with FOOTERS==1, mspace_free is not actually needed.
  free may be called instead of mspace_free because freed chunks from
  any space are handled by their originating spaces.
*/
DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem);

/*
  mspace_realloc behaves as realloc, but operates within
  the given space.

  If compiled with FOOTERS==1, mspace_realloc is not actually
  needed. realloc may be called instead of mspace_realloc because
  realloced chunks from any space are handled by their originating
  spaces.
*/
DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize);

/*
  mspace_calloc behaves as calloc, but operates within
  the given space.
*/
DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);

/*
  mspace_memalign behaves as memalign, but operates within
  the given space.
*/
DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);

/*
  mspace_independent_calloc behaves as independent_calloc, but
  operates within the given space.
*/
DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                                 size_t elem_size, void* chunks[]);

/*
  mspace_independent_comalloc behaves as independent_comalloc, but
  operates within the given space.
*/
DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                                   size_t sizes[], void* chunks[]);

/*
  mspace_footprint() returns the number of bytes obtained from the
  system for this space.
*/
DLMALLOC_EXPORT size_t mspace_footprint(mspace msp);

/*
  mspace_max_footprint() returns the peak number of bytes obtained from the
  system for this space.
*/
DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp);

#if !NO_MALLINFO
/*
  mspace_mallinfo behaves as mallinfo, but reports properties of
  the given space.
*/
DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp);
#endif /* NO_MALLINFO */

/*
  mspace_usable_size(const void* p) behaves the same as
  malloc_usable_size;
*/
DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem);

/*
  mspace_malloc_stats behaves as malloc_stats, but reports
  properties of the given space.
*/
DLMALLOC_EXPORT void mspace_malloc_stats(mspace msp);

/*
  mspace_trim behaves as malloc_trim, but
  operates within the given space.
*/
DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad);

/*
  An alias for mallopt.
*/
DLMALLOC_EXPORT int mspace_mallopt(int, int);

#endif /* MSPACES */

#ifdef __cplusplus
}  /* end of extern "C" */
#endif /* __cplusplus */

/*
  ========================================================================
  To make a fully customizable malloc.h header file, cut everything
  above this line, put into file malloc.h, edit to suit, and #include it
  on the next line, as well as in programs that use this malloc.
  ========================================================================
*/

/* #include "malloc.h" */

/*------------------------------ internal #includes ---------------------- */

#ifdef _MSC_VER
#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
#endif /* _MSC_VER */
#if !NO_MALLOC_STATS
#endif /* NO_MALLOC_STATS */
#ifndef LACKS_ERRNO_H
#include <errno.h>       /* for MALLOC_FAILURE_ACTION */
#endif /* LACKS_ERRNO_H */
#ifdef DEBUG
#if ABORT_ON_ASSERT_FAILURE
#undef assert
#define assert(x) if(!(x)) ABORT
#else /* ABORT_ON_ASSERT_FAILURE */
#include <assert.h>
#endif /* ABORT_ON_ASSERT_FAILURE */
#else  /* DEBUG */
#ifndef assert
#define assert(x)
#endif
#define DEBUG 0
#endif /* DEBUG */
#if !defined(WIN32) && !defined(LACKS_TIME_H)
#include <time.h>        /* for magic initialization */
#endif /* WIN32 */
#ifndef LACKS_STDLIB_H
#include <stdlib.h>      /* for abort() */
#endif /* LACKS_STDLIB_H */
#ifndef LACKS_STRING_H
#include <string.h>      /* for memset etc */
#endif /* LACKS_STRING_H */
#if USE_BUILTIN_FFS
#ifndef LACKS_STRINGS_H
#include <strings.h>     /* for ffs */
#endif /* LACKS_STRINGS_H */
#endif /* USE_BUILTIN_FFS */
#if HAVE_MMAP
#ifndef LACKS_SYS_MMAN_H
/* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
#if (defined(linux) && !defined(__USE_GNU))
#define __USE_GNU 1
#include <sys/mman.h>    /* for mmap */
#undef __USE_GNU
#else
#include <sys/mman.h>    /* for mmap */
#endif /* linux */
#endif /* LACKS_SYS_MMAN_H */
#ifndef LACKS_FCNTL_H
#include <fcntl.h>
#endif /* LACKS_FCNTL_H */
#endif /* HAVE_MMAP */
#ifndef LACKS_UNISTD_H
#include <unistd.h>      /* for sbrk, sysconf */
#else /* LACKS_UNISTD_H */
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) && !defined(__COSMOPOLITAN__)
extern void* sbrk(ptrdiff_t);
#endif /* FreeBSD etc */
#endif /* LACKS_UNISTD_H */

/* Declarations for locking */
#if USE_LOCKS
#ifndef WIN32
#if defined (__SVR4) && defined (__sun)  /* solaris */
#include <thread.h>
#elif !defined(LACKS_SCHED_H)
#include <sched.h>
#endif /* solaris or LACKS_SCHED_H */
#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
#include <pthread.h>
#endif /* USE_RECURSIVE_LOCKS ... */
#elif defined(_MSC_VER)
#ifndef _M_AMD64
/* These are already defined on AMD64 builds */
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* _M_AMD64 */
#pragma intrinsic (_InterlockedCompareExchange)
#pragma intrinsic (_InterlockedExchange)
#define interlockedcompareexchange _InterlockedCompareExchange
#define interlockedexchange _InterlockedExchange
#elif defined(WIN32) && defined(__GNUC__)
#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
#define interlockedexchange __sync_lock_test_and_set
#endif /* Win32 */
#else /* USE_LOCKS */
#endif /* USE_LOCKS */

#ifndef LOCK_AT_FORK
#define LOCK_AT_FORK 0
#endif

/* Declarations for bit scanning on win32 */
#if defined(_MSC_VER) && _MSC_VER>=1300
#ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#ifdef __cplusplus
}
#endif /* __cplusplus */

#define BitScanForward _BitScanForward
#define BitScanReverse _BitScanReverse
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
#endif /* BitScanForward */
#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */

#ifndef WIN32
#ifndef malloc_getpagesize
#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
#    ifndef _SC_PAGE_SIZE
#      define _SC_PAGE_SIZE _SC_PAGESIZE
#    endif
#  endif
#  ifdef _SC_PAGE_SIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#  else
#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
       extern size_t getpagesize();
#      define malloc_getpagesize getpagesize()
#    else
#      ifdef WIN32 /* use supplied emulation of getpagesize */
#        define malloc_getpagesize getpagesize()
#      else
#        ifndef LACKS_SYS_PARAM_H
#          include <sys/param.h>
#        endif
#        ifdef EXEC_PAGESIZE
#          define malloc_getpagesize EXEC_PAGESIZE
#        else
#          ifdef NBPG
#            ifndef CLSIZE
#              define malloc_getpagesize NBPG
#            else
#              define malloc_getpagesize (NBPG * CLSIZE)
#            endif
#          else
#            ifdef NBPC
#              define malloc_getpagesize NBPC
#            else
#              ifdef PAGESIZE
#                define malloc_getpagesize PAGESIZE
#              else /* just guess */
#                define malloc_getpagesize ((size_t)4096U)
#              endif
#            endif
#          endif
#        endif
#      endif
#    endif
#  endif
#endif
#endif

/* ------------------- size_t and alignment properties -------------------- */

/* The byte and bit size of a size_t */
#define SIZE_T_SIZE         (sizeof(size_t))
#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)

/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
#define SIZE_T_ZERO         ((size_t)0)
#define SIZE_T_ONE          ((size_t)1)
#define SIZE_T_TWO          ((size_t)2)
#define SIZE_T_FOUR         ((size_t)4)
#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)

/* The bit mask value corresponding to MALLOC_ALIGNMENT */
#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)

/* True if address a has acceptable alignment */
#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)

/* the number of bytes to offset an address to align it */
#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
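
/*
  Editor's note (worked example, not part of the original docs): with
  64-bit pointers MALLOC_ALIGNMENT is 16 and CHUNK_ALIGN_MASK is 15,
  so for an address A whose low nibble is 0x9:
    A & 15        == 9
    (16 - 9) & 15 == 7    ->  align_offset(A) == 7
  and A + 7 is 16-byte aligned; is_aligned(A + 7) then holds.
*/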

/* -------------------------- MMAP preliminaries ------------------------- */

/*
   If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
   checks to fail so compiler optimizer can delete code rather than
   using so many "#if"s.
*/

/* MORECORE and MMAP must return MFAIL on failure */
#define MFAIL                ((void*)(MAX_SIZE_T))
#define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */

#if HAVE_MMAP

#ifndef WIN32
#define MUNMAP_DEFAULT(a, s)  munmap((a), (s))
#define MMAP_PROT            (PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS        MAP_ANON
#endif /* MAP_ANON */
#ifdef MAP_ANONYMOUS
#define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
#define MMAP_DEFAULT(s)       mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
#else /* MAP_ANONYMOUS */
/*
   Nearly all versions of mmap support MAP_ANONYMOUS, so the following
   is unlikely to be needed, but is supplied just in case.
*/
#define MMAP_FLAGS           (MAP_PRIVATE)
static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
           (dev_zero_fd = open("/dev/zero", O_RDWR), \
            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
#endif /* MAP_ANONYMOUS */

#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)

#else /* WIN32 */

/* Win32 MMAP via VirtualAlloc */
FORCEINLINE void* win32mmap(size_t size) {
  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
  return (ptr != 0)? ptr: MFAIL;
}

/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
FORCEINLINE void* win32direct_mmap(size_t size) {
  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
                           PAGE_READWRITE);
  return (ptr != 0)? ptr: MFAIL;
}

/* This function supports releasing coalesced segments */
FORCEINLINE int win32munmap(void* ptr, size_t size) {
  MEMORY_BASIC_INFORMATION minfo;
  char* cptr = (char*)ptr;
  while (size) {
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
      return -1;
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
      return -1;
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
      return -1;
    cptr += minfo.RegionSize;
    size -= minfo.RegionSize;
  }
  return 0;
}

#define MMAP_DEFAULT(s)             win32mmap(s)
#define MUNMAP_DEFAULT(a, s)        win32munmap((a), (s))
#define DIRECT_MMAP_DEFAULT(s)      win32direct_mmap(s)
#endif /* WIN32 */
#endif /* HAVE_MMAP */

#if HAVE_MREMAP
#ifndef WIN32
#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
#endif /* WIN32 */
#endif /* HAVE_MREMAP */

/**
 * Define CALL_MORECORE
 */
#if HAVE_MORECORE
#ifdef MORECORE
#define CALL_MORECORE(S)    MORECORE(S)
#else  /* MORECORE */
#define CALL_MORECORE(S)    MORECORE_DEFAULT(S)
#endif /* MORECORE */
#else  /* HAVE_MORECORE */
#define CALL_MORECORE(S)    MFAIL
#endif /* HAVE_MORECORE */

/**
 * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
 */
#if HAVE_MMAP
#define USE_MMAP_BIT         (SIZE_T_ONE)

#ifdef MMAP
#define CALL_MMAP(s)         MMAP(s)
#else  /* MMAP */
#define CALL_MMAP(s)         MMAP_DEFAULT(s)
#endif /* MMAP */
#ifdef MUNMAP
#define CALL_MUNMAP(a, s)    MUNMAP((a), (s))
#else  /* MUNMAP */
#define CALL_MUNMAP(a, s)    MUNMAP_DEFAULT((a), (s))
#endif /* MUNMAP */
#ifdef DIRECT_MMAP
#define CALL_DIRECT_MMAP(s)  DIRECT_MMAP(s)
#else  /* DIRECT_MMAP */
#define CALL_DIRECT_MMAP(s)  DIRECT_MMAP_DEFAULT(s)
#endif /* DIRECT_MMAP */
#else  /* HAVE_MMAP */
#define USE_MMAP_BIT         (SIZE_T_ZERO)

#define MMAP(s)              MFAIL
#define MUNMAP(a, s)         (-1)
#define DIRECT_MMAP(s)       MFAIL
#define CALL_DIRECT_MMAP(s)  DIRECT_MMAP(s)
#define CALL_MMAP(s)         MMAP(s)
#define CALL_MUNMAP(a, s)    MUNMAP((a), (s))
#endif /* HAVE_MMAP */

/**
 * Define CALL_MREMAP
 */
#if HAVE_MMAP && HAVE_MREMAP
#ifdef MREMAP
#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
#else /* MREMAP */
#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
#endif /* MREMAP */
#else  /* HAVE_MMAP && HAVE_MREMAP */
#define CALL_MREMAP(addr, osz, nsz, mv)     MFAIL
#endif /* HAVE_MMAP && HAVE_MREMAP */

/* mstate bit set if contiguous morecore disabled or failed */
#define USE_NONCONTIGUOUS_BIT (4U)

/* segment bit set in create_mspace_with_base */
#define EXTERN_BIT            (8U)

/* --------------------------- Lock preliminaries ------------------------ */

/*
  When locks are defined, there is one global lock, plus
  one per-mspace lock.

  The global lock ensures that mparams.magic and other unique
  mparams values are initialized only once. It also protects
  sequences of calls to MORECORE.  In many cases sys_alloc requires
  two calls, that should not be interleaved with calls by other
  threads. This does not protect against direct calls to MORECORE
  by other threads not using this lock, so there is still code to
  cope as best we can with interference.

  Per-mspace locks surround calls to malloc, free, etc.
  By default, locks are simple non-reentrant mutexes.

  Because lock-protected regions generally have bounded times, it is
  OK to use the supplied simple spinlocks. Spinlocks are likely to
  improve performance for lightly contended applications, but worsen
  performance under heavy contention.

  If USE_LOCKS is > 1, the definitions of lock routines here are
  bypassed, in which case you will need to define the type MLOCK_T,
  and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
  and TRY_LOCK.  You must also declare a
    static MLOCK_T malloc_global_mutex = { initialization values };.
*/

#if !USE_LOCKS
#define USE_LOCK_BIT               (0U)
#define INITIAL_LOCK(l)            (0)
#define DESTROY_LOCK(l)            (0)
#define ACQUIRE_MALLOC_GLOBAL_LOCK()
#define RELEASE_MALLOC_GLOBAL_LOCK()

#else
#if USE_LOCKS > 1
/* -----------------------  User-defined locks ------------------------ */
/* Define your own lock implementation here */
/* #define INITIAL_LOCK(lk)  ... */
/* #define DESTROY_LOCK(lk)  ... */
/* #define ACQUIRE_LOCK(lk)  ... */
/* #define RELEASE_LOCK(lk)  ... */
/* #define TRY_LOCK(lk) ... */
/* static MLOCK_T malloc_global_mutex = ... */

#elif USE_SPIN_LOCKS

/* First, define CAS_LOCK and CLEAR_LOCK on ints */
/* Note CAS_LOCK defined to return 0 on success */

#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
#define CAS_LOCK(sl)     __sync_lock_test_and_set(sl, 1)
#define CLEAR_LOCK(sl)   __sync_lock_release(sl)

#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
/* Custom spin locks for older gcc on x86 */
FORCEINLINE int x86_cas_lock(int *sl) {
  int ret;
  int val = 1;
  int cmp = 0;
  __asm__ __volatile__  ("lock; cmpxchgl %1, %2"
                         : "=a" (ret)
                         : "r" (val), "m" (*(sl)), "0"(cmp)
                         : "memory", "cc");
  return ret;
}

FORCEINLINE void x86_clear_lock(int* sl) {
  assert(*sl != 0);
  int prev = 0;
  int ret;
  __asm__ __volatile__ ("lock; xchgl %0, %1"
                        : "=r" (ret)
                        : "m" (*(sl)), "0"(prev)
                        : "memory");
}

#define CAS_LOCK(sl)     x86_cas_lock(sl)
#define CLEAR_LOCK(sl)   x86_clear_lock(sl)

#else /* Win32 MSC */
#define CAS_LOCK(sl)     interlockedexchange(sl, (LONG)1)
#define CLEAR_LOCK(sl)   interlockedexchange(sl, (LONG)0)

#endif /* ... gcc spins locks ... */

#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
/* Plain spin locks use single word (embedded in malloc_states) */
static dontinline int spin_acquire_lock(int *sl) {
  while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
    __builtin_ia32_pause();
  }
  return 0;
}

#define MLOCK_T               int
#define TRY_LOCK(sl)          !CAS_LOCK(sl)
#define RELEASE_LOCK(sl)      CLEAR_LOCK(sl)
#define ACQUIRE_LOCK(sl)      (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
#define INITIAL_LOCK(sl)      (*sl = 0)
#define DESTROY_LOCK(sl)      (0)
static MLOCK_T malloc_global_mutex = 0;

#else /* USE_RECURSIVE_LOCKS */
/* types for lock owners */
#ifdef WIN32
#define THREAD_ID_T           DWORD
#define CURRENT_THREAD        GetCurrentThreadId()
#define EQ_OWNER(X,Y)         ((X) == (Y))
#else
/*
  Note: the following assume that pthread_t is a type that can be
  initialized to (casted) zero. If this is not the case, you will need to
  somehow redefine these or not use spin locks.
*/
#define THREAD_ID_T           pthread_t
#define CURRENT_THREAD        pthread_self()
#define EQ_OWNER(X,Y)         pthread_equal(X, Y)
#endif

struct malloc_recursive_lock {
  int sl;
  unsigned int c;
  THREAD_ID_T threadid;
};

#define MLOCK_T  struct malloc_recursive_lock
static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};

FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
  assert(lk->sl != 0);
  if (--lk->c == 0) {
    CLEAR_LOCK(&lk->sl);
  }
}

FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
  THREAD_ID_T mythreadid = CURRENT_THREAD;
  for (;;) {
    if (*((volatile int *)(&lk->sl)) == 0) {
      if (!CAS_LOCK(&lk->sl)) {
        lk->threadid = mythreadid;
        lk->c = 1;
        return 0;
      }
    }
    else if (EQ_OWNER(lk->threadid, mythreadid)) {
      ++lk->c;
      return 0;
    }
    __builtin_ia32_pause();
  }
}

FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
  THREAD_ID_T mythreadid = CURRENT_THREAD;
  if (*((volatile int *)(&lk->sl)) == 0) {
    if (!CAS_LOCK(&lk->sl)) {
      lk->threadid = mythreadid;
      lk->c = 1;
      return 1;
    }
  }
  else if (EQ_OWNER(lk->threadid, mythreadid)) {
    ++lk->c;
    return 1;
  }
  return 0;
}

#define RELEASE_LOCK(lk)      recursive_release_lock(lk)
#define TRY_LOCK(lk)          recursive_try_lock(lk)
#define ACQUIRE_LOCK(lk)      recursive_acquire_lock(lk)
#define INITIAL_LOCK(lk)      ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
#define DESTROY_LOCK(lk)      (0)
#endif /* USE_RECURSIVE_LOCKS */

#elif defined(WIN32) /* Win32 critical sections */
#define MLOCK_T               CRITICAL_SECTION
#define ACQUIRE_LOCK(lk)      (EnterCriticalSection(lk), 0)
#define RELEASE_LOCK(lk)      LeaveCriticalSection(lk)
#define TRY_LOCK(lk)          TryEnterCriticalSection(lk)
#define INITIAL_LOCK(lk)      (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
#define DESTROY_LOCK(lk)      (DeleteCriticalSection(lk), 0)
#define NEED_GLOBAL_LOCK_INIT

static MLOCK_T malloc_global_mutex;
static volatile LONG malloc_global_mutex_status;

/* Use spin loop to initialize global lock */
static void init_malloc_global_mutex() {
  for (;;) {
    long stat = malloc_global_mutex_status;
    if (stat > 0)
      return;
    /* transition to < 0 while initializing, then to > 0 */
    if (stat == 0 &&
        interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
      InitializeCriticalSection(&malloc_global_mutex);
      interlockedexchange(&malloc_global_mutex_status, (LONG)1);
      return;
    }
    SleepEx(0, FALSE);
  }
}

#else /* pthreads-based locks */
#define MLOCK_T               pthread_mutex_t
#define ACQUIRE_LOCK(lk)      pthread_mutex_lock(lk)
#define RELEASE_LOCK(lk)      pthread_mutex_unlock(lk)
#define TRY_LOCK(lk)          (!pthread_mutex_trylock(lk))
#define INITIAL_LOCK(lk)      pthread_init_lock(lk)
#define DESTROY_LOCK(lk)      pthread_mutex_destroy(lk)

#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
/* Cope with old-style linux recursive lock initialization by adding */
/* skipped internal declaration from pthread.h */
extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
                                              int __kind));
#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
#endif /* USE_RECURSIVE_LOCKS ... */

static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;

static int pthread_init_lock (MLOCK_T *lk) {
  pthread_mutexattr_t attr;
  if (pthread_mutexattr_init(&attr)) return 1;
#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
#endif
  if (pthread_mutex_init(lk, &attr)) return 1;
  if (pthread_mutexattr_destroy(&attr)) return 1;
  return 0;
}

#endif /* ... lock types ... */

/* Common code for all lock types */
#define USE_LOCK_BIT               (2U)

#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
#define ACQUIRE_MALLOC_GLOBAL_LOCK()  ACQUIRE_LOCK(&malloc_global_mutex);
#endif

#ifndef RELEASE_MALLOC_GLOBAL_LOCK
#define RELEASE_MALLOC_GLOBAL_LOCK()  RELEASE_LOCK(&malloc_global_mutex);
#endif

#endif /* USE_LOCKS */
|
|
|
|
|
|
|
|
struct malloc_chunk {
|
|
|
|
size_t prev_foot; /* Size of previous chunk (if free). */
|
|
|
|
size_t head; /* Size and inuse bits. */
|
|
|
|
struct malloc_chunk* fd; /* double links -- used only if free. */
|
|
|
|
struct malloc_chunk* bk;
|
|
|
|
};
|
|
|
|
|
|
|
|
typedef struct malloc_chunk mchunk;
|
|
|
|
typedef struct malloc_chunk* mchunkptr;
|
|
|
|
typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
|
|
|
|
typedef unsigned int bindex_t; /* Described below */
|
|
|
|
typedef unsigned int binmap_t; /* Described below */
|
|
|
|
typedef unsigned int flag_t; /* The type of various bit flag sets */
|
|
|
|
|
|
|
|
/* ------------------- Chunks sizes and alignments ----------------------- */
|
|
|
|
|
|
|
|
#define MCHUNK_SIZE (sizeof(mchunk))
|
|
|
|
|
|
|
|
#if FOOTERS
|
|
|
|
#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
|
|
|
|
#else /* FOOTERS */
|
|
|
|
#define CHUNK_OVERHEAD (SIZE_T_SIZE)
|
|
|
|
#endif /* FOOTERS */
|
|
|
|
|
|
|
|
/* MMapped chunks need a second word of overhead ... */
|
|
|
|
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
|
|
|
|
/* ... and additional padding for fake next-chunk at foot */
|
|
|
|
#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
|
|
|
|
|
|
|
|
/* The smallest size we can malloc is an aligned minimal chunk */
|
|
|
|
#define MIN_CHUNK_SIZE\
|
|
|
|
((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
|
|
|
|
|
|
|
|
/* conversion from malloc headers to user pointers, and back */
|
|
|
|
#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
|
|
|
|
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
|
|
|
|
/* chunk associated with aligned address A */
|
|
|
|
#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
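
/*
  A note on the conversion (a sketch, assuming 64-bit size_t): the user
  pointer returned by malloc sits TWO_SIZE_T_SIZES == 16 bytes past the
  chunk base, i.e. just past prev_foot and head, so the fd/bk fields of
  a free chunk occupy the first 16 bytes of user data once the chunk is
  allocated; mem2chunk() simply inverts the offset:

    mchunkptr p = mem2chunk(mem);   // mem was returned by malloc
    assert(chunk2mem(p) == mem);
*/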

/* Bounds on request (not chunk) sizes. */
#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

/* pad request bytes into a usable size */
#define pad_request(req) \
  (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

/* pad request, checking for minimum (but not maximum) */
#define request2size(req) \
  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
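
/*
  Worked example (a sketch, assuming 64-bit size_t, 16-byte
  MALLOC_ALIGNMENT, and no FOOTERS, so CHUNK_OVERHEAD == 8):
  request2size(1) == MIN_CHUNK_SIZE == 32, request2size(24) == 32,
  and request2size(25) == 48, since 25 + 8 = 33 rounds up to the next
  16-byte multiple.
*/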

/* ------------------ Operations on head and foot fields ----------------- */

/*
  The head field of a chunk is or'ed with PINUSE_BIT when the previous
  adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is
  in use, unless mmapped, in which case both bits are cleared.

  FLAG4_BIT is not used by this malloc, but might be useful in extensions.
*/

#define PINUSE_BIT (SIZE_T_ONE)
#define CINUSE_BIT (SIZE_T_TWO)
#define FLAG4_BIT (SIZE_T_FOUR)
#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
#define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)

/* Head value for fenceposts */
#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)

/* extraction of fields from head words */
#define cinuse(p) ((p)->head & CINUSE_BIT)
#define pinuse(p) ((p)->head & PINUSE_BIT)
#define flag4inuse(p) ((p)->head & FLAG4_BIT)
#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
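
/*
  Worked example (a sketch): a free 48-byte chunk whose previous
  neighbor is in use has head == 48|PINUSE_BIT == 0x31, so
  chunksize() == 48, pinuse() is nonzero, cinuse() is zero, and
  is_inuse() is false because exactly PINUSE_BIT is set. An mmapped
  chunk clears both bits, which is why is_mmapped() tests for
  INUSE_BITS == 0.
*/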

#define chunksize(p) ((p)->head & ~(FLAG_BITS))

#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
#define set_flag4(p) ((p)->head |= FLAG4_BIT)
#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)

/* Treat space at ptr +/- offset as a chunk */
#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))

/* Ptr to next or previous physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))

/* extract next chunk's pinuse bit */
#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)

/* Get/set size at footer */
#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))

/* Set size, pinuse bit, and foot */
#define set_size_and_pinuse_of_free_chunk(p, s)\
  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))

/* Set size, pinuse bit, foot, and clear next pinuse */
#define set_free_with_pinuse(p, s, n)\
  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

/* Get the internal overhead associated with chunk p */
#define overhead_for(p)\
  (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)

/* Return true if malloced space is not necessarily cleared */
#if MMAP_CLEARS
#define calloc_must_clear(p) (!is_mmapped(p))
#else /* MMAP_CLEARS */
#define calloc_must_clear(p) (1)
#endif /* MMAP_CLEARS */

struct malloc_tree_chunk {
  /* The first four fields must be compatible with malloc_chunk */
  size_t prev_foot;
  size_t head;
  struct malloc_tree_chunk* fd;
  struct malloc_tree_chunk* bk;

  struct malloc_tree_chunk* child[2];
  struct malloc_tree_chunk* parent;
  bindex_t index;
};

typedef struct malloc_tree_chunk tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */

/* A little helper macro for trees */
#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])

struct malloc_segment {
  char* base;                  /* base address */
  size_t size;                 /* allocated size */
  struct malloc_segment* next; /* ptr to next segment */
  flag_t sflags;               /* mmap and extern flag */
};

#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)

typedef struct malloc_segment msegment;
typedef struct malloc_segment* msegmentptr;

/* Bin types, widths and sizes */
#define NSMALLBINS (32U)
#define NTREEBINS (32U)
#define SMALLBIN_SHIFT (3U)
#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT (8U)
#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
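
/*
  Concretely (a sketch using the constants above): padded sizes below
  MIN_LARGE_SIZE (256) are "small" and map to one of 32 exact-size
  smallbins spaced SMALLBIN_WIDTH (8) bytes apart; larger sizes go to
  one of 32 treebins, each covering half of a power-of-two range, so
  treebin 0 holds [256,384), treebin 1 holds [384,512), treebin 2
  holds [512,768), and so on.
*/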

struct malloc_state {
  binmap_t smallmap;
  binmap_t treemap;
  size_t dvsize;
  size_t topsize;
  char* least_addr;
  mchunkptr dv;
  mchunkptr top;
  size_t trim_check;
  size_t release_checks;
  size_t magic;
  mchunkptr smallbins[(NSMALLBINS+1)*2];
  tbinptr treebins[NTREEBINS];
  size_t footprint;
  size_t max_footprint;
  size_t footprint_limit; /* zero means no limit */
  flag_t mflags;
#if USE_LOCKS
  MLOCK_T mutex; /* locate lock among fields that rarely change */
#endif /* USE_LOCKS */
  msegment seg;
  void* extp; /* Unused but available for extensions */
  size_t exts;
};

typedef struct malloc_state* mstate;

/* ------------- Global malloc_state and malloc_params ------------------- */

/*
  malloc_params holds global properties, including those that can be
  dynamically set using mallopt. There is a single instance, mparams,
  initialized in init_mparams. Note that the non-zeroness of "magic"
  also serves as an initialization flag.
*/

struct malloc_params {
  size_t magic;
  size_t page_size;
  size_t granularity;
  size_t mmap_threshold;
  size_t trim_threshold;
  flag_t default_mflags;
};

static struct malloc_params mparams;

/* Ensure mparams initialized */
#define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())

#if !ONLY_MSPACES

/* The global malloc_state used for all non-"mspace" calls */
static struct malloc_state _gm_;
#define gm (&_gm_)
#define is_global(M) ((M) == &_gm_)

#endif /* !ONLY_MSPACES */

#define is_initialized(M) ((M)->top != 0)

/* -------------------------- system alloc setup ------------------------- */

/* Operations on mflags */

#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
#if USE_LOCKS
#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
#else
#define disable_lock(M)
#endif

#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
#if HAVE_MMAP
#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
#else
#define disable_mmap(M)
#endif

#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)

#define set_lock(M,L)\
  ((M)->mflags = (L)?\
   ((M)->mflags | USE_LOCK_BIT) :\
   ((M)->mflags & ~USE_LOCK_BIT))

/* page-align a size */
#define page_align(S)\
  (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))

/* granularity-align a size */
#define granularity_align(S)\
  (((S) + (mparams.granularity - SIZE_T_ONE))\
   & ~(mparams.granularity - SIZE_T_ONE))

/* For mmap, use granularity alignment on windows, else page-align */
#if defined(WIN32) || defined(__COSMOPOLITAN__)
#define mmap_align(S) granularity_align(S)
#else
#define mmap_align(S) page_align(S)
#endif

/* For sys_alloc, enough padding to ensure we can malloc the request on success */
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)

#define is_page_aligned(S)\
  (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S)\
  (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
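
/*
  Quick arithmetic check (a sketch, assuming a 4096-byte page):
  page_align(1) == 4096, page_align(4096) == 4096, and
  page_align(4097) == 8192. The mask trick is valid because
  init_mparams() below verifies that page_size and granularity are
  powers of two.
*/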

/* True if segment S holds address A */
#define segment_holds(S, A)\
  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)

/* Return segment holding given address */
static msegmentptr segment_holding(mstate m, char* addr) {
  msegmentptr sp = &m->seg;
  for (;;) {
    if (addr >= sp->base && addr < sp->base + sp->size)
      return sp;
    if ((sp = sp->next) == 0)
      return 0;
  }
}

/* Return true if segment contains a segment link */
static int has_segment_link(mstate m, msegmentptr ss) {
  msegmentptr sp = &m->seg;
  for (;;) {
    if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
      return 1;
    if ((sp = sp->next) == 0)
      return 0;
  }
}
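
/*
  Usage note: both helpers walk the singly-linked segment list rooted
  at m->seg, so they cost O(number of segments); new segments are only
  added when noncontiguous system memory is obtained, so the list
  stays short in practice.
*/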

#ifndef MORECORE_CANNOT_TRIM
#define should_trim(M,s) ((s) > (M)->trim_check)
#else /* MORECORE_CANNOT_TRIM */
#define should_trim(M,s) (0)
#endif /* MORECORE_CANNOT_TRIM */

/*
  TOP_FOOT_SIZE is padding at the end of a segment, including space
  that may be needed to place segment records and fenceposts when new
  noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE\
  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)

/* ------------------------------- Hooks -------------------------------- */

/*
  PREACTION should be defined to return 0 on success, and nonzero on
  failure. If you are not using locking, you can redefine these to do
  anything you like.
*/

#if USE_LOCKS
#define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
#else /* USE_LOCKS */

#ifndef PREACTION
#define PREACTION(M) (0)
#endif /* PREACTION */

#ifndef POSTACTION
#define POSTACTION(M)
#endif /* POSTACTION */

#endif /* USE_LOCKS */

/*
  CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
  USAGE_ERROR_ACTION is triggered on detected bad frees and
  reallocs. The argument p is an address that might have triggered the
  fault. It is ignored by the two predefined actions, but might be
  useful in custom actions that try to help diagnose errors.
*/

#if PROCEED_ON_ERROR

/* A count of the number of corruption errors causing resets */
int malloc_corruption_error_count;

/* default corruption action */
static void reset_on_error(mstate m);

#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
#define USAGE_ERROR_ACTION(m, p)

#else /* PROCEED_ON_ERROR */

#ifndef CORRUPTION_ERROR_ACTION
#define CORRUPTION_ERROR_ACTION(m) ABORT
#endif /* CORRUPTION_ERROR_ACTION */

#ifndef USAGE_ERROR_ACTION
#define USAGE_ERROR_ACTION(m,p) ABORT
#endif /* USAGE_ERROR_ACTION */

#endif /* PROCEED_ON_ERROR */

/* -------------------------- Debugging setup ---------------------------- */

#if ! DEBUG

#define check_free_chunk(M,P)
#define check_inuse_chunk(M,P)
#define check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P)
#define check_malloc_state(M)
#define check_top_chunk(M,P)

#else /* DEBUG */
#define check_free_chunk(M,P) do_check_free_chunk(M,P)
#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
#define check_top_chunk(M,P) do_check_top_chunk(M,P)
#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
#define check_malloc_state(M) do_check_malloc_state(M)

static void do_check_any_chunk(mstate m, mchunkptr p);
static void do_check_top_chunk(mstate m, mchunkptr p);
static void do_check_mmapped_chunk(mstate m, mchunkptr p);
static void do_check_inuse_chunk(mstate m, mchunkptr p);
static void do_check_free_chunk(mstate m, mchunkptr p);
static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
static void do_check_tree(mstate m, tchunkptr t);
static void do_check_treebin(mstate m, bindex_t i);
static void do_check_smallbin(mstate m, bindex_t i);
static void do_check_malloc_state(mstate m);
static int bin_find(mstate m, mchunkptr x);
static size_t traverse_and_check(mstate m);
#endif /* DEBUG */

/* ---------------------------- Indexing Bins ---------------------------- */

#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))

/* addressing by index. See above about smallbin repositioning */
#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
#define treebin_at(M,i) (&((M)->treebins[i]))

/* assign tree index for size S to variable I. Use x86 asm if possible */
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define compute_tree_index(S, I)\
{\
  unsigned int X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
    I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}

#elif defined (__INTEL_COMPILER)
#define compute_tree_index(S, I)\
{\
  size_t X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int K = _bit_scan_reverse (X); \
    I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}

#elif defined(_MSC_VER) && _MSC_VER>=1300
#define compute_tree_index(S, I)\
{\
  size_t X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int K;\
    _BitScanReverse((DWORD *) &K, (DWORD) X);\
    I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
  }\
}

#else /* GNUC */
#define compute_tree_index(S, I)\
{\
  size_t X = S >> TREEBIN_SHIFT;\
  if (X == 0)\
    I = 0;\
  else if (X > 0xFFFF)\
    I = NTREEBINS-1;\
  else {\
    unsigned int Y = (unsigned int)X;\
    unsigned int N = ((Y - 0x100) >> 16) & 8;\
    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
    N += K;\
    N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
    K = 14 - N + ((Y <<= K) >> 15);\
    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
  }\
}
#endif /* GNUC */
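
/*
  Worked example (a sketch): for S == 640, X == S >> TREEBIN_SHIFT ==
  2, so K == 1 (the index of the highest set bit of X) and I ==
  (1 << 1) + ((640 >> 8) & 1) == 2. Sizes in [512,1024) split between
  treebin 2, holding [512,768), and treebin 3, holding [768,1024);
  the final term extracts the bit of S that decides between the two
  halves.
*/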

/* Bit representing maximum resolved size in a treebin at i */
#define bit_for_tree_index(i) \
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

/* Shift placing maximum resolved bit in a treebin at i as sign bit */
#define leftshift_for_tree_index(i) \
   ((i == NTREEBINS-1)? 0 : \
    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

/* The size of the smallest chunk held in bin with index i */
#define minsize_for_tree_index(i) \
   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))

/* ------------------------ Operations on bin maps ----------------------- */

/* bit corresponding to given index */
#define idx2bit(i) ((binmap_t)(1) << (i))

/* Mark/Clear bits with given index */
#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))

#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))

/* isolate the least set bit of a bitmap */
#define least_bit(x) ((x) & -(x))

/* mask with all bits to left of least bit of x on */
#define left_bits(x) ((x<<1) | -(x<<1))

/* mask with all bits to left of or equal to least bit of x on */
#define same_or_left_bits(x) ((x) | -(x))

/* index corresponding to given bit. Use x86 asm if possible */

#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  J = __builtin_ctz(X); \
  I = (bindex_t)J;\
}

#elif defined (__INTEL_COMPILER)
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  J = _bit_scan_forward (X); \
  I = (bindex_t)J;\
}

#elif defined(_MSC_VER) && _MSC_VER>=1300
#define compute_bit2idx(X, I)\
{\
  unsigned int J;\
  _BitScanForward((DWORD *) &J, X);\
  I = (bindex_t)J;\
}

#elif USE_BUILTIN_FFS
#define compute_bit2idx(X, I) I = ffs(X)-1

#else
#define compute_bit2idx(X, I)\
{\
  unsigned int Y = X - 1;\
  unsigned int K = Y >> (16-4) & 16;\
  unsigned int N = K; Y >>= K;\
  N += K = Y >> (8-3) & 8; Y >>= K;\
  N += K = Y >> (4-2) & 4; Y >>= K;\
  N += K = Y >> (2-1) & 2; Y >>= K;\
  N += K = Y >> (1-0) & 1; Y >>= K;\
  I = (bindex_t)(N + Y);\
}
#endif /* GNUC */
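
/*
  Usage sketch: a bin search isolates the lowest qualifying bit and
  converts it back to an index, e.g.

    binmap_t map = 0x48;               // bins 3 and 6 are populated
    binmap_t b = least_bit(map);       // 0x08
    bindex_t i;
    compute_bit2idx(b, i);             // i == 3

  which is how the small-request path skips empty bins in constant
  time.
*/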

/* ----------------------- Runtime Check Support ------------------------- */

/*
  For security, the main invariant is that malloc/free/etc never
  writes to a static address other than malloc_state, unless static
  malloc_state itself has been corrupted, which cannot occur via
  malloc (because of these checks). In essence this means that we
  believe all pointers, sizes, maps etc held in malloc_state, but
  check all of those linked or offsetted from other embedded data
  structures. These checks are interspersed with main code in a way
  that tends to minimize their run-time cost.

  When FOOTERS is defined, in addition to range checking, we also
  verify footer fields of inuse chunks, which can be used to guarantee
  that the mstate controlling malloc/free is intact. This is a
  streamlined version of the approach described by William Robertson
  et al in "Run-time Detection of Heap-based Overflows" LISA'03
  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
  of an inuse chunk holds the xor of its mstate and a random seed,
  that is checked upon calls to free() and realloc(). This is
  (probabilistically) unguessable from outside the program, but can be
  computed by any code successfully malloc'ing any chunk, so does not
  itself provide protection against code that has already broken
  security through some other means. Unlike Robertson et al, we
  always dynamically check addresses of all offset chunks (previous,
  next, etc). This turns out to be cheaper than relying on hashes.
*/
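
/*
  The footer scheme boils down to the two FOOTERS macros below:
  mark_inuse_foot() stores ((size_t)m ^ mparams.magic) just past the
  chunk, and get_mstate_for() xors that word with the same magic to
  recover m. A sketch of the check free() performs when FOOTERS is
  enabled:

    mstate fm = get_mstate_for(p);
    if (!ok_magic(fm))          // footer missing or corrupted
      USAGE_ERROR_ACTION(fm, p);
*/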

#if !INSECURE
/* Check if address a is at least as high as any from MORECORE or MMAP */
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
/* Check if address of next chunk n is higher than base chunk p */
#define ok_next(p, n) ((char*)(p) < (char*)(n))
/* Check if p has inuse status */
#define ok_inuse(p) is_inuse(p)
/* Check if p has its pinuse bit on */
#define ok_pinuse(p) pinuse(p)

#else /* !INSECURE */
#define ok_address(M, a) (1)
#define ok_next(b, n) (1)
#define ok_inuse(p) (1)
#define ok_pinuse(p) (1)
#endif /* !INSECURE */

#if (FOOTERS && !INSECURE)
/* Check if (alleged) mstate m has expected magic field */
#define ok_magic(M) ((M)->magic == mparams.magic)
#else /* (FOOTERS && !INSECURE) */
#define ok_magic(M) (1)
#endif /* (FOOTERS && !INSECURE) */

/* In gcc, use __builtin_expect to minimize impact of checks */
#if !INSECURE
#if defined(__GNUC__) && __GNUC__ >= 3
#define RTCHECK(e) __builtin_expect(e, 1)
#else /* GNUC */
#define RTCHECK(e) (e)
#endif /* GNUC */
#else /* !INSECURE */
#define RTCHECK(e) (1)
#endif /* !INSECURE */

/* macros to set up inuse chunks with or without footers */

#if !FOOTERS

#define mark_inuse_foot(M,p,s)

/* Macros for setting head/foot of non-mmapped chunks */

/* Set cinuse bit and pinuse bit of next chunk */
#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

/* Set size, cinuse and pinuse bit of this chunk */
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))

#else /* FOOTERS */

/* Set foot of inuse chunk to be xor of mstate and seed */
#define mark_inuse_foot(M,p,s)\
  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))

#define get_mstate_for(p)\
  ((mstate)(((mchunkptr)((char*)(p) +\
    (chunksize(p))))->prev_foot ^ mparams.magic))

#define set_inuse(M,p,s)\
  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
  mark_inuse_foot(M,p,s))

#define set_inuse_and_pinuse(M,p,s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
  mark_inuse_foot(M,p,s))

#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
  mark_inuse_foot(M, p, s))

#endif /* !FOOTERS */

/* ---------------------------- setting mparams -------------------------- */

#if LOCK_AT_FORK
static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); }
static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); }
#endif /* LOCK_AT_FORK */

/* Initialize mparams */
static int init_mparams(void) {
#ifdef NEED_GLOBAL_LOCK_INIT
  if (malloc_global_mutex_status <= 0)
    init_malloc_global_mutex();
#endif

  ACQUIRE_MALLOC_GLOBAL_LOCK();
  if (mparams.magic == 0) {
    size_t magic;
    size_t psize;
    size_t gsize;

#ifndef WIN32
    psize = malloc_getpagesize;
    gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
#else /* WIN32 */
    {
      SYSTEM_INFO system_info;
      GetSystemInfo(&system_info);
      psize = system_info.dwPageSize;
      gsize = ((DEFAULT_GRANULARITY != 0)?
               DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
    }
#endif /* WIN32 */

    /* Sanity-check configuration:
       size_t must be unsigned and as wide as pointer type.
       ints must be at least 4 bytes.
       alignment must be at least 8.
       Alignment, min chunk size, and page size must all be powers of 2.
    */
    if ((sizeof(size_t) != sizeof(char*)) ||
        (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
        (sizeof(int) < 4) ||
        (MALLOC_ALIGNMENT < (size_t)8U) ||
        ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
        ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
        ((gsize & (gsize-SIZE_T_ONE)) != 0) ||
        ((psize & (psize-SIZE_T_ONE)) != 0))
      ABORT;
    mparams.granularity = gsize;
    mparams.page_size = psize;
    mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
    mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
#if MORECORE_CONTIGUOUS
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
#else /* MORECORE_CONTIGUOUS */
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
#endif /* MORECORE_CONTIGUOUS */

#if !ONLY_MSPACES
    /* Set up lock for main malloc area */
    gm->mflags = mparams.default_mflags;
    (void)INITIAL_LOCK(&gm->mutex);
#endif
#if LOCK_AT_FORK
    pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
#endif

    {
#if USE_DEV_RANDOM
      int fd;
      unsigned char buf[sizeof(size_t)];
      /* Try to use /dev/urandom, else fall back on using time */
      if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
          read(fd, buf, sizeof(buf)) == sizeof(buf)) {
        magic = *((size_t *) buf);
        close(fd);
      }
      else
#endif /* USE_DEV_RANDOM */
      magic = (size_t)(rand64() ^ (size_t)0x55555555U);
      magic |= (size_t)8U;  /* ensure nonzero */
      magic &= ~(size_t)7U; /* improve chances of fault for bad values */
      /* Until memory ordering primitives are commonly available, use a volatile write */
      (*(volatile size_t *)(&(mparams.magic))) = magic;
    }
  }

  RELEASE_MALLOC_GLOBAL_LOCK();
  return 1;
}

/* support for mallopt */
static int change_mparam(int param_number, int value) {
  size_t val;
  ensure_initialization();
  val = (value == -1)? MAX_SIZE_T : (size_t)value;
  switch(param_number) {
  case M_TRIM_THRESHOLD:
    mparams.trim_threshold = val;
    return 1;
  case M_GRANULARITY:
    if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
      mparams.granularity = val;
      return 1;
    }
    else
      return 0;
  case M_MMAP_THRESHOLD:
    mparams.mmap_threshold = val;
    return 1;
  default:
    return 0;
  }
}
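
/*
  Callers reach this through mallopt(). For example (a sketch, using
  the conventional dlmalloc parameter numbers, where M_GRANULARITY is
  -2): mallopt(M_GRANULARITY, 64 * 1024) succeeds only because 64 KiB
  is a power of two no smaller than the page size, while
  mallopt(M_TRIM_THRESHOLD, -1) maps the -1 to MAX_SIZE_T and thereby
  disables trimming.
*/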

#if DEBUG
/* ------------------------- Debugging Support --------------------------- */

/* Check properties of any chunk, whether free, inuse, mmapped etc */
static void do_check_any_chunk(mstate m, mchunkptr p) {
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
}

/* Check properties of top chunk */
static void do_check_top_chunk(mstate m, mchunkptr p) {
  msegmentptr sp = segment_holding(m, (char*)p);
  size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
  assert(sp != 0);
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
  assert(sz == m->topsize);
  assert(sz > 0);
  assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
  assert(pinuse(p));
  assert(!pinuse(chunk_plus_offset(p, sz)));
}

/* Check properties of (inuse) mmapped chunks */
static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
  size_t sz = chunksize(p);
  size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
  assert(is_mmapped(p));
  assert(use_mmap(m));
  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
  assert(ok_address(m, p));
  assert(!is_small(sz));
  assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
  assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
  assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
}

/* Check properties of inuse chunks */
static void do_check_inuse_chunk(mstate m, mchunkptr p) {
  do_check_any_chunk(m, p);
  assert(is_inuse(p));
  assert(next_pinuse(p));
  /* If not pinuse and not mmapped, previous chunk has OK offset */
  assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
  if (is_mmapped(p))
    do_check_mmapped_chunk(m, p);
}

/* Check properties of free chunks */
static void do_check_free_chunk(mstate m, mchunkptr p) {
  size_t sz = chunksize(p);
  mchunkptr next = chunk_plus_offset(p, sz);
  do_check_any_chunk(m, p);
  assert(!is_inuse(p));
  assert(!next_pinuse(p));
  assert(!is_mmapped(p));
  if (p != m->dv && p != m->top) {
    if (sz >= MIN_CHUNK_SIZE) {
      assert((sz & CHUNK_ALIGN_MASK) == 0);
      assert(is_aligned(chunk2mem(p)));
      assert(next->prev_foot == sz);
      assert(pinuse(p));
      assert(next == m->top || is_inuse(next));
      assert(p->fd->bk == p);
      assert(p->bk->fd == p);
    }
    else /* markers are always of size SIZE_T_SIZE */
      assert(sz == SIZE_T_SIZE);
  }
}

/* Check properties of malloced chunks at the point they are malloced */
static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
    size_t sz = p->head & ~INUSE_BITS;
    do_check_inuse_chunk(m, p);
    assert((sz & CHUNK_ALIGN_MASK) == 0);
    assert(sz >= MIN_CHUNK_SIZE);
    assert(sz >= s);
    /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
    assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
  }
}

/* Check a tree and its subtrees. */
static void do_check_tree(mstate m, tchunkptr t) {
  tchunkptr head = 0;
  tchunkptr u = t;
  bindex_t tindex = t->index;
  size_t tsize = chunksize(t);
  bindex_t idx;
  compute_tree_index(tsize, idx);
  assert(tindex == idx);
  assert(tsize >= MIN_LARGE_SIZE);
  assert(tsize >= minsize_for_tree_index(idx));
  assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));

  do { /* traverse through chain of same-sized nodes */
    do_check_any_chunk(m, ((mchunkptr)u));
    assert(u->index == tindex);
    assert(chunksize(u) == tsize);
    assert(!is_inuse(u));
    assert(!next_pinuse(u));
    assert(u->fd->bk == u);
    assert(u->bk->fd == u);
    if (u->parent == 0) {
      assert(u->child[0] == 0);
      assert(u->child[1] == 0);
    }
    else {
      assert(head == 0); /* only one node on chain has parent */
      head = u;
      assert(u->parent != u);
      assert(u->parent->child[0] == u ||
             u->parent->child[1] == u ||
             *((tbinptr*)(u->parent)) == u);
      if (u->child[0] != 0) {
        assert(u->child[0]->parent == u);
        assert(u->child[0] != u);
        do_check_tree(m, u->child[0]);
      }
      if (u->child[1] != 0) {
        assert(u->child[1]->parent == u);
        assert(u->child[1] != u);
        do_check_tree(m, u->child[1]);
      }
      if (u->child[0] != 0 && u->child[1] != 0) {
        assert(chunksize(u->child[0]) < chunksize(u->child[1]));
      }
    }
    u = u->fd;
  } while (u != t);
  assert(head != 0);
}

/* Check all the chunks in a treebin. */
static void do_check_treebin(mstate m, bindex_t i) {
  tbinptr* tb = treebin_at(m, i);
  tchunkptr t = *tb;
  int empty = (m->treemap & (1U << i)) == 0;
  if (t == 0)
    assert(empty);
  if (!empty)
    do_check_tree(m, t);
}

/* Check all the chunks in a smallbin. */
static void do_check_smallbin(mstate m, bindex_t i) {
  sbinptr b = smallbin_at(m, i);
  mchunkptr p = b->bk;
  unsigned int empty = (m->smallmap & (1U << i)) == 0;
  if (p == b)
    assert(empty);
  if (!empty) {
    for (; p != b; p = p->bk) {
      size_t size = chunksize(p);
      mchunkptr q;
      /* each chunk claims to be free */
      do_check_free_chunk(m, p);
      /* chunk belongs in bin */
      assert(small_index(size) == i);
      assert(p->bk == b || chunksize(p->bk) == chunksize(p));
      /* chunk is followed by an inuse chunk */
      q = next_chunk(p);
      if (q->head != FENCEPOST_HEAD)
        do_check_inuse_chunk(m, q);
    }
  }
}

/* Find x in a bin. Used in other check functions. */
static int bin_find(mstate m, mchunkptr x) {
  size_t size = chunksize(x);
  if (is_small(size)) {
    bindex_t sidx = small_index(size);
    sbinptr b = smallbin_at(m, sidx);
    if (smallmap_is_marked(m, sidx)) {
      mchunkptr p = b;
      do {
        if (p == x)
          return 1;
      } while ((p = p->fd) != b);
    }
  }
  else {
    bindex_t tidx;
    compute_tree_index(size, tidx);
    if (treemap_is_marked(m, tidx)) {
      tchunkptr t = *treebin_at(m, tidx);
      size_t sizebits = size << leftshift_for_tree_index(tidx);
      while (t != 0 && chunksize(t) != size) {
        t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
        sizebits <<= 1;
      }
      if (t != 0) {
        tchunkptr u = t;
        do {
          if (u == (tchunkptr)x)
            return 1;
        } while ((u = u->fd) != t);
      }
    }
  }
  return 0;
}

/* Traverse each chunk and check it; return total */
static size_t traverse_and_check(mstate m) {
  size_t sum = 0;
  if (is_initialized(m)) {
    msegmentptr s = &m->seg;
    sum += m->topsize + TOP_FOOT_SIZE;
    while (s != 0) {
      mchunkptr q = align_as_chunk(s->base);
      mchunkptr lastq = 0;
      assert(pinuse(q));
      while (segment_holds(s, q) &&
             q != m->top && q->head != FENCEPOST_HEAD) {
        sum += chunksize(q);
        if (is_inuse(q)) {
          assert(!bin_find(m, q));
          do_check_inuse_chunk(m, q);
        }
        else {
          assert(q == m->dv || bin_find(m, q));
          assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
          do_check_free_chunk(m, q);
        }
        lastq = q;
        q = next_chunk(q);
      }
      s = s->next;
    }
  }
  return sum;
}

/* Check all properties of malloc_state. */
static void do_check_malloc_state(mstate m) {
  bindex_t i;
  size_t total;
  /* check bins */
  for (i = 0; i < NSMALLBINS; ++i)
    do_check_smallbin(m, i);
  for (i = 0; i < NTREEBINS; ++i)
    do_check_treebin(m, i);

  if (m->dvsize != 0) { /* check dv chunk */
    do_check_any_chunk(m, m->dv);
    assert(m->dvsize == chunksize(m->dv));
    assert(m->dvsize >= MIN_CHUNK_SIZE);
    assert(bin_find(m, m->dv) == 0);
  }

  if (m->top != 0) { /* check top chunk */
    do_check_top_chunk(m, m->top);
    /*assert(m->topsize == chunksize(m->top)); redundant */
    assert(m->topsize > 0);
    assert(bin_find(m, m->top) == 0);
  }

  total = traverse_and_check(m);
  assert(total <= m->footprint);
  assert(m->footprint <= m->max_footprint);
}
#endif /* DEBUG */

/* ----------------------------- statistics ------------------------------ */

#if !NO_MALLINFO
static struct mallinfo internal_mallinfo(mstate m) {
  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  ensure_initialization();
  if (!PREACTION(m)) {
    check_malloc_state(m);
    if (is_initialized(m)) {
      size_t nfree = SIZE_T_ONE; /* top always free */
      size_t mfree = m->topsize + TOP_FOOT_SIZE;
      size_t sum = mfree;
      msegmentptr s = &m->seg;
      while (s != 0) {
        mchunkptr q = align_as_chunk(s->base);
        while (segment_holds(s, q) &&
               q != m->top && q->head != FENCEPOST_HEAD) {
          size_t sz = chunksize(q);
          sum += sz;
          if (!is_inuse(q)) {
            mfree += sz;
            ++nfree;
          }
          q = next_chunk(q);
        }
        s = s->next;
      }

      nm.arena = sum;
      nm.ordblks = nfree;
      nm.hblkhd = m->footprint - sum;
      nm.usmblks = m->max_footprint;
      nm.uordblks = m->footprint - mfree;
      nm.fordblks = mfree;
      nm.keepcost = m->topsize;
    }

    POSTACTION(m);
  }
  return nm;
}
#endif /* !NO_MALLINFO */

#if !NO_MALLOC_STATS
static void internal_malloc_stats(mstate m) {
  ensure_initialization();
  if (!PREACTION(m)) {
    size_t maxfp = 0;
    size_t fp = 0;
    size_t used = 0;
    check_malloc_state(m);
    if (is_initialized(m)) {
      msegmentptr s = &m->seg;
      maxfp = m->max_footprint;
      fp = m->footprint;
      used = fp - (m->topsize + TOP_FOOT_SIZE);

      while (s != 0) {
        mchunkptr q = align_as_chunk(s->base);
        while (segment_holds(s, q) &&
               q != m->top && q->head != FENCEPOST_HEAD) {
          if (!is_inuse(q))
            used -= chunksize(q);
          q = next_chunk(q);
        }
        s = s->next;
      }
    }
    POSTACTION(m); /* drop lock */
    kprintf("max system bytes = %10lu\n", (unsigned long)(maxfp));
    kprintf("system bytes     = %10lu\n", (unsigned long)(fp));
    kprintf("in use bytes     = %10lu\n", (unsigned long)(used));
  }
}
#endif /* NO_MALLOC_STATS */

/* ----------------------- Operations on smallbins ----------------------- */

/*
  Various forms of linking and unlinking are defined as macros. Even
  the ones for trees, which are very long but have very short typical
  paths. This is ugly but reduces reliance on inlining support of
  compilers.
*/

/* Link a free chunk into a smallbin */
#define insert_small_chunk(M, P, S) {\
  bindex_t I = small_index(S);\
  mchunkptr B = smallbin_at(M, I);\
  mchunkptr F = B;\
  assert(S >= MIN_CHUNK_SIZE);\
  if (!smallmap_is_marked(M, I))\
    mark_smallmap(M, I);\
  else if (RTCHECK(ok_address(M, B->fd)))\
    F = B->fd;\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
  B->fd = P;\
  F->bk = P;\
  P->fd = F;\
  P->bk = B;\
}
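
/*
  Pictorially (a sketch): each smallbin header B is treated as a chunk
  in a circular doubly-linked list, and insertion always happens at
  the front, so after the macro runs the list reads B <-> P <-> F <->
  ... <-> B. An empty bin is one where B->fd == B->bk == B, the state
  init_bins() establishes below.
*/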

/* Unlink a chunk from a smallbin */
#define unlink_small_chunk(M, P, S) {\
  mchunkptr F = P->fd;\
  mchunkptr B = P->bk;\
  bindex_t I = small_index(S);\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
    if (B == F) {\
      clear_smallmap(M, I);\
    }\
    else if (RTCHECK(B == smallbin_at(M,I) ||\
                     (ok_address(M, B) && B->fd == P))) {\
      F->bk = B;\
      B->fd = F;\
    }\
    else {\
      CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

/* Unlink the first chunk from a smallbin */
#define unlink_first_small_chunk(M, B, P, I) {\
  mchunkptr F = P->fd;\
  assert(P != B);\
  assert(P != F);\
  assert(chunksize(P) == small_index2size(I));\
  if (B == F) {\
    clear_smallmap(M, I);\
  }\
  else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
    F->bk = B;\
    B->fd = F;\
  }\
  else {\
    CORRUPTION_ERROR_ACTION(M);\
  }\
}

/* Replace dv node, binning the old one */
/* Used only when dvsize known to be small */
#define replace_dv(M, P, S) {\
  size_t DVS = M->dvsize;\
  assert(is_small(DVS));\
  if (DVS != 0) {\
    mchunkptr DV = M->dv;\
    insert_small_chunk(M, DV, DVS);\
  }\
  M->dvsize = S;\
  M->dv = P;\
}

/* ------------------------- Operations on trees ------------------------- */

/* Insert chunk into tree */
#define insert_large_chunk(M, X, S) {\
  tbinptr* H;\
  bindex_t I;\
  compute_tree_index(S, I);\
  H = treebin_at(M, I);\
  X->index = I;\
  X->child[0] = X->child[1] = 0;\
  if (!treemap_is_marked(M, I)) {\
    mark_treemap(M, I);\
    *H = X;\
    X->parent = (tchunkptr)H;\
    X->fd = X->bk = X;\
  }\
  else {\
    tchunkptr T = *H;\
    size_t K = S << leftshift_for_tree_index(I);\
    for (;;) {\
      if (chunksize(T) != S) {\
        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
        K <<= 1;\
        if (*C != 0)\
          T = *C;\
        else if (RTCHECK(ok_address(M, C))) {\
          *C = X;\
          X->parent = T;\
          X->fd = X->bk = X;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
      else {\
        tchunkptr F = T->fd;\
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
          T->fd = F->bk = X;\
          X->fd = F;\
          X->bk = T;\
          X->parent = 0;\
          break;\
        }\
        else {\
          CORRUPTION_ERROR_ACTION(M);\
          break;\
        }\
      }\
    }\
  }\
}

/*
  Unlink steps:

  1. If x is a chained node, unlink it from its same-sized fd/bk links
     and choose its bk node as its replacement.
  2. If x was the last node of its size, but not a leaf node, it must
     be replaced with a leaf node (not merely one with an open left or
     right), to make sure that lefts and rights of descendants
     correspond properly to bit masks. We use the rightmost descendant
     of x. We could use any other leaf, but this is easy to locate and
     tends to counteract removal of leftmosts elsewhere, and so keeps
     paths shorter than minimally guaranteed. This doesn't loop much
     because on average a node in a tree is near the bottom.
  3. If x is the base of a chain (i.e., has parent links) relink
     x's parent and children to x's replacement (or null if none).
*/

#define unlink_large_chunk(M, X) {\
  tchunkptr XP = X->parent;\
  tchunkptr R;\
  if (X->bk != X) {\
    tchunkptr F = X->fd;\
    R = X->bk;\
    if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
      F->bk = R;\
      R->fd = F;\
    }\
    else {\
      CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
  else {\
    tchunkptr* RP;\
    if (((R = *(RP = &(X->child[1]))) != 0) ||\
        ((R = *(RP = &(X->child[0]))) != 0)) {\
      tchunkptr* CP;\
      while ((*(CP = &(R->child[1])) != 0) ||\
             (*(CP = &(R->child[0])) != 0)) {\
        R = *(RP = CP);\
      }\
      if (RTCHECK(ok_address(M, RP)))\
        *RP = 0;\
      else {\
        CORRUPTION_ERROR_ACTION(M);\
      }\
    }\
  }\
  if (XP != 0) {\
    tbinptr* H = treebin_at(M, X->index);\
    if (X == *H) {\
      if ((*H = R) == 0) \
        clear_treemap(M, X->index);\
    }\
    else if (RTCHECK(ok_address(M, XP))) {\
      if (XP->child[0] == X) \
        XP->child[0] = R;\
      else \
        XP->child[1] = R;\
    }\
    else\
      CORRUPTION_ERROR_ACTION(M);\
    if (R != 0) {\
      if (RTCHECK(ok_address(M, R))) {\
        tchunkptr C0, C1;\
        R->parent = XP;\
        if ((C0 = X->child[0]) != 0) {\
          if (RTCHECK(ok_address(M, C0))) {\
            R->child[0] = C0;\
            C0->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
        if ((C1 = X->child[1]) != 0) {\
          if (RTCHECK(ok_address(M, C1))) {\
            R->child[1] = C1;\
            C1->parent = R;\
          }\
          else\
            CORRUPTION_ERROR_ACTION(M);\
        }\
      }\
      else\
        CORRUPTION_ERROR_ACTION(M);\
    }\
  }\
}

/* Relays to large vs small bin operations */

#define insert_chunk(M, P, S)\
  if (is_small(S)) insert_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }

#define unlink_chunk(M, P, S)\
  if (is_small(S)) unlink_small_chunk(M, P, S)\
  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }

/* Relays to internal calls to malloc/free from realloc, memalign etc */

#if ONLY_MSPACES
#define internal_malloc(m, b) mspace_malloc(m, b)
#define internal_free(m, mem) mspace_free(m,mem);
#else /* ONLY_MSPACES */
#if MSPACES
#define internal_malloc(m, b)\
  ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
#define internal_free(m, mem)\
  if (m == gm) dlfree(mem); else mspace_free(m,mem);
#else /* MSPACES */
#define internal_malloc(m, b) dlmalloc(b)
#define internal_free(m, mem) dlfree(mem)
#endif /* MSPACES */
#endif /* ONLY_MSPACES */

/* ----------------------- Direct-mmapping chunks ----------------------- */

/*
  Directly mmapped chunks are set up with an offset to the start of
  the mmapped region stored in the prev_foot field of the chunk. This
  allows reconstruction of the required argument to MUNMAP when freed,
  and also allows adjustment of the returned chunk to meet alignment
  requirements (especially in memalign).
*/
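
/*
  Layout sketch, following the macros above: for an mmapped chunk p,
  the mapped region begins at (char*)p - p->prev_foot, so the free
  path can hand the unmap primitive that base address and the length
  chunksize(p) + p->prev_foot + MMAP_FOOT_PAD; the two trailing
  fencepost heads written below make next-chunk probes terminate
  safely.
*/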

/* Malloc using mmap */
static void* mmap_alloc(mstate m, size_t nb) {
  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  if (m->footprint_limit != 0) {
    size_t fp = m->footprint + mmsize;
    if (fp <= m->footprint || fp > m->footprint_limit)
      return 0;
  }
  if (mmsize > nb) { /* Check for wrap around 0 */
    char* mm = (char*)(dlmalloc_requires_more_vespene_gas(mmsize));
    if (mm != CMFAIL) {
      size_t offset = align_offset(chunk2mem(mm));
      size_t psize = mmsize - offset - MMAP_FOOT_PAD;
      mchunkptr p = (mchunkptr)(mm + offset);
      p->prev_foot = offset;
      p->head = psize;
      mark_inuse_foot(m, p, psize);
      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;

      if (m->least_addr == 0 || mm < m->least_addr)
        m->least_addr = mm;
      if ((m->footprint += mmsize) > m->max_footprint)
        m->max_footprint = m->footprint;
      assert(is_aligned(chunk2mem(p)));
      check_mmapped_chunk(m, p);
      return chunk2mem(p);
    }
  }
  return 0;
}

/* Realloc using mmap */
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
  size_t oldsize = chunksize(oldp);
  (void)flags; /* placate people compiling -Wunused */
  if (is_small(nb)) /* Can't shrink mmap regions below small size */
    return 0;
  /* Keep old chunk if big enough but not too big */
  if (oldsize >= nb + SIZE_T_SIZE &&
      (oldsize - nb) <= (mparams.granularity << 1))
    return oldp;
  else {
    size_t offset = oldp->prev_foot;
    size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
    size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
    char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
                                  oldmmsize, newmmsize, flags);
    if (cp != CMFAIL) {
      mchunkptr newp = (mchunkptr)(cp + offset);
      size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
      newp->head = psize;
      mark_inuse_foot(m, newp, psize);
      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;

      if (cp < m->least_addr)
        m->least_addr = cp;
      if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
        m->max_footprint = m->footprint;
      check_mmapped_chunk(m, newp);
      return newp;
    }
  }
  return 0;
}

/* -------------------------- mspace management -------------------------- */

/* Initialize top chunk and its size */
static void init_top(mstate m, mchunkptr p, size_t psize) {
  /* Ensure alignment */
  size_t offset = align_offset(chunk2mem(p));
  p = (mchunkptr)((char*)p + offset);
  psize -= offset;

  m->top = p;
  m->topsize = psize;
  p->head = psize | PINUSE_BIT;
  /* set size of fake trailing chunk holding overhead space only once */
  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
  m->trim_check = mparams.trim_threshold; /* reset on each update */
}

/* Initialize bins for a new mstate that is otherwise zeroed out */
static void init_bins(mstate m) {
  /* Establish circular links for smallbins */
  bindex_t i;
  for (i = 0; i < NSMALLBINS; ++i) {
    sbinptr bin = smallbin_at(m,i);
    bin->fd = bin->bk = bin;
  }
}

#if PROCEED_ON_ERROR

/* default corruption action */
static void reset_on_error(mstate m) {
  int i;
  ++malloc_corruption_error_count;
  /* Reinitialize fields to forget about all memory */
  m->smallmap = m->treemap = 0;
  m->dvsize = m->topsize = 0;
  m->seg.base = 0;
  m->seg.size = 0;
  m->seg.next = 0;
  m->top = m->dv = 0;
  for (i = 0; i < NTREEBINS; ++i)
    *treebin_at(m, i) = 0;
  init_bins(m);
}
#endif /* PROCEED_ON_ERROR */

/* Allocate chunk and prepend remainder with chunk in successor base. */
static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
                           size_t nb) {
  mchunkptr p = align_as_chunk(newbase);
  mchunkptr oldfirst = align_as_chunk(oldbase);
  size_t psize = (char*)oldfirst - (char*)p;
  mchunkptr q = chunk_plus_offset(p, nb);
  size_t qsize = psize - nb;
  set_size_and_pinuse_of_inuse_chunk(m, p, nb);

  assert((char*)oldfirst > (char*)q);
  assert(pinuse(oldfirst));
  assert(qsize >= MIN_CHUNK_SIZE);

  /* consolidate remainder with first chunk of old base */
  if (oldfirst == m->top) {
    size_t tsize = m->topsize += qsize;
    m->top = q;
    q->head = tsize | PINUSE_BIT;
    check_top_chunk(m, q);
  }
  else if (oldfirst == m->dv) {
    size_t dsize = m->dvsize += qsize;
    m->dv = q;
    set_size_and_pinuse_of_free_chunk(q, dsize);
  }
  else {
    if (!is_inuse(oldfirst)) {
      size_t nsize = chunksize(oldfirst);
      unlink_chunk(m, oldfirst, nsize);
      oldfirst = chunk_plus_offset(oldfirst, nsize);
      qsize += nsize;
    }
    set_free_with_pinuse(q, qsize, oldfirst);
    insert_chunk(m, q, qsize);
    check_free_chunk(m, q);
  }

  check_malloced_chunk(m, chunk2mem(p), nb);
  return chunk2mem(p);
}
|
|
|
|
|
|
|
|
/* Add a segment to hold a new noncontiguous region */
static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
  /* Determine locations and sizes of segment, fenceposts, old top */
  char* old_top = (char*)m->top;
  msegmentptr oldsp = segment_holding(m, old_top);
  char* old_end = oldsp->base + oldsp->size;
  size_t ssize = pad_request(sizeof(struct malloc_segment));
  char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
  size_t offset = align_offset(chunk2mem(rawsp));
  char* asp = rawsp + offset;
  char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
  mchunkptr sp = (mchunkptr)csp;
  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
  mchunkptr tnext = chunk_plus_offset(sp, ssize);
  mchunkptr p = tnext;
  int nfences = 0;

  /* reset top to new space */
  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);

  /* Set up segment record */
  assert(is_aligned(ss));
  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
  *ss = m->seg; /* Push current record */
  m->seg.base = tbase;
  m->seg.size = tsize;
  m->seg.sflags = mmapped;
  m->seg.next = ss;

  /* Insert trailing fenceposts */
  for (;;) {
    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
    p->head = FENCEPOST_HEAD;
    ++nfences;
    if ((char*)(&(nextp->head)) < old_end)
      p = nextp;
    else
      break;
  }
  assert(nfences >= 2);

  /* Insert the rest of old top into a bin as an ordinary free chunk */
  if (csp != old_top) {
    mchunkptr q = (mchunkptr)old_top;
    size_t psize = csp - old_top;
    mchunkptr tn = chunk_plus_offset(q, psize);
    set_free_with_pinuse(q, psize, tn);
    insert_chunk(m, q, psize);
  }

  check_top_chunk(m, m->top);
}

/* -------------------------- System allocation -------------------------- */

/* Get memory from system using MORECORE or MMAP */
static void* sys_alloc(mstate m, size_t nb) {
  char* tbase = CMFAIL;
  size_t tsize = 0;
  flag_t mmap_flag = 0;
  size_t asize; /* allocation size */

  ensure_initialization();

  /* Directly map large chunks, but only if already initialized */
  if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
    void* mem = mmap_alloc(m, nb);
    if (mem != 0)
      return mem;
  }

  asize = granularity_align(nb + SYS_ALLOC_PADDING);
  if (asize <= nb)
    return 0; /* wraparound */
  if (m->footprint_limit != 0) {
    size_t fp = m->footprint + asize;
    if (fp <= m->footprint || fp > m->footprint_limit)
      return 0;
  }

  /*
    Try getting memory in any of three ways (in most-preferred to
    least-preferred order):
    1. A call to MORECORE that can normally contiguously extend memory.
       (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
       main space is mmapped or a previous contiguous call failed)
    2. A call to MMAP new space (disabled if not HAVE_MMAP).
       Note that under the default settings, if MORECORE is unable to
       fulfill a request, and HAVE_MMAP is true, then mmap is
       used as a noncontiguous system allocator. This is a useful backup
       strategy for systems with holes in address spaces -- in this case
       sbrk cannot contiguously expand the heap, but mmap may be able to
       find space.
    3. A call to MORECORE that cannot usually contiguously extend memory.
       (disabled if not HAVE_MORECORE)

    In all cases, we need to request enough bytes from system to ensure
    we can malloc nb bytes upon success, so pad with enough space for
    top_foot, plus alignment-pad to make sure we don't lose bytes if
    not on boundary, and round this up to a granularity unit.
  */

  if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
    char* br = CMFAIL;
    size_t ssize = asize; /* sbrk call size */
    msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
    ACQUIRE_MALLOC_GLOBAL_LOCK();

    if (ss == 0) {  /* First time through or recovery */
      char* base = (char*)CALL_MORECORE(0);
      if (base != CMFAIL) {
        size_t fp;
        /* Adjust to end on a page boundary */
        if (!is_page_aligned(base))
          ssize += (page_align((size_t)base) - (size_t)base);
        fp = m->footprint + ssize; /* recheck limits */
        if (ssize > nb && ssize < HALF_MAX_SIZE_T &&
            (m->footprint_limit == 0 ||
             (fp > m->footprint && fp <= m->footprint_limit)) &&
            (br = (char*)(CALL_MORECORE(ssize))) == base) {
          tbase = base;
          tsize = ssize;
        }
      }
    }
    else {
      /* Subtract out existing available top space from MORECORE request. */
      ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
      /* Use mem here only if it did continuously extend old space */
      if (ssize < HALF_MAX_SIZE_T &&
          (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {
        tbase = br;
        tsize = ssize;
      }
    }

    if (tbase == CMFAIL) {    /* Cope with partial failure */
      if (br != CMFAIL) {    /* Try to use/extend the space we did get */
        if (ssize < HALF_MAX_SIZE_T &&
            ssize < nb + SYS_ALLOC_PADDING) {
          size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);
          if (esize < HALF_MAX_SIZE_T) {
            char* end = (char*)CALL_MORECORE(esize);
            if (end != CMFAIL)
              ssize += esize;
            else {            /* Can't use; try to release */
              (void) CALL_MORECORE(-ssize);
              br = CMFAIL;
            }
          }
        }
      }
      if (br != CMFAIL) {    /* Use the space we did get */
        tbase = br;
        tsize = ssize;
      }
      else
        disable_contiguous(m); /* Don't try contiguous path in the future */
    }

    RELEASE_MALLOC_GLOBAL_LOCK();
  }

  if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
    char* mp = (char*)(dlmalloc_requires_more_vespene_gas(asize));
    if (mp != CMFAIL) {
      tbase = mp;
      tsize = asize;
      mmap_flag = USE_MMAP_BIT;
    }
  }

  if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
    if (asize < HALF_MAX_SIZE_T) {
      char* br = CMFAIL;
      char* end = CMFAIL;
      ACQUIRE_MALLOC_GLOBAL_LOCK();
      br = (char*)(CALL_MORECORE(asize));
      end = (char*)(CALL_MORECORE(0));
      RELEASE_MALLOC_GLOBAL_LOCK();
      if (br != CMFAIL && end != CMFAIL && br < end) {
        size_t ssize = end - br;
        if (ssize > nb + TOP_FOOT_SIZE) {
          tbase = br;
          tsize = ssize;
        }
      }
    }
  }

  if (tbase != CMFAIL) {

    if ((m->footprint += tsize) > m->max_footprint)
      m->max_footprint = m->footprint;

    if (!is_initialized(m)) { /* first-time initialization */
      if (m->least_addr == 0 || tbase < m->least_addr)
        m->least_addr = tbase;
      m->seg.base = tbase;
      m->seg.size = tsize;
      m->seg.sflags = mmap_flag;
      m->magic = mparams.magic;
      m->release_checks = MAX_RELEASE_CHECK_RATE;
      init_bins(m);
#if !ONLY_MSPACES
      if (is_global(m))
        init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
      else
#endif
      {
        /* Offset top by embedded malloc_state */
        mchunkptr mn = next_chunk(mem2chunk(m));
        init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
      }
    }

    else {
      /* Try to merge with an existing segment */
      msegmentptr sp = &m->seg;
      /* Only consider most recent segment if traversal suppressed */
      while (sp != 0 && tbase != sp->base + sp->size)
        sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
      if (sp != 0 &&
          !is_extern_segment(sp) &&
          (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
          segment_holds(sp, m->top)) { /* append */
        sp->size += tsize;
        init_top(m, m->top, m->topsize + tsize);
      }
      else {
        if (tbase < m->least_addr)
          m->least_addr = tbase;
        sp = &m->seg;
        while (sp != 0 && sp->base != tbase + tsize)
          sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
        if (sp != 0 &&
            !is_extern_segment(sp) &&
            (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
          char* oldbase = sp->base;
          sp->base = tbase;
          sp->size += tsize;
          return prepend_alloc(m, tbase, oldbase, nb);
        }
        else
          add_segment(m, tbase, tsize, mmap_flag);
      }
    }

    if (nb < m->topsize) { /* Allocate from new or extended top space */
      size_t rsize = m->topsize -= nb;
      mchunkptr p = m->top;
      mchunkptr r = m->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
      check_top_chunk(m, m->top);
      check_malloced_chunk(m, chunk2mem(p), nb);
      return chunk2mem(p);
    }
  }

  MALLOC_FAILURE_ACTION;
  return 0;
}

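/*
  Worked example of the padding rule used by sys_alloc above, as a
  sketch assuming a 64 KiB granularity: for nb = 100000,
  asize = granularity_align(nb + SYS_ALLOC_PADDING), which rounds up to
  131072 so long as SYS_ALLOC_PADDING <= 31072, leaving room for the
  top foot and alignment slack on top of the nb bytes. The asize <= nb
  test right after the computation rejects size_t wraparound for
  absurdly large nb.
*/
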
/* ----------------------- system deallocation -------------------------- */

/* Unmap and unlink any mmapped segments that don't contain used chunks */
static size_t release_unused_segments(mstate m) {
  size_t released = 0;
  int nsegs = 0;
  msegmentptr pred = &m->seg;
  msegmentptr sp = pred->next;
  while (sp != 0) {
    char* base = sp->base;
    size_t size = sp->size;
    msegmentptr next = sp->next;
    ++nsegs;
    if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
      mchunkptr p = align_as_chunk(base);
      size_t psize = chunksize(p);
      /* Can unmap if first chunk holds entire segment and not pinned */
      if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
        tchunkptr tp = (tchunkptr)p;
        assert(segment_holds(sp, (char*)sp));
        if (p == m->dv) {
          m->dv = 0;
          m->dvsize = 0;
        }
        else {
          unlink_large_chunk(m, tp);
        }
        if (CALL_MUNMAP(base, size) == 0) {
          released += size;
          m->footprint -= size;
          /* unlink obsoleted record */
          sp = pred;
          sp->next = next;
        }
        else { /* back out if cannot unmap */
          insert_large_chunk(m, tp, psize);
        }
      }
    }
    if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
      break;
    pred = sp;
    sp = next;
  }
  /* Reset check counter */
  m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?
                       (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);
  return released;
}

static int sys_trim(mstate m, size_t pad) {
  size_t released = 0;
  ensure_initialization();
  if (pad < MAX_REQUEST && is_initialized(m)) {
    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */

    if (m->topsize > pad) {
      /* Shrink top space in granularity-size units, keeping at least one */
      size_t unit = mparams.granularity;
      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
                      SIZE_T_ONE) * unit;
      msegmentptr sp = segment_holding(m, (char*)m->top);

      if (!is_extern_segment(sp)) {
        if (is_mmapped_segment(sp)) {
          if (HAVE_MMAP &&
              sp->size >= extra &&
              !has_segment_link(m, sp)) { /* can't shrink if pinned */
            size_t newsize = sp->size - extra;
            (void)newsize; /* placate people compiling -Wunused-variable */
            /* Prefer mremap, fall back to munmap */
            if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
                (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
              released = extra;
            }
          }
        }
        else if (HAVE_MORECORE) {
          if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
            extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
          ACQUIRE_MALLOC_GLOBAL_LOCK();
          {
            /* Make sure end of memory is where we last set it. */
            char* old_br = (char*)(CALL_MORECORE(0));
            if (old_br == sp->base + sp->size) {
              char* rel_br = (char*)(CALL_MORECORE(-extra));
              char* new_br = (char*)(CALL_MORECORE(0));
              if (rel_br != CMFAIL && new_br < old_br)
                released = old_br - new_br;
            }
          }
          RELEASE_MALLOC_GLOBAL_LOCK();
        }
      }

      if (released != 0) {
        sp->size -= released;
        m->footprint -= released;
        init_top(m, m->top, m->topsize - released);
        check_top_chunk(m, m->top);
      }
    }

    /* Unmap any unused mmapped segments */
    if (HAVE_MMAP)
      released += release_unused_segments(m);

    /* On failure, disable autotrim to avoid repeated failed future calls */
    if (released == 0 && m->topsize > m->trim_check)
      m->trim_check = MAX_SIZE_T;
  }

  return (released != 0)? 1 : 0;
}

/* Consolidate and bin a chunk. Differs from exported versions
   of free mainly in that the chunk need not be marked as inuse.
*/
static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
  mchunkptr next = chunk_plus_offset(p, psize);
  if (!pinuse(p)) {
    mchunkptr prev;
    size_t prevsize = p->prev_foot;
    if (is_mmapped(p)) {
      psize += prevsize + MMAP_FOOT_PAD;
      if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
        m->footprint -= psize;
      return;
    }
    prev = chunk_minus_offset(p, prevsize);
    psize += prevsize;
    p = prev;
    if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */
      if (p != m->dv) {
        unlink_chunk(m, p, prevsize);
      }
      else if ((next->head & INUSE_BITS) == INUSE_BITS) {
        m->dvsize = psize;
        set_free_with_pinuse(p, psize, next);
        return;
      }
    }
    else {
      CORRUPTION_ERROR_ACTION(m);
      return;
    }
  }
  if (RTCHECK(ok_address(m, next))) {
    if (!cinuse(next)) { /* consolidate forward */
      if (next == m->top) {
        size_t tsize = m->topsize += psize;
        m->top = p;
        p->head = tsize | PINUSE_BIT;
        if (p == m->dv) {
          m->dv = 0;
          m->dvsize = 0;
        }
        return;
      }
      else if (next == m->dv) {
        size_t dsize = m->dvsize += psize;
        m->dv = p;
        set_size_and_pinuse_of_free_chunk(p, dsize);
        return;
      }
      else {
        size_t nsize = chunksize(next);
        psize += nsize;
        unlink_chunk(m, next, nsize);
        set_size_and_pinuse_of_free_chunk(p, psize);
        if (p == m->dv) {
          m->dvsize = psize;
          return;
        }
      }
    }
    else {
      set_free_with_pinuse(p, psize, next);
    }
    insert_chunk(m, p, psize);
  }
  else {
    CORRUPTION_ERROR_ACTION(m);
  }
}

/* ---------------------------- malloc --------------------------- */

/* allocate a large request from the best fitting chunk in a treebin */
static void* tmalloc_large(mstate m, size_t nb) {
  tchunkptr v = 0;
  size_t rsize = -nb; /* Unsigned negation */
  tchunkptr t;
  bindex_t idx;
  compute_tree_index(nb, idx);
  if ((t = *treebin_at(m, idx)) != 0) {
    /* Traverse tree for this bin looking for node with size == nb */
    size_t sizebits = nb << leftshift_for_tree_index(idx);
    tchunkptr rst = 0;  /* The deepest untaken right subtree */
    for (;;) {
      tchunkptr rt;
      size_t trem = chunksize(t) - nb;
      if (trem < rsize) {
        v = t;
        if ((rsize = trem) == 0)
          break;
      }
      rt = t->child[1];
      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
      if (rt != 0 && rt != t)
        rst = rt;
      if (t == 0) {
        t = rst; /* set t to least subtree holding sizes > nb */
        break;
      }
      sizebits <<= 1;
    }
  }
  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
    if (leftbits != 0) {
      bindex_t i;
      binmap_t leastbit = least_bit(leftbits);
      compute_bit2idx(leastbit, i);
      t = *treebin_at(m, i);
    }
  }

  while (t != 0) { /* find smallest of tree or subtree */
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
    t = leftmost_child(t);
  }

  /* If dv is a better fit, return 0 so malloc will use it */
  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
    if (RTCHECK(ok_address(m, v))) { /* split */
      mchunkptr r = chunk_plus_offset(v, nb);
      assert(chunksize(v) == rsize + nb);
      if (RTCHECK(ok_next(v, r))) {
        unlink_large_chunk(m, v);
        if (rsize < MIN_CHUNK_SIZE)
          set_inuse_and_pinuse(m, v, (rsize + nb));
        else {
          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
          set_size_and_pinuse_of_free_chunk(r, rsize);
          insert_chunk(m, r, rsize);
        }
        return chunk2mem(v);
      }
    }
    CORRUPTION_ERROR_ACTION(m);
  }
  return 0;
}

/* allocate a small request from the best fitting chunk in a treebin */
static void* tmalloc_small(mstate m, size_t nb) {
  tchunkptr t, v;
  size_t rsize;
  bindex_t i;
  binmap_t leastbit = least_bit(m->treemap);
  compute_bit2idx(leastbit, i);
  v = t = *treebin_at(m, i);
  rsize = chunksize(t) - nb;

  while ((t = leftmost_child(t)) != 0) {
    size_t trem = chunksize(t) - nb;
    if (trem < rsize) {
      rsize = trem;
      v = t;
    }
  }

  if (RTCHECK(ok_address(m, v))) {
    mchunkptr r = chunk_plus_offset(v, nb);
    assert(chunksize(v) == rsize + nb);
    if (RTCHECK(ok_next(v, r))) {
      unlink_large_chunk(m, v);
      if (rsize < MIN_CHUNK_SIZE)
        set_inuse_and_pinuse(m, v, (rsize + nb));
      else {
        set_size_and_pinuse_of_inuse_chunk(m, v, nb);
        set_size_and_pinuse_of_free_chunk(r, rsize);
        replace_dv(m, r, rsize);
      }
      return chunk2mem(v);
    }
  }

  CORRUPTION_ERROR_ACTION(m);
  return 0;
}

#if !ONLY_MSPACES

void* dlmalloc(size_t bytes) {
  /*
    Basic algorithm:
    If a small request (< 256 bytes minus per-chunk overhead):
      1. If one exists, use a remainderless chunk in associated smallbin.
         (Remainderless means that there are too few excess bytes to
         represent as a chunk.)
      2. If it is big enough, use the dv chunk, which is normally the
         chunk adjacent to the one used for the most recent small request.
      3. If one exists, split the smallest available chunk in a bin,
         saving remainder in dv.
      4. If it is big enough, use the top chunk.
      5. If available, get memory from system and use it
    Otherwise, for a large request:
      1. Find the smallest available binned chunk that fits, and use it
         if it is better fitting than dv chunk, splitting if necessary.
      2. If better fitting than any binned chunk, use the dv chunk.
      3. If it is big enough, use the top chunk.
      4. If request size >= mmap threshold, try to directly mmap this chunk.
      5. If available, get memory from system and use it

    The ugly goto's here ensure that postaction occurs along all paths.
  */

#if USE_LOCKS
  ensure_initialization(); /* initialize in sys_alloc if not using locks */
#endif

  if (!PREACTION(gm)) {
    void* mem;
    size_t nb;
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = gm->smallmap >> idx;

      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
        mchunkptr b, p;
        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
        b = smallbin_at(gm, idx);
        p = b->fd;
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(gm, b, p, idx);
        set_inuse_and_pinuse(gm, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }

      else if (nb > gm->dvsize) {
        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(gm, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(gm, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(gm, p, small_index2size(i));
          else {
            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(gm, r, rsize);
          }
          mem = chunk2mem(p);
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }

        else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
          check_malloced_chunk(gm, mem, nb);
          goto postaction;
        }
      }
    }
    else if (bytes >= MAX_REQUEST)
      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    else {
      nb = pad_request(bytes);
      if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
        check_malloced_chunk(gm, mem, nb);
        goto postaction;
      }
    }

    if (nb <= gm->dvsize) {
      size_t rsize = gm->dvsize - nb;
      mchunkptr p = gm->dv;
      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
        mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
        gm->dvsize = rsize;
        set_size_and_pinuse_of_free_chunk(r, rsize);
        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      }
      else { /* exhaust dv */
        size_t dvs = gm->dvsize;
        gm->dvsize = 0;
        gm->dv = 0;
        set_inuse_and_pinuse(gm, p, dvs);
      }
      mem = chunk2mem(p);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }

    else if (nb < gm->topsize) { /* Split top */
      size_t rsize = gm->topsize -= nb;
      mchunkptr p = gm->top;
      mchunkptr r = gm->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(gm, gm->top);
      check_malloced_chunk(gm, mem, nb);
      goto postaction;
    }

    mem = sys_alloc(gm, nb);
    POSTACTION(gm);
    if (mem == MAP_FAILED && weaken(__oom_hook)) {
      weaken(__oom_hook)(bytes);
    }
    return mem;

  postaction:
    POSTACTION(gm);
    return mem;
  }

  return 0;
}

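/*
  Sketch (assumption, not part of the allocator): dlmalloc above reports
  exhaustion through the weak symbol __oom_hook. Under the usual
  weak-reference pattern, a program that provides a strong definition
  gets called with the failed request size; when no definition is
  linked, weaken(__oom_hook) is null and the call is skipped. A minimal
  observer, assuming a void(size_t) signature:

    static size_t g_last_failed_request;
    void __oom_hook(size_t request) {
      g_last_failed_request = request;
    }

  The hook runs after POSTACTION, but it should still avoid calling back
  into malloc, since the allocation that triggered it has just failed.
*/
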
/* ---------------------------- free --------------------------- */

void dlfree(void* mem) {
  /*
    Consolidate freed chunks with preceding or succeeding bordering
    free chunks, if they exist, and then place in a bin. Intermixed
    with special cases for top, dv, mmapped chunks, and usage errors.
  */

  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
#if FOOTERS
    mstate fm = get_mstate_for(p);
    if (!ok_magic(fm)) {
      USAGE_ERROR_ACTION(fm, p);
      return;
    }
#else /* FOOTERS */
#define fm gm
#endif /* FOOTERS */
    if (!PREACTION(fm)) {
      check_inuse_chunk(fm, p);
      if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
        size_t psize = chunksize(p);
        mchunkptr next = chunk_plus_offset(p, psize);
        if (!pinuse(p)) {
          size_t prevsize = p->prev_foot;
          if (is_mmapped(p)) {
            psize += prevsize + MMAP_FOOT_PAD;
            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
              fm->footprint -= psize;
            goto postaction;
          }
          else {
            mchunkptr prev = chunk_minus_offset(p, prevsize);
            psize += prevsize;
            p = prev;
            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
              if (p != fm->dv) {
                unlink_chunk(fm, p, prevsize);
              }
              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
                fm->dvsize = psize;
                set_free_with_pinuse(p, psize, next);
                goto postaction;
              }
            }
            else
              goto erroraction;
          }
        }

        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
          if (!cinuse(next)) { /* consolidate forward */
            if (next == fm->top) {
              size_t tsize = fm->topsize += psize;
              fm->top = p;
              p->head = tsize | PINUSE_BIT;
              if (p == fm->dv) {
                fm->dv = 0;
                fm->dvsize = 0;
              }
              if (should_trim(fm, tsize))
                sys_trim(fm, 0);
              goto postaction;
            }
            else if (next == fm->dv) {
              size_t dsize = fm->dvsize += psize;
              fm->dv = p;
              set_size_and_pinuse_of_free_chunk(p, dsize);
              goto postaction;
            }
            else {
              size_t nsize = chunksize(next);
              psize += nsize;
              unlink_chunk(fm, next, nsize);
              set_size_and_pinuse_of_free_chunk(p, psize);
              if (p == fm->dv) {
                fm->dvsize = psize;
                goto postaction;
              }
            }
          }
          else
            set_free_with_pinuse(p, psize, next);

          if (is_small(psize)) {
            insert_small_chunk(fm, p, psize);
            check_free_chunk(fm, p);
          }
          else {
            tchunkptr tp = (tchunkptr)p;
            insert_large_chunk(fm, tp, psize);
            check_free_chunk(fm, p);
            if (--fm->release_checks == 0)
              release_unused_segments(fm);
          }
          goto postaction;
        }
      }
    erroraction:
      USAGE_ERROR_ACTION(fm, p);
    postaction:
      POSTACTION(fm);
    }
  }
#if !FOOTERS
#undef fm
#endif /* FOOTERS */
}

void* dlcalloc(size_t n_elements, size_t elem_size) {
  void* mem;
  size_t req = 0;
  if (n_elements != 0) {
    req = n_elements * elem_size;
    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
        (req / n_elements != elem_size))
      req = MAX_SIZE_T; /* force downstream failure on overflow */
  }
  mem = dlmalloc(req);
  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
    bzero(mem, req);
  return mem;
}

#endif /* !ONLY_MSPACES */

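/*
  Worked example of the overflow guard in dlcalloc above:
  ((n_elements | elem_size) & ~(size_t)0xffff) is nonzero only when at
  least one operand is >= 2**16, i.e. only when the product could
  possibly exceed 2**32 and wrap a 32-bit size_t; the exact (and slower)
  division check runs just in that case. For instance, on a 64-bit
  size_t:

    n_elements = 0x100000001, elem_size = 16
      req = 0x1000000010, req / n_elements == elem_size, so req stands.

    n_elements = 0x100000000, elem_size = 0x100000000
      req wraps to 0; req / n_elements != elem_size, so req is forced to
      MAX_SIZE_T and the downstream dlmalloc call fails cleanly.
*/
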
/* ------------ Internal support for realloc, memalign, etc -------------- */

/* Try to realloc; only in-place unless can_move true */
static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
                                   int can_move) {
  mchunkptr newp = 0;
  size_t oldsize = chunksize(p);
  mchunkptr next = chunk_plus_offset(p, oldsize);
  if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
              ok_next(p, next) && ok_pinuse(next))) {
    if (is_mmapped(p)) {
      newp = mmap_resize(m, p, nb, can_move);
    }
    else if (oldsize >= nb) {             /* already big enough */
      size_t rsize = oldsize - nb;
      if (rsize >= MIN_CHUNK_SIZE) {      /* split off remainder */
        mchunkptr r = chunk_plus_offset(p, nb);
        set_inuse(m, p, nb);
        set_inuse(m, r, rsize);
        dispose_chunk(m, r, rsize);
      }
      newp = p;
    }
    else if (next == m->top) {  /* extend into top */
      if (oldsize + m->topsize > nb) {
        size_t newsize = oldsize + m->topsize;
        size_t newtopsize = newsize - nb;
        mchunkptr newtop = chunk_plus_offset(p, nb);
        set_inuse(m, p, nb);
        newtop->head = newtopsize | PINUSE_BIT;
        m->top = newtop;
        m->topsize = newtopsize;
        newp = p;
      }
    }
    else if (next == m->dv) { /* extend into dv */
      size_t dvs = m->dvsize;
      if (oldsize + dvs >= nb) {
        size_t dsize = oldsize + dvs - nb;
        if (dsize >= MIN_CHUNK_SIZE) {
          mchunkptr r = chunk_plus_offset(p, nb);
          mchunkptr n = chunk_plus_offset(r, dsize);
          set_inuse(m, p, nb);
          set_size_and_pinuse_of_free_chunk(r, dsize);
          clear_pinuse(n);
          m->dvsize = dsize;
          m->dv = r;
        }
        else { /* exhaust dv */
          size_t newsize = oldsize + dvs;
          set_inuse(m, p, newsize);
          m->dvsize = 0;
          m->dv = 0;
        }
        newp = p;
      }
    }
    else if (!cinuse(next)) { /* extend into next free chunk */
      size_t nextsize = chunksize(next);
      if (oldsize + nextsize >= nb) {
        size_t rsize = oldsize + nextsize - nb;
        unlink_chunk(m, next, nextsize);
        if (rsize < MIN_CHUNK_SIZE) {
          size_t newsize = oldsize + nextsize;
          set_inuse(m, p, newsize);
        }
        else {
          mchunkptr r = chunk_plus_offset(p, nb);
          set_inuse(m, p, nb);
          set_inuse(m, r, rsize);
          dispose_chunk(m, r, rsize);
        }
        newp = p;
      }
    }
  }
  else {
    USAGE_ERROR_ACTION(m, chunk2mem(p));
  }
  return newp;
}

static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
  void* mem = 0;
  if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
    alignment = MIN_CHUNK_SIZE;
  if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
    size_t a = MALLOC_ALIGNMENT << 1;
    while (a < alignment) a <<= 1;
    alignment = a;
  }
  if (bytes >= MAX_REQUEST - alignment) {
    if (m != 0) { /* Test isn't needed but avoids compiler warning */
      MALLOC_FAILURE_ACTION;
    }
  }
  else {
    size_t nb = request2size(bytes);
    size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
    mem = internal_malloc(m, req);
    if (mem != 0) {
      mchunkptr p = mem2chunk(mem);
      if (PREACTION(m))
        return 0;
      if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */
        /*
          Find an aligned spot inside chunk. Since we need to give
          back leading space in a chunk of at least MIN_CHUNK_SIZE, if
          the first calculation places us at a spot with less than
          MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
          We've allocated enough total room so that this is always
          possible.
        */
        char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -
                                               SIZE_T_ONE)) &
                                             -alignment));
        char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
          br : br+alignment;
        mchunkptr newp = (mchunkptr)pos;
        size_t leadsize = pos - (char*)(p);
        size_t newsize = chunksize(p) - leadsize;

        if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
          newp->prev_foot = p->prev_foot + leadsize;
          newp->head = newsize;
        }
        else { /* Otherwise, give back leader, use the rest */
          set_inuse(m, newp, newsize);
          set_inuse(m, p, leadsize);
          dispose_chunk(m, p, leadsize);
        }
        p = newp;
      }

      /* Give back spare room at the end */
      if (!is_mmapped(p)) {
        size_t size = chunksize(p);
        if (size > nb + MIN_CHUNK_SIZE) {
          size_t remainder_size = size - nb;
          mchunkptr remainder = chunk_plus_offset(p, nb);
          set_inuse(m, p, nb);
          set_inuse(m, remainder, remainder_size);
          dispose_chunk(m, remainder, remainder_size);
        }
      }

      mem = chunk2mem(p);
      assert(chunksize(p) >= nb);
      assert(((size_t)mem & (alignment - 1)) == 0);
      check_inuse_chunk(m, p);
      POSTACTION(m);
    }
  }
  return mem;
}

/*
  Common support for independent_X routines, handling
  all of the combinations that can result.
  The opts arg has:
    bit 0 set if all elements are same size (using sizes[0])
    bit 1 set if elements should be zeroed
*/
static void** ialloc(mstate m,
                     size_t n_elements,
                     size_t* sizes,
                     int opts,
                     void* chunks[]) {

  size_t    element_size;   /* chunksize of each element, if all same */
  size_t    contents_size;  /* total size of elements */
  size_t    array_size;     /* request size of pointer array */
  void*     mem;            /* malloced aggregate space */
  mchunkptr p;              /* corresponding chunk */
  size_t    remainder_size; /* remaining bytes while splitting */
  void**    marray;         /* either "chunks" or malloced ptr array */
  mchunkptr array_chunk;    /* chunk for malloced ptr array */
  flag_t    was_enabled;    /* to disable mmap */
  size_t    size;
  size_t    i;

  ensure_initialization();
  /* compute array length, if needed */
  if (chunks != 0) {
    if (n_elements == 0)
      return chunks; /* nothing to do */
    marray = chunks;
    array_size = 0;
  }
  else {
    /* if empty req, must still return chunk representing empty array */
    if (n_elements == 0)
      return (void**)internal_malloc(m, 0);
    marray = 0;
    array_size = request2size(n_elements * (sizeof(void*)));
  }

  /* compute total element size */
  if (opts & 0x1) { /* all-same-size */
    element_size = request2size(*sizes);
    contents_size = n_elements * element_size;
  }
  else { /* add up all the sizes */
    element_size = 0;
    contents_size = 0;
    for (i = 0; i != n_elements; ++i)
      contents_size += request2size(sizes[i]);
  }

  size = contents_size + array_size;

  /*
    Allocate the aggregate chunk. First disable direct-mmapping so
    malloc won't use it, since we would not be able to later
    free/realloc space internal to a segregated mmap region.
  */
  was_enabled = use_mmap(m);
  disable_mmap(m);
  mem = internal_malloc(m, size - CHUNK_OVERHEAD);
  if (was_enabled)
    enable_mmap(m);
  if (mem == 0)
    return 0;

  if (PREACTION(m)) return 0;
  p = mem2chunk(mem);
  remainder_size = chunksize(p);

  assert(!is_mmapped(p));

  if (opts & 0x2) { /* optionally clear the elements */
    bzero((size_t*)mem, remainder_size - SIZE_T_SIZE - array_size);
  }

  /* If not provided, allocate the pointer array as final part of chunk */
  if (marray == 0) {
    size_t array_chunk_size;
    array_chunk = chunk_plus_offset(p, contents_size);
    array_chunk_size = remainder_size - contents_size;
    marray = (void**) (chunk2mem(array_chunk));
    set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
    remainder_size = contents_size;
  }

  /* split out elements */
  for (i = 0; ; ++i) {
    marray[i] = chunk2mem(p);
    if (i != n_elements-1) {
      if (element_size != 0)
        size = element_size;
      else
        size = request2size(sizes[i]);
      remainder_size -= size;
      set_size_and_pinuse_of_inuse_chunk(m, p, size);
      p = chunk_plus_offset(p, size);
    }
    else { /* the final element absorbs any overallocation slop */
      set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
      break;
    }
  }

#if DEBUG
  if (marray != chunks) {
    /* final element must have exactly exhausted chunk */
    if (element_size != 0) {
      assert(remainder_size == element_size);
    }
    else {
      assert(remainder_size == request2size(sizes[i]));
    }
    check_inuse_chunk(m, mem2chunk(marray));
  }
  for (i = 0; i != n_elements; ++i)
    check_inuse_chunk(m, mem2chunk(marray[i]));
#endif /* DEBUG */

  POSTACTION(m);
  return marray;
}

/* Try to free all pointers in the given array.
   Note: this could be made faster, by delaying consolidation,
   at the price of disabling some user integrity checks. We
   still optimize some consolidations by combining adjacent
   chunks before freeing, which will occur often if allocated
   with ialloc or the array is sorted.
*/
static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {
  size_t unfreed = 0;
  if (!PREACTION(m)) {
    void** a;
    void** fence = &(array[nelem]);
    for (a = array; a != fence; ++a) {
      void* mem = *a;
      if (mem != 0) {
        mchunkptr p = mem2chunk(mem);
        size_t psize = chunksize(p);
#if FOOTERS
        if (get_mstate_for(p) != m) {
          ++unfreed;
          continue;
        }
#endif
        check_inuse_chunk(m, p);
        *a = 0;
        if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {
          void ** b = a + 1; /* try to merge with next chunk */
          mchunkptr next = next_chunk(p);
          if (b != fence && *b == chunk2mem(next)) {
            size_t newsize = chunksize(next) + psize;
            set_inuse(m, p, newsize);
            *b = chunk2mem(p);
          }
          else
            dispose_chunk(m, p, psize);
        }
        else {
          CORRUPTION_ERROR_ACTION(m);
          break;
        }
      }
    }
    if (should_trim(m, m->topsize))
      sys_trim(m, 0);
    POSTACTION(m);
  }
  return unfreed;
}

/* Traversal */
#if MALLOC_INSPECT_ALL
static void internal_inspect_all(mstate m,
                                 void(*handler)(void *start,
                                                void *end,
                                                size_t used_bytes,
                                                void* callback_arg),
                                 void* arg) {
  if (is_initialized(m)) {
    mchunkptr top = m->top;
    msegmentptr s;
    for (s = &m->seg; s != 0; s = s->next) {
      mchunkptr q = align_as_chunk(s->base);
      while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
        mchunkptr next = next_chunk(q);
        size_t sz = chunksize(q);
        size_t used;
        void* start;
        if (is_inuse(q)) {
          used = sz - CHUNK_OVERHEAD; /* must not be mmapped */
          start = chunk2mem(q);
        }
        else {
          used = 0;
          if (is_small(sz)) {     /* offset by possible bookkeeping */
            start = (void*)((char*)q + sizeof(struct malloc_chunk));
          }
          else {
            start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
          }
        }
        if (start < (void*)next)  /* skip if all space is bookkeeping */
          handler(start, next, used, arg);
        if (q == top)
          break;
        q = next;
      }
    }
  }
}
#endif /* MALLOC_INSPECT_ALL */

/* ------------------ Exported realloc, memalign, etc -------------------- */

#if !ONLY_MSPACES

void* dlrealloc(void* oldmem, size_t bytes) {
  void* mem = 0;
  if (oldmem == 0) {
    mem = dlmalloc(bytes);
  }
  else if (bytes >= MAX_REQUEST) {
    MALLOC_FAILURE_ACTION;
  }
#ifdef REALLOC_ZERO_BYTES_FREES
  else if (bytes == 0) {
    dlfree(oldmem);
  }
#endif /* REALLOC_ZERO_BYTES_FREES */
  else {
    size_t nb = request2size(bytes);
    mchunkptr oldp = mem2chunk(oldmem);
#if ! FOOTERS
    mstate m = gm;
#else /* FOOTERS */
    mstate m = get_mstate_for(oldp);
    if (!ok_magic(m)) {
      USAGE_ERROR_ACTION(m, oldmem);
      return 0;
    }
#endif /* FOOTERS */
    if (!PREACTION(m)) {
      mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
      POSTACTION(m);
      if (newp != 0) {
        check_inuse_chunk(m, newp);
        mem = chunk2mem(newp);
      }
      else {
        mem = internal_malloc(m, bytes);
        if (mem != 0) {
          size_t oc = chunksize(oldp) - overhead_for(oldp);
          memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
          internal_free(m, oldmem);
        }
      }
    }
  }
  return mem;
}

void* dlrealloc_in_place(void* oldmem, size_t bytes) {
  void* mem = 0;
  if (oldmem != 0) {
    if (bytes >= MAX_REQUEST) {
      MALLOC_FAILURE_ACTION;
    }
    else {
      size_t nb = request2size(bytes);
      mchunkptr oldp = mem2chunk(oldmem);
#if ! FOOTERS
      mstate m = gm;
#else /* FOOTERS */
      mstate m = get_mstate_for(oldp);
      if (!ok_magic(m)) {
        USAGE_ERROR_ACTION(m, oldmem);
        return 0;
      }
#endif /* FOOTERS */
      if (!PREACTION(m)) {
        mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
        POSTACTION(m);
        if (newp == oldp) {
          check_inuse_chunk(m, newp);
          mem = oldmem;
        }
      }
    }
  }
  return mem;
}

void* dlmemalign(size_t alignment, size_t bytes) {
  if (alignment <= MALLOC_ALIGNMENT) {
    return dlmalloc(bytes);
  }
  return internal_memalign(gm, alignment, bytes);
}

int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
  void* mem = 0;
  if (alignment == MALLOC_ALIGNMENT)
    mem = dlmalloc(bytes);
  else {
    size_t d = alignment / sizeof(void*);
    size_t r = alignment % sizeof(void*);
    if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
      return EINVAL;
    else if (bytes <= MAX_REQUEST - alignment) {
      if (alignment < MIN_CHUNK_SIZE)
        alignment = MIN_CHUNK_SIZE;
      mem = internal_memalign(gm, alignment, bytes);
    }
  }
  if (!mem) {
    return ENOMEM;
  } else {
    *pp = mem;
    return 0;
  }
}

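/*
  Usage sketch for the aligned allocators above, following the contract
  visible in dlposix_memalign: 0 on success, EINVAL for an alignment
  that is not a power-of-two multiple of sizeof(void*), ENOMEM on
  exhaustion. The alignment and size are illustrative.

    void *buf;
    int rc = dlposix_memalign(&buf, 64, 4096);
    if (rc == 0) {
      use the 64-byte-aligned 4 KiB buffer, then:
      dlfree(buf);
    }
*/
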
void* dlvalloc(size_t bytes) {
  size_t pagesz;
  ensure_initialization();
  pagesz = mparams.page_size;
  return dlmemalign(pagesz, bytes);
}

void* dlpvalloc(size_t bytes) {
  size_t pagesz;
  ensure_initialization();
  pagesz = mparams.page_size;
  return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
}

void** dlindependent_calloc(size_t n_elements, size_t elem_size,
                            void* chunks[]) {
  size_t sz = elem_size; /* serves as 1-element array */
  return ialloc(gm, n_elements, &sz, 3, chunks);
}

void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
                              void* chunks[]) {
  return ialloc(gm, n_elements, sizes, 0, chunks);
}

size_t dlbulk_free(void* array[], size_t nelem) {
  return internal_bulk_free(gm, array, nelem);
}

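/*
  Usage sketch for dlindependent_comalloc and dlbulk_free above:
  allocate the pieces of a small aggregate out of one underlying chunk,
  then release them in one pass. The struct and sizes are illustrative
  only; dlbulk_free benefits from ialloc-adjacent (or sorted) pointers
  because it merges neighboring chunks before disposing of them.

    struct widget { int *state; char *name; };
    size_t sizes[2] = { 16 * sizeof(int), 32 };
    void* mem[2];
    if (dlindependent_comalloc(2, sizes, mem) != 0) {
      struct widget w = { mem[0], mem[1] };
      use w.state and w.name, then:
      dlbulk_free(mem, 2);
    }
*/
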
#if MALLOC_INSPECT_ALL
void dlmalloc_inspect_all(void(*handler)(void *start,
                                         void *end,
                                         size_t used_bytes,
                                         void* callback_arg),
                          void* arg) {
  ensure_initialization();
  if (!PREACTION(gm)) {
    internal_inspect_all(gm, handler, arg);
    POSTACTION(gm);
  }
}
#endif /* MALLOC_INSPECT_ALL */

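/*
  Handler sketch for dlmalloc_inspect_all above. The traversal runs
  between PREACTION and POSTACTION, i.e. under the malloc lock, so the
  handler should not call back into malloc or free; this one just totals
  in-use bytes into the size_t passed as callback_arg (names are
  illustrative).

    static void tally(void *start, void *end, size_t used, void *arg) {
      (void)start;
      (void)end;
      *(size_t *)arg += used;
    }

    size_t used = 0;
    dlmalloc_inspect_all(tally, &used);
*/
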
int dlmalloc_trim(size_t pad) {
  int result = 0;
  ensure_initialization();
  if (!PREACTION(gm)) {
    result = sys_trim(gm, pad);
    POSTACTION(gm);
  }
  return result;
}

size_t dlmalloc_footprint(void) {
  return gm->footprint;
}

size_t dlmalloc_max_footprint(void) {
  return gm->max_footprint;
}

size_t dlmalloc_footprint_limit(void) {
  size_t maf = gm->footprint_limit;
  return maf == 0 ? MAX_SIZE_T : maf;
}

size_t dlmalloc_set_footprint_limit(size_t bytes) {
  size_t result; /* invert sense of 0 */
  if (bytes == 0)
    result = granularity_align(1); /* Use minimal size */
  else if (bytes == MAX_SIZE_T)
    result = 0;                    /* disable */
  else
    result = granularity_align(bytes);
  return gm->footprint_limit = result;
}

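/*
  Note on the inverted sense of 0 in dlmalloc_set_footprint_limit above,
  with a sketch of typical use: passing 0 requests the smallest nonzero
  limit rather than no limit, and MAX_SIZE_T disables limiting (stored
  internally as 0, which is why dlmalloc_footprint_limit maps 0 back to
  MAX_SIZE_T). The requested cap is rounded up to a granularity unit.

    dlmalloc_set_footprint_limit(64 * 1024 * 1024);
    size_t cap = dlmalloc_footprint_limit();
*/
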
#if !NO_MALLINFO
struct mallinfo dlmallinfo(void) {
  return internal_mallinfo(gm);
}
#endif /* NO_MALLINFO */

#if !NO_MALLOC_STATS
void dlmalloc_stats() {
  internal_malloc_stats(gm);
}
#endif /* NO_MALLOC_STATS */

int dlmallopt(int param_number, int value) {
  return change_mparam(param_number, value);
}

size_t dlmalloc_usable_size(void* mem) {
  mchunkptr p;
  size_t bytes;
  if (mem) {
    p = mem2chunk(mem);
    if (is_inuse(p)) {
      bytes = chunksize(p) - overhead_for(p);
    } else {
      bytes = 0;
    }
  } else {
    bytes = 0;
  }
  return bytes;
}

#endif /* !ONLY_MSPACES */

/* ----------------------------- user mspaces ---------------------------- */

#if MSPACES

static mstate init_user_mstate(char* tbase, size_t tsize) {
  size_t msize = pad_request(sizeof(struct malloc_state));
  mchunkptr mn;
  mchunkptr msp = align_as_chunk(tbase);
  mstate m = (mstate)(chunk2mem(msp));
  bzero(m, msize);
  (void)INITIAL_LOCK(&m->mutex);
  msp->head = (msize|INUSE_BITS);
  m->seg.base = m->least_addr = tbase;
  m->seg.size = m->footprint = m->max_footprint = tsize;
  m->magic = mparams.magic;
  m->release_checks = MAX_RELEASE_CHECK_RATE;
  m->mflags = mparams.default_mflags;
  m->extp = 0;
  m->exts = 0;
  disable_contiguous(m);
  init_bins(m);
  mn = next_chunk(mem2chunk(m));
  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
  check_top_chunk(m, m->top);
  return m;
}

mspace create_mspace(size_t capacity, int locked) {
  mstate m = 0;
  size_t msize;
  ensure_initialization();
  msize = pad_request(sizeof(struct malloc_state));
  if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
    size_t rs = ((capacity == 0)? mparams.granularity :
                 (capacity + TOP_FOOT_SIZE + msize));
    size_t tsize = granularity_align(rs);
    char* tbase = (char*)(dlmalloc_requires_more_vespene_gas(tsize));
    if (tbase != CMFAIL) {
      m = init_user_mstate(tbase, tsize);
      m->seg.sflags = USE_MMAP_BIT;
      set_lock(m, locked);
    }
  }
  return (mspace)m;
}

mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
  mstate m = 0;
  size_t msize;
  ensure_initialization();
  msize = pad_request(sizeof(struct malloc_state));
  if (capacity > msize + TOP_FOOT_SIZE &&
      capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
    m = init_user_mstate((char*)base, capacity);
    m->seg.sflags = EXTERN_BIT;
    set_lock(m, locked);
  }
  return (mspace)m;
}

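/*
  Usage sketch for the mspace API in this section (only compiled when
  MSPACES is nonzero; the capacity and sizes are illustrative). An
  mspace is a self-contained heap: individual mspace_free calls are
  optional if the whole space is torn down with destroy_mspace below.

    mspace msp = create_mspace(0, 0);   default capacity, no locking
    if (msp != 0) {
      void *p = mspace_malloc(msp, 128);
      use p, then either:
      mspace_free(msp, p);
      or reclaim everything at once:
      destroy_mspace(msp);
    }
*/
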
int mspace_track_large_chunks(mspace msp, int enable) {
  int ret = 0;
  mstate ms = (mstate)msp;
  if (!PREACTION(ms)) {
    if (!use_mmap(ms)) {
      ret = 1;
    }
    if (!enable) {
      enable_mmap(ms);
    } else {
      disable_mmap(ms);
    }
    POSTACTION(ms);
  }
  return ret;
}

size_t destroy_mspace(mspace msp) {
  size_t freed = 0;
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    msegmentptr sp = &ms->seg;
    (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */
    while (sp != 0) {
      char* base = sp->base;
      size_t size = sp->size;
      flag_t flag = sp->sflags;
      (void)base; /* placate people compiling -Wunused-variable */
      sp = sp->next;
      if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
          CALL_MUNMAP(base, size) == 0)
        freed += size;
    }
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return freed;
}

/*
  mspace versions of routines are near-clones of the global
  versions. This is not so nice but better than the alternatives.
*/

void* mspace_malloc(mspace msp, size_t bytes) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  if (!PREACTION(ms)) {
    void* mem;
    size_t nb;
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = ms->smallmap >> idx;

      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
        mchunkptr b, p;
        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
        b = smallbin_at(ms, idx);
        p = b->fd;
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(ms, b, p, idx);
        set_inuse_and_pinuse(ms, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(ms, mem, nb);
        goto postaction;
      }

      else if (nb > ms->dvsize) {
        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(ms, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(ms, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(ms, p, small_index2size(i));
          else {
            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(ms, r, rsize);
          }
          mem = chunk2mem(p);
          check_malloced_chunk(ms, mem, nb);
          goto postaction;
        }

        else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
          check_malloced_chunk(ms, mem, nb);
          goto postaction;
        }
      }
    }
    else if (bytes >= MAX_REQUEST)
      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
    else {
      nb = pad_request(bytes);
      if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
        check_malloced_chunk(ms, mem, nb);
        goto postaction;
      }
    }

    if (nb <= ms->dvsize) {
      size_t rsize = ms->dvsize - nb;
      mchunkptr p = ms->dv;
      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
        mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
        ms->dvsize = rsize;
        set_size_and_pinuse_of_free_chunk(r, rsize);
        set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
      }
      else { /* exhaust dv */
        size_t dvs = ms->dvsize;
        ms->dvsize = 0;
        ms->dv = 0;
        set_inuse_and_pinuse(ms, p, dvs);
      }
      mem = chunk2mem(p);
      check_malloced_chunk(ms, mem, nb);
      goto postaction;
    }

    else if (nb < ms->topsize) { /* Split top */
      size_t rsize = ms->topsize -= nb;
      mchunkptr p = ms->top;
      mchunkptr r = ms->top = chunk_plus_offset(p, nb);
      r->head = rsize | PINUSE_BIT;
      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
      mem = chunk2mem(p);
      check_top_chunk(ms, ms->top);
      check_malloced_chunk(ms, mem, nb);
      goto postaction;
    }

    mem = sys_alloc(ms, nb);
    POSTACTION(ms);
    if (mem == MAP_FAILED && weaken(__oom_hook)) {
      weaken(__oom_hook)(bytes);
    }
    return mem;

  postaction:
    POSTACTION(ms);
    return mem;
  }

  return 0;
}

void mspace_free(mspace msp, void* mem) {
|
2020-06-15 14:18:57 +00:00
|
|
|
if (mem != 0) {
|
2022-04-15 06:39:48 +00:00
|
|
|
mchunkptr p = mem2chunk(mem);
|
2020-06-15 14:18:57 +00:00
|
|
|
#if FOOTERS
|
2022-04-15 06:39:48 +00:00
|
|
|
mstate fm = get_mstate_for(p);
|
|
|
|
(void)msp; /* placate people compiling -Wunused */
|
|
|
|
#else /* FOOTERS */
|
|
|
|
mstate fm = (mstate)msp;
|
|
|
|
#endif /* FOOTERS */
|
|
|
|
if (!ok_magic(fm)) {
|
2020-06-15 14:18:57 +00:00
|
|
|
USAGE_ERROR_ACTION(fm, p);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (!PREACTION(fm)) {
|
|
|
|
check_inuse_chunk(fm, p);
|
|
|
|
if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
|
|
|
|
size_t psize = chunksize(p);
|
|
|
|
mchunkptr next = chunk_plus_offset(p, psize);
|
|
|
|
if (!pinuse(p)) {
|
|
|
|
size_t prevsize = p->prev_foot;
|
|
|
|
if (is_mmapped(p)) {
|
|
|
|
psize += prevsize + MMAP_FOOT_PAD;
|
2022-04-15 06:39:48 +00:00
|
|
|
if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
|
2020-06-15 14:18:57 +00:00
|
|
|
fm->footprint -= psize;
|
|
|
|
goto postaction;
|
2022-04-15 06:39:48 +00:00
|
|
|
}
|
|
|
|
else {
|
2020-06-15 14:18:57 +00:00
|
|
|
mchunkptr prev = chunk_minus_offset(p, prevsize);
|
|
|
|
psize += prevsize;
|
|
|
|
p = prev;
|
|
|
|
if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
|
|
|
|
if (p != fm->dv) {
|
|
|
|
unlink_chunk(fm, p, prevsize);
|
2022-04-15 06:39:48 +00:00
|
|
|
}
|
|
|
|
else if ((next->head & INUSE_BITS) == INUSE_BITS) {
|
2020-06-15 14:18:57 +00:00
|
|
|
fm->dvsize = psize;
|
|
|
|
set_free_with_pinuse(p, psize, next);
|
|
|
|
goto postaction;
|
|
|
|
}
|
2022-04-15 06:39:48 +00:00
|
|
|
}
|
|
|
|
else
|
2020-06-15 14:18:57 +00:00
|
|
|
goto erroraction;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
|
2022-04-15 06:39:48 +00:00
|
|
|
if (!cinuse(next)) { /* consolidate forward */
|
2020-06-15 14:18:57 +00:00
|
|
|
if (next == fm->top) {
|
|
|
|
size_t tsize = fm->topsize += psize;
|
|
|
|
fm->top = p;
|
|
|
|
p->head = tsize | PINUSE_BIT;
|
|
|
|
if (p == fm->dv) {
|
|
|
|
fm->dv = 0;
|
|
|
|
fm->dvsize = 0;
|
|
|
|
}
|
2022-04-15 06:39:48 +00:00
|
|
|
if (should_trim(fm, tsize))
|
|
|
|
sys_trim(fm, 0);
|
2020-06-15 14:18:57 +00:00
|
|
|
goto postaction;
|
2022-04-15 06:39:48 +00:00
|
|
|
}
|
|
|
|
else if (next == fm->dv) {
|
2020-06-15 14:18:57 +00:00
|
|
|
size_t dsize = fm->dvsize += psize;
|
|
|
|
fm->dv = p;
|
|
|
|
set_size_and_pinuse_of_free_chunk(p, dsize);
|
|
|
|
goto postaction;
|
2022-04-15 06:39:48 +00:00
|
|
|
}
|
|
|
|
else {
|
2020-06-15 14:18:57 +00:00
|
|
|
size_t nsize = chunksize(next);
|
|
|
|
psize += nsize;
|
|
|
|
unlink_chunk(fm, next, nsize);
|
|
|
|
set_size_and_pinuse_of_free_chunk(p, psize);
|
|
|
|
if (p == fm->dv) {
|
|
|
|
fm->dvsize = psize;
|
|
|
|
goto postaction;
|
|
|
|
}
|
|
|
|
}
|
2020-09-07 04:39:00 +00:00
|
|
|
}
|
2022-04-15 06:39:48 +00:00
|
|
|
else
|
|
|
|
set_free_with_pinuse(p, psize, next);
|
2020-06-15 14:18:57 +00:00
|
|
|
|
|
|
|
if (is_small(psize)) {
|
|
|
|
insert_small_chunk(fm, p, psize);
|
|
|
|
check_free_chunk(fm, p);
|
2022-04-15 06:39:48 +00:00
|
|
|
}
|
|
|
|
else {
|
2020-06-15 14:18:57 +00:00
|
|
|
tchunkptr tp = (tchunkptr)p;
|
|
|
|
insert_large_chunk(fm, tp, psize);
|
|
|
|
check_free_chunk(fm, p);
|
2022-04-15 06:39:48 +00:00
|
|
|
if (--fm->release_checks == 0)
|
|
|
|
release_unused_segments(fm);
|
2020-06-15 14:18:57 +00:00
|
|
|
}
|
|
|
|
goto postaction;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
erroraction:
|
|
|
|
USAGE_ERROR_ACTION(fm, p);
|
|
|
|
postaction:
|
|
|
|
POSTACTION(fm);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
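
/* Illustrative sketch, not part of this file: the usual mspace lifecycle
   pairs the routines above with create_mspace()/destroy_mspace(). Freeing
   through the owning mspace coalesces with neighbors as shown above, and
   the whole arena can be torn down in one call. example_arena() is a
   hypothetical helper: */
#if 0
void example_arena(void) {
  mspace arena = create_mspace(0, 1);     /* default capacity, locked */
  char *buf = mspace_malloc(arena, 4096); /* allocate from the arena */
  mspace_free(arena, buf);                /* return it to the arena */
  destroy_mspace(arena);                  /* release all arena memory */
}
#endif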

void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
  void* mem;
  size_t req = 0;
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  if (n_elements != 0) {
    req = n_elements * elem_size;
    if (((n_elements | elem_size) & ~(size_t)0xffff) &&
        (req / n_elements != elem_size))
      req = MAX_SIZE_T; /* force downstream failure on overflow */
  }
  mem = internal_malloc(ms, req);
  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
    bzero(mem, req);
  return mem;
}
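
/* The overflow guard above only pays for a division when it must: if both
   operands fit in 16 bits, their product fits in 32 bits and cannot wrap
   size_t, so the (n_elements | elem_size) & ~0xffff mask skips the check on
   the common path. A standalone restatement of the same test, using the
   hypothetical helper mul_would_overflow() for illustration: */
#if 0
static int mul_would_overflow(size_t n, size_t m) {
  if (((n | m) & ~(size_t)0xffff) == 0) return 0; /* both < 2^16: safe */
  return n != 0 && (n * m) / n != m;              /* divide to verify */
}
#endif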

void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
  void* mem = 0;
  if (oldmem == 0) {
    mem = mspace_malloc(msp, bytes);
  }
  else if (bytes >= MAX_REQUEST) {
    MALLOC_FAILURE_ACTION;
  }
#ifdef REALLOC_ZERO_BYTES_FREES
  else if (bytes == 0) {
    mspace_free(msp, oldmem);
  }
#endif /* REALLOC_ZERO_BYTES_FREES */
  else {
    size_t nb = request2size(bytes);
    mchunkptr oldp = mem2chunk(oldmem);
#if ! FOOTERS
    mstate m = (mstate)msp;
#else /* FOOTERS */
    mstate m = get_mstate_for(oldp);
    if (!ok_magic(m)) {
      USAGE_ERROR_ACTION(m, oldmem);
      return 0;
    }
#endif /* FOOTERS */
    if (!PREACTION(m)) {
      mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
      POSTACTION(m);
      if (newp != 0) {
        check_inuse_chunk(m, newp);
        mem = chunk2mem(newp);
      }
      else {
        mem = mspace_malloc(m, bytes);
        if (mem != 0) {
          size_t oc = chunksize(oldp) - overhead_for(oldp);
          memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
          mspace_free(m, oldmem);
        }
      }
    }
  }
  return mem;
}
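
/* Illustrative sketch, not part of this file: mspace_realloc() first tries
   try_realloc_chunk() to resize in place and only then falls back to
   allocate-copy-free, so a doubling buffer often avoids copies entirely.
   grow() is a hypothetical helper: */
#if 0
void *grow(mspace arena, void *buf, size_t *cap) {
  void *p = mspace_realloc(arena, buf, *cap * 2);
  if (p) *cap *= 2; /* on failure the old block is untouched and still owned */
  return p;
}
#endif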

void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
  void* mem = 0;
  if (oldmem != 0) {
    if (bytes >= MAX_REQUEST) {
      MALLOC_FAILURE_ACTION;
    }
    else {
      size_t nb = request2size(bytes);
      mchunkptr oldp = mem2chunk(oldmem);
#if ! FOOTERS
      mstate m = (mstate)msp;
#else /* FOOTERS */
      mstate m = get_mstate_for(oldp);
      (void)msp; /* placate people compiling -Wunused */
      if (!ok_magic(m)) {
        USAGE_ERROR_ACTION(m, oldmem);
        return 0;
      }
#endif /* FOOTERS */
      if (!PREACTION(m)) {
        mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
        POSTACTION(m);
        if (newp == oldp) {
          check_inuse_chunk(m, newp);
          mem = oldmem;
        }
      }
    }
  }
  return mem;
}

void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  if (alignment <= MALLOC_ALIGNMENT)
    return mspace_malloc(msp, bytes);
  return internal_memalign(ms, alignment, bytes);
}
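
/* Illustrative sketch, not part of this file: alignments at or below
   MALLOC_ALIGNMENT cost nothing, so mspace_memalign() only takes the slower
   internal_memalign() path for stricter requests such as a page boundary.
   page_aligned() is a hypothetical helper and 4096 is an assumed page size: */
#if 0
void *page_aligned(mspace arena, size_t n) {
  return mspace_memalign(arena, 4096, n);
}
#endif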

void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                 size_t elem_size, void* chunks[]) {
  size_t sz = elem_size; /* serves as 1-element array */
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  return ialloc(ms, n_elements, &sz, 3, chunks);
}

void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                   size_t sizes[], void* chunks[]) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
  return ialloc(ms, n_elements, sizes, 0, chunks);
}
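
/* Illustrative sketch, not part of this file: independent_comalloc() carves
   several differently-sized chunks out of one contiguous allocation, which
   keeps data that is always used together on the same pages. make_node() is
   a hypothetical helper: */
#if 0
void make_node(mspace arena, size_t nkeys) {
  void *parts[2];
  size_t sizes[2] = { nkeys * sizeof(int), nkeys * sizeof(char *) };
  if (mspace_independent_comalloc(arena, 2, sizes, parts)) {
    int *keys = parts[0];    /* first chunk: key array */
    char **names = parts[1]; /* second chunk: name array */
    /* ... use keys/names; each may later be freed individually ... */
  }
}
#endif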

size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
  return internal_bulk_free((mstate)msp, array, nelem);
}

#if MALLOC_INSPECT_ALL
void mspace_inspect_all(mspace msp,
                        void(*handler)(void *start,
                                       void *end,
                                       size_t used_bytes,
                                       void* callback_arg),
                        void* arg) {
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    if (!PREACTION(ms)) {
      internal_inspect_all(ms, handler, arg);
      POSTACTION(ms);
    }
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
}
#endif /* MALLOC_INSPECT_ALL */
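
/* Illustrative sketch, not part of this file: a handler passed to
   mspace_inspect_all() receives each traversed region plus how many of its
   bytes are in use (zero for free chunks), so heap occupancy can be tallied
   without touching allocator internals. tally() is a hypothetical helper: */
#if 0
static void tally(void *start, void *end, size_t used_bytes, void *arg) {
  size_t *total = arg;
  (void)start; (void)end;
  *total += used_bytes;
}
/* usage: size_t used = 0; mspace_inspect_all(arena, tally, &used); */
#endif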

int mspace_trim(mspace msp, size_t pad) {
  int result = 0;
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    if (!PREACTION(ms)) {
      result = sys_trim(ms, pad);
      POSTACTION(ms);
    }
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return result;
}

#if !NO_MALLOC_STATS
void mspace_malloc_stats(mspace msp) {
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    internal_malloc_stats(ms);
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
}
#endif /* NO_MALLOC_STATS */

size_t mspace_footprint(mspace msp) {
  size_t result = 0;
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    result = ms->footprint;
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return result;
}

size_t mspace_max_footprint(mspace msp) {
  size_t result = 0;
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    result = ms->max_footprint;
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return result;
}

size_t mspace_footprint_limit(mspace msp) {
  size_t result = 0;
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    size_t maf = ms->footprint_limit;
    result = (maf == 0) ? MAX_SIZE_T : maf;
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return result;
}

size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
  size_t result = 0;
  mstate ms = (mstate)msp;
  if (ok_magic(ms)) {
    /* The three cases must be chained with else-if: without the chain the
       bytes == 0 result is clobbered by granularity_align(bytes) below. */
    if (bytes == 0)
      result = granularity_align(1); /* Use minimal size */
    else if (bytes == MAX_SIZE_T)
      result = 0;                    /* disable */
    else
      result = granularity_align(bytes);
    ms->footprint_limit = result;
  }
  else {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return result;
}
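
/* Illustrative sketch, not part of this file: once a footprint limit is
   installed, further system allocation for the arena fails instead of
   growing the mapping; the value actually enforced is rounded to the
   allocation granularity, which is why the call returns it. cap_arena()
   is a hypothetical helper: */
#if 0
void cap_arena(mspace arena) {
  size_t granted = mspace_set_footprint_limit(arena, 1 << 20); /* ~1 MiB */
  (void)granted; /* the rounded-up limit now in effect */
}
#endif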

#if !NO_MALLINFO
struct mallinfo mspace_mallinfo(mspace msp) {
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
  }
  return internal_mallinfo(ms);
}
#endif /* NO_MALLINFO */

size_t mspace_usable_size(const void* mem) {
  if (mem != 0) {
    mchunkptr p = mem2chunk(mem);
    if (is_inuse(p))
      return chunksize(p) - overhead_for(p);
  }
  return 0;
}

int mspace_mallopt(int param_number, int value) {
  return change_mparam(param_number, value);
}

#endif /* MSPACES */