Mirror of https://github.com/jart/cosmopolitan.git, synced 2025-07-23 02:50:29 +00:00
Improve memory safety
This commit makes numerous refinements to cosmopolitan memory handling.

The default stack size has been reduced from 2mb to 128kb. A new macro is now
provided so you can easily reconfigure the stack size to be any value you
want. Work around the breaking change by adding to your main:

    STATIC_STACK_SIZE(0x00200000);  // 2mb stack

If you're not sure how much stack you need, then you can use:

    STATIC_YOINK("stack_usage_logging");

After which you can `sort -nr o/$MODE/stack.log`. Based on the unit test
suite, nothing in the Cosmopolitan repository (except for Python) needs a
stack size greater than 30kb. There are also new macros for detecting the
size and address of the stack at runtime, e.g. GetStackAddr(). We also now
support sigaltstack(), so if you want to see nice looking crash reports
whenever a stack overflow happens, you can put this in main():

    ShowCrashReports();

Under `make MODE=dbg` and `make MODE=asan` the unit testing framework will
now automatically print backtraces of memory allocations when things like
memory leaks happen. Bugs are now fixed in ASAN global variable overrun
detection. The memtrack and asan runtimes also handle edge cases now. The
new tools helped to identify a few memory leaks, which are fixed by this
change.

This change should fix an issue reported in #288 with ARG_MAX limits. Fixing
this doubled the performance of MKDEPS.COM and AR.COM yet again.
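For a concrete picture of how a program might adopt these knobs, here is a
minimal sketch based on the macros named above; the include paths and the
exact placement of the macros are assumptions, not part of this commit:

    // Minimal sketch; header locations are assumptions.
    #include "libc/log/log.h"          // ShowCrashReports() (assumed path)
    #include "libc/runtime/runtime.h"  // stack macros (assumed path)

    STATIC_STACK_SIZE(0x00200000);        // opt back into a 2mb stack
    STATIC_YOINK("stack_usage_logging");  // record usage in o/$MODE/stack.log

    int main(int argc, char *argv[]) {
      ShowCrashReports();  // readable reports, e.g. on stack overflow
      /* ... program logic ... */
      return 0;
    }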
This commit is contained in: parent a0b39f886c, commit 226aaf3547.
317 changed files with 6474 additions and 3993 deletions.
third_party/dlmalloc/README (vendored): 4 changed lines

@ -389,10 +389,6 @@ HAVE_MMAP default: 1 (true)
able to unmap memory that may have be allocated using multiple calls
to MMAP, so long as they are adjacent.

HAVE_MREMAP default: 1 on linux, else 0
If true realloc() uses mremap() to re-allocate large blocks and
extend or shrink allocation spaces.

MMAP_CLEARS default: 1 except on WINCE.
True if mmap clears memory so calloc doesn't need to. This is true
for standard unix mmap using /dev/zero and on WIN32 except for WINCE.
third_party/dlmalloc/bulk_free.c (vendored): 21 changed lines

@ -10,6 +10,9 @@
* to sort this array before calling bulk_free.
*/
size_t dlbulk_free(void *array[], size_t nelem) {
void **a, **b, *mem, **fence;
struct MallocChunk *p, *next;
size_t psize, newsize, unfreed;
/*
* Try to free all pointers in the given array. Note: this could be
* made faster, by delaying consolidation, at the price of disabling

@ -17,15 +20,15 @@ size_t dlbulk_free(void *array[], size_t nelem) {
* by combining adjacent chunks before freeing, which will occur often
* if allocated with ialloc or the array is sorted.
*/
size_t unfreed = 0;
unfreed = 0;
if (!PREACTION(g_dlmalloc)) {
void **a;
void **fence = &(array[nelem]);
a;
fence = &(array[nelem]);
for (a = array; a != fence; ++a) {
void *mem = *a;
mem = *a;
if (mem != 0) {
mchunkptr p = mem2chunk(AddressDeathAction(mem));
size_t psize = chunksize(p);
p = mem2chunk(AddressDeathAction(mem));
psize = chunksize(p);
#if FOOTERS
if (get_mstate_for(p) != g_dlmalloc) {
++unfreed;

@ -35,10 +38,10 @@ size_t dlbulk_free(void *array[], size_t nelem) {
check_inuse_chunk(g_dlmalloc, p);
*a = 0;
if (RTCHECK(ok_address(g_dlmalloc, p) && ok_inuse(p))) {
void **b = a + 1; /* try to merge with next chunk */
mchunkptr next = next_chunk(p);
b = a + 1; /* try to merge with next chunk */
next = next_chunk(p);
if (b != fence && *b == chunk2mem(next)) {
size_t newsize = chunksize(next) + psize;
newsize = chunksize(next) + psize;
set_inuse(g_dlmalloc, p, newsize);
*b = chunk2mem(p);
} else
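The comment in this hunk notes that consolidation works best when the array
is sorted. A hedged usage sketch of that advice, with a hypothetical helper
name and the dlbulk_free signature taken from the diff above:

    #include <stdint.h>
    #include <stdlib.h>

    /* dlbulk_free() as declared in the hunk above. */
    size_t dlbulk_free(void *array[], size_t nelem);

    /* Sorting by address first makes adjacent chunks likelier to merge. */
    static int ptrcmp(const void *a, const void *b) {
      uintptr_t x = (uintptr_t)*(void *const *)a;
      uintptr_t y = (uintptr_t)*(void *const *)b;
      return (x > y) - (x < y);
    }

    static size_t free_many(void *ptrs[], size_t n) {
      qsort(ptrs, n, sizeof(*ptrs), ptrcmp);
      return dlbulk_free(ptrs, n);  /* count of pointers it did not free */
    }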
third_party/dlmalloc/dlcalloc.c (vendored): 2 changed lines

@ -6,7 +6,7 @@ void *dlcalloc(size_t n_elements, size_t elem_size) {
size_t req;
if (__builtin_mul_overflow(n_elements, elem_size, &req)) req = -1;
mem = dlmalloc(req);
if (mem != 0 && calloc_must_clear(mem2chunk(mem))) {
if (mem && calloc_must_clear(mem2chunk(mem))) {
bzero(mem, req);
}
return mem;
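The calloc path above relies on __builtin_mul_overflow to saturate the
request size. A standalone sketch of that same guard in portable GCC/Clang C
(function name hypothetical, always zeroing rather than consulting
calloc_must_clear):

    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    /* If n * size overflows, ask for SIZE_MAX so the allocator fails cleanly
       instead of returning an undersized block. */
    void *checked_calloc(size_t n, size_t size) {
      size_t req;
      if (__builtin_mul_overflow(n, size, &req)) req = (size_t)-1;
      void *p = malloc(req);
      if (p) memset(p, 0, req);
      return p;
    }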
third_party/dlmalloc/dlindependent_calloc.c (vendored): 9 changed lines

@ -96,7 +96,7 @@ static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts,
}
}

#if DEBUG + MODE_DBG + 0
#ifdef DEBUG
if (marray != chunks) {
/* final element must have exactly exhausted chunk */
if (element_size != 0) {

@ -106,9 +106,10 @@ static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts,
}
check_inuse_chunk(m, mem2chunk(marray));
}
for (i = 0; i != n_elements; ++i) check_inuse_chunk(m, mem2chunk(marray[i]));

#endif /* DEBUG */
for (i = 0; i != n_elements; ++i) {
check_inuse_chunk(m, mem2chunk(marray[i]));
}
#endif /* IsModeDbg() */

POSTACTION(m);
return marray;
third_party/dlmalloc/dlmalloc.c (vendored): 113 changed lines

@ -1,3 +1,4 @@
#include "libc/assert.h"
#include "libc/bits/initializer.internal.h"
#include "libc/bits/safemacros.internal.h"
#include "libc/bits/weaken.h"

@ -13,6 +14,7 @@
#include "libc/macros.internal.h"
#include "libc/mem/mem.h"
#include "libc/nt/systeminfo.h"
#include "libc/runtime/memtrack.internal.h"
#include "libc/runtime/runtime.h"
#include "libc/str/str.h"
#include "libc/sysv/consts/fileno.h"

@ -51,7 +53,8 @@ static void *dlmalloc_requires_more_vespene_gas(size_t size) {
/* ─────────────────────────── mspace management ─────────────────────────── */

/* Initialize top chunk and its size */
static void dlmalloc_init_top(mstate m, mchunkptr p, size_t psize) {
static void dlmalloc_init_top(struct MallocState *m, mchunkptr p,
size_t psize) {
/* Ensure alignment */
size_t offset = align_offset(chunk2mem(p));
p = (mchunkptr)((char *)p + offset);

@ -65,7 +68,7 @@ static void dlmalloc_init_top(mstate m, mchunkptr p, size_t psize) {
}

/* Initialize bins for a new mstate that is otherwise zeroed out */
static void init_bins(mstate m) {
static void init_bins(struct MallocState *m) {
/* Establish circular links for smallbins */
bindex_t i;
for (i = 0; i < NSMALLBINS; ++i) {

@ -75,8 +78,8 @@ static void init_bins(mstate m) {
}

/* Allocate chunk and prepend remainder with chunk in successor base. */
static void *dlmalloc_prepend_alloc(mstate m, char *newbase, char *oldbase,
size_t nb) {
static void *dlmalloc_prepend_alloc(struct MallocState *m, char *newbase,
char *oldbase, size_t nb) {
mchunkptr p = align_as_chunk(newbase);
mchunkptr oldfirst = align_as_chunk(oldbase);
size_t psize = (char *)oldfirst - (char *)p;

@ -112,13 +115,13 @@ static void *dlmalloc_prepend_alloc(mstate m, char *newbase, char *oldbase,
}

/* Add a segment to hold a new noncontiguous region */
static void dlmalloc_add_segment(mstate m, char *tbase, size_t tsize,
flag_t mmapped) {
static void dlmalloc_add_segment(struct MallocState *m, char *tbase,
size_t tsize, flag_t mmapped) {
/* Determine locations and sizes of segment, fenceposts, old top */
char *old_top = (char *)m->top;
msegmentptr oldsp = segment_holding(m, old_top);
char *old_end = oldsp->base + oldsp->size;
size_t ssize = pad_request(sizeof(struct malloc_segment));
size_t ssize = pad_request(sizeof(struct MallocSegment));
char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
size_t offset = align_offset(chunk2mem(rawsp));
char *asp = rawsp + offset;

@ -163,8 +166,10 @@ static void dlmalloc_add_segment(mstate m, char *tbase, size_t tsize,
/* ─────────────────────────── system integration ─────────────────────────── */

/* Return true if segment contains a segment link */
static int has_segment_link(mstate m, msegmentptr ss) {
msegmentptr sp = &m->seg;
noinline int has_segment_link(struct MallocState *m, msegmentptr ss) {
msegmentptr sp;
assert(m);
sp = &m->seg;
for (;;) {
if ((char *)sp >= ss->base && (char *)sp < ss->base + ss->size) return 1;
if ((sp = sp->next) == 0) return 0;

@ -183,7 +188,7 @@ static int has_segment_link(mstate m, msegmentptr ss) {
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)

/* Malloc using mmap */
static void *mmap_alloc(mstate m, size_t nb) {
static void *mmap_alloc(struct MallocState *m, size_t nb) {
size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
if (m->footprint_limit != 0) {
size_t fp = m->footprint + mmsize;

@ -214,7 +219,7 @@ static void *mmap_alloc(mstate m, size_t nb) {
/**
* Gets memory from system.
*/
static void *dlmalloc_sys_alloc(mstate m, size_t nb) {
static void *dlmalloc_sys_alloc(struct MallocState *m, size_t nb) {
char *tbase = CMFAIL;
size_t tsize = 0;
flag_t mmap_flag = 0;

@ -310,7 +315,7 @@ static void *dlmalloc_sys_alloc(mstate m, size_t nb) {
}

/* Unmap and unlink any mmapped segments that don't contain used chunks */
static size_t dlmalloc_release_unused_segments(mstate m) {
static size_t dlmalloc_release_unused_segments(struct MallocState *m) {
size_t released = 0;
int nsegs = 0;
msegmentptr pred = &m->seg;

@ -357,7 +362,7 @@ static size_t dlmalloc_release_unused_segments(mstate m) {
return released;
}

int dlmalloc_sys_trim(mstate m, size_t pad) {
int dlmalloc_sys_trim(struct MallocState *m, size_t pad) {
size_t released = 0;
ensure_initialization();
if (pad < MAX_REQUEST && is_initialized(m)) {

@ -416,7 +421,7 @@ static void post_fork_child(void) {
/* Consolidate and bin a chunk. Differs from exported versions
of free mainly in that the chunk need not be marked as inuse.
*/
void dlmalloc_dispose_chunk(mstate m, mchunkptr p, size_t psize) {
void dlmalloc_dispose_chunk(struct MallocState *m, mchunkptr p, size_t psize) {
mchunkptr next = chunk_plus_offset(p, psize);
if (!pinuse(p)) {
mchunkptr prev;

@ -480,7 +485,7 @@ void dlmalloc_dispose_chunk(mstate m, mchunkptr p, size_t psize) {
/* ──────────────────────────── malloc ─────────────────────────── */

/* allocate a small request from the best fitting chunk in a treebin */
static void *tmalloc_small(mstate m, size_t nb) {
static void *tmalloc_small(struct MallocState *m, size_t nb) {
tchunkptr t, v;
size_t rsize;
bindex_t i;

@ -515,7 +520,7 @@ static void *tmalloc_small(mstate m, size_t nb) {
}

/* allocate a large request from the best fitting chunk in a treebin */
static void *tmalloc_large(mstate m, size_t nb) {
static void *tmalloc_large(struct MallocState *m, size_t nb) {
tchunkptr v = 0;
size_t rsize = -nb; /* Unsigned negation */
tchunkptr t;

@ -717,11 +722,8 @@ void *dlmalloc_impl(size_t bytes, bool takeaction) {
return 0;
}

void *dlmalloc(size_t bytes) {
return dlmalloc_impl(bytes, true);
}

void dlfree(void *mem) {
/* asan runtime depends on this function */
/*
Consolidate freed chunks with preceeding or succeeding bordering
free chunks, if they exist, and then place in a bin. Intermixed

@ -732,7 +734,7 @@ void dlfree(void *mem) {
mchunkptr p = mem2chunk(mem);

#if FOOTERS
mstate fm = get_mstate_for(p);
struct MallocState *fm = get_mstate_for(p);
if (!ok_magic(fm)) { /* HELLO
* TRY #1: rm -rf o && make -j8 -O MODE=dbg
* TRY #2: gdb: p/x (long*)(p+(*(long*)(p-8)&~(1|2|3)))

@ -784,7 +786,9 @@ void dlfree(void *mem) {
fm->dv = 0;
fm->dvsize = 0;
}
if (should_trim(fm, tsize)) dlmalloc_sys_trim(fm, 0);
if (should_trim(fm, tsize)) {
dlmalloc_sys_trim(fm, 0);
}
goto postaction;
} else if (next == fm->dv) {
size_t dsize = fm->dvsize += psize;

@ -818,6 +822,7 @@ void dlfree(void *mem) {
}
}
erroraction:
if (IsArenaFrame((intptr_t)p >> 16)) return;
USAGE_ERROR_ACTION(fm, p);
postaction:
POSTACTION(fm);

@ -829,6 +834,7 @@ void dlfree(void *mem) {
}

size_t dlmalloc_usable_size(const void *mem) {
/* asan runtime depends on this function */
if (mem != 0) {
mchunkptr p = mem2chunk(mem);
if (is_inuse(p)) return chunksize(p) - overhead_for(p);

@ -879,22 +885,19 @@ textstartup void dlmalloc_init(void) {
RELEASE_MALLOC_GLOBAL_LOCK();
}

void *dlmemalign$impl(mstate m, size_t alignment, size_t bytes) {
void *mem = 0;
if (bytes >= MAX_REQUEST - alignment) {
if (m != 0) { /* Test isn't needed but avoids compiler warning */
enomem();
}
} else {
void *dlmemalign_impl(struct MallocState *m, size_t al, size_t bytes) {
char *br, *pos, *mem = 0;
mchunkptr p, newp, remainder;
size_t nb, req, size, leadsize, newsize, remainder_size;
if (bytes < MAX_REQUEST - al) {
/* alignment is 32+ bytes rounded up to nearest two power */
alignment = 2ul << bsrl(MAX(MIN_CHUNK_SIZE, alignment) - 1);
size_t nb = request2size(bytes);
size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
mem = dlmalloc_impl(req, false);
if (mem != 0) {
mchunkptr p = mem2chunk(mem);
al = 2ul << bsrl(MAX(MIN_CHUNK_SIZE, al) - 1);
nb = request2size(bytes);
req = nb + al + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
if ((mem = dlmalloc_impl(req, false))) {
p = mem2chunk(mem);
if (PREACTION(m)) return 0;
if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */
if ((((size_t)(mem)) & (al - 1))) { /* misaligned */
/*
Find an aligned spot inside chunk. Since we need to give
back leading space in a chunk of at least MIN_CHUNK_SIZE, if

@ -903,14 +906,11 @@ void *dlmemalign$impl(mstate m, size_t alignment, size_t bytes) {
We've allocated enough total room so that this is always
possible.
*/
char *br = (char *)mem2chunk((size_t)(
((size_t)((char *)mem + alignment - SIZE_T_ONE)) & -alignment));
char *pos = ((size_t)(br - (char *)(p)) >= MIN_CHUNK_SIZE)
? br
: br + alignment;
mchunkptr newp = (mchunkptr)pos;
size_t leadsize = pos - (char *)(p);
size_t newsize = chunksize(p) - leadsize;
br = (char *)mem2chunk(ROUNDUP((uintptr_t)mem, al));
pos = (size_t)(br - (char *)(p)) >= MIN_CHUNK_SIZE ? br : br + al;
newp = (mchunkptr)pos;
leadsize = pos - (char *)(p);
newsize = chunksize(p) - leadsize;
if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
newp->prev_foot = p->prev_foot + leadsize;
newp->head = newsize;

@ -923,10 +923,10 @@ void *dlmemalign$impl(mstate m, size_t alignment, size_t bytes) {
}
/* Give back spare room at the end */
if (!is_mmapped(p)) {
size_t size = chunksize(p);
size = chunksize(p);
if (size > nb + MIN_CHUNK_SIZE) {
size_t remainder_size = size - nb;
mchunkptr remainder = chunk_plus_offset(p, nb);
remainder_size = size - nb;
remainder = chunk_plus_offset(p, nb);
set_inuse(m, p, nb);
set_inuse(m, remainder, remainder_size);
dlmalloc_dispose_chunk(m, remainder, remainder_size);

@ -934,15 +934,26 @@ void *dlmemalign$impl(mstate m, size_t alignment, size_t bytes) {
}
mem = chunk2mem(p);
assert(chunksize(p) >= nb);
assert(((size_t)mem & (alignment - 1)) == 0);
assert(!((size_t)mem & (al - 1)));
check_inuse_chunk(m, p);
POSTACTION(m);
}
return AddressBirthAction(mem);
} else {
enomem();
return 0;
}
return AddressBirthAction(mem);
}

void *dlmalloc(size_t bytes) {
return dlmalloc_impl(bytes, true);
}

void *dlmemalign(size_t alignment, size_t bytes) {
if (alignment <= MALLOC_ALIGNMENT) return dlmalloc(bytes);
return dlmemalign$impl(g_dlmalloc, alignment, bytes);
/* asan runtime depends on this function */
if (alignment <= MALLOC_ALIGNMENT) {
return dlmalloc_impl(bytes, true);
} else {
return dlmemalign_impl(g_dlmalloc, alignment, bytes);
}
}
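One detail worth calling out from the dlmemalign_impl hunk: the requested
alignment is first rounded up to a power of two no smaller than
MIN_CHUNK_SIZE via "2ul << bsrl(MAX(MIN_CHUNK_SIZE, al) - 1)", where bsrl is
a bit-scan-reverse (floor of log2). A hedged standalone sketch of that
rounding, assuming 64-bit unsigned long and a hypothetical function name:

    #include <stddef.h>

    /* Round x (>= 2) up to the next power of two, mirroring
       "2ul << bsrl(x - 1)" from the hunk above. */
    static size_t next_pow2(size_t x) {
      return 2ul << (63 - __builtin_clzl(x - 1));
    }

    /* e.g. next_pow2(33) == 64, next_pow2(64) == 64 */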
third_party/dlmalloc/dlmalloc.internal.h (vendored): 184 changed lines

@ -18,19 +18,15 @@ COSMOPOLITAN_C_START_
*/
#endif

#define DLMALLOC_VERSION 20806

#ifndef FOOTERS
#define FOOTERS !NoDebug()
#endif

#define DLMALLOC_VERSION 20806
#define HAVE_MMAP 1
#define HAVE_MREMAP 0 /* IsLinux() */
#define MMAP_CLEARS 1
#define MALLOC_ALIGNMENT __BIGGEST_ALIGNMENT__
#define MALLOC_ALIGNMENT 16
#define NO_SEGMENT_TRAVERSAL 1
#define MAX_RELEASE_CHECK_RATE 128
#define MALLOC_ABORT abort()
#define FOOTERS !NoDebug()
#define MAX_REQUEST 0xfffffffffff
#define DEFAULT_GRANULARITY (64UL * 1024UL)
#define DEFAULT_TRIM_THRESHOLD (10UL * 1024UL * 1024UL)
#define DEFAULT_MMAP_THRESHOLD (256UL * 1024UL)

@ -137,7 +133,7 @@ COSMOPOLITAN_C_START_
/*
(The following includes lightly edited explanations by Colin Plumb.)

The malloc_chunk declaration below is misleading (but accurate and
The MallocChunk declaration below is misleading (but accurate and
necessary). It declares a "view" into memory allowing access to
necessary fields at known offsets from a given base.

@ -269,19 +265,19 @@ COSMOPOLITAN_C_START_

*/

struct malloc_chunk {
size_t prev_foot; /* Size of previous chunk (if free). */
size_t head; /* Size and inuse bits. */
struct malloc_chunk *fd; /* double links -- used only if free. */
struct malloc_chunk *bk;
struct MallocChunk {
size_t prev_foot; /* Size of previous chunk (if free). */
size_t head; /* Size and inuse bits. */
struct MallocChunk *fd; /* double links -- used only if free. */
struct MallocChunk *bk;
};

typedef struct malloc_chunk mchunk;
typedef struct malloc_chunk *mchunkptr;
typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */
typedef unsigned int bindex_t; /* Described below */
typedef unsigned int binmap_t; /* Described below */
typedef unsigned int flag_t; /* The type of various bit flag sets */
typedef struct MallocChunk mchunk;
typedef struct MallocChunk *mchunkptr;
typedef struct MallocChunk *sbinptr; /* The type of bins of chunks */
typedef unsigned int bindex_t; /* Described below */
typedef unsigned int binmap_t; /* Described below */
typedef unsigned int flag_t; /* The type of various bit flag sets */

/* ─────────────────── Chunks sizes and alignments ─────────────────────── */

@ -304,7 +300,6 @@ typedef unsigned int flag_t; /* The type of various bit flag sets */
#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))

/* Bounds on request (not chunk) sizes. */
#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

/* pad request bytes into a usable size */

@ -351,7 +346,7 @@ typedef unsigned int flag_t; /* The type of various bit flag sets */
#define chunk_plus_offset(p, s) ((mchunkptr)(((char *)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((char *)(p)) - (s)))

/* Ptr to next or previous physical malloc_chunk. */
/* Ptr to next or previous physical MallocChunk. */
#define next_chunk(p) ((mchunkptr)(((char *)(p)) + ((p)->head & ~FLAG_BITS)))
#define prev_chunk(p) ((mchunkptr)(((char *)(p)) - ((p)->prev_foot)))

@ -403,7 +398,7 @@ typedef unsigned int flag_t; /* The type of various bit flag sets */
└───────────────────────────────────────────────────────────────┘

Larger chunks are kept in a form of bitwise digital trees (aka
tries) keyed on chunksizes. Because malloc_tree_chunks are only for
tries) keyed on chunksizes. Because MallocTreeChunks are only for
free chunks greater than 256 bytes, their size doesn't impose any
constraints on user chunk sizes. Each node looks like:

@ -468,21 +463,20 @@ typedef unsigned int flag_t; /* The type of various bit flag sets */
is of course much better.
*/

struct malloc_tree_chunk {
/* The first four fields must be compatible with malloc_chunk */
struct MallocTreeChunk {
/* The first four fields must be compatible with MallocChunk */
size_t prev_foot;
size_t head;
struct malloc_tree_chunk *fd;
struct malloc_tree_chunk *bk;

struct malloc_tree_chunk *child[2];
struct malloc_tree_chunk *parent;
struct MallocTreeChunk *fd;
struct MallocTreeChunk *bk;
struct MallocTreeChunk *child[2];
struct MallocTreeChunk *parent;
bindex_t index;
};

typedef struct malloc_tree_chunk tchunk;
typedef struct malloc_tree_chunk *tchunkptr;
typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
typedef struct MallocTreeChunk tchunk;
typedef struct MallocTreeChunk *tchunkptr;
typedef struct MallocTreeChunk *tbinptr; /* The type of bins of trees */

/* A little helper macro for trees */
#define leftmost_child(t) ((t)->child[0] != 0 ? (t)->child[0] : (t)->child[1])

@ -490,46 +484,44 @@ typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
/* ───────────────────────────── Segments ──────────────────────────────── */

/*
Each malloc space may include non-contiguous segments, held in a
list headed by an embedded malloc_segment record representing the
top-most space. Segments also include flags holding properties of
the space. Large chunks that are directly allocated by mmap are not
included in this list. They are instead independently created and
destroyed without otherwise keeping track of them.
Each malloc space may include non-contiguous segments, held in a list
headed by an embedded MallocSegment record representing the top-most
space. Segments also include flags holding properties of the space.
Large chunks that are directly allocated by mmap are not included in
this list. They are instead independently created and destroyed
without otherwise keeping track of them.

Segment management mainly comes into play for spaces allocated by
MMAP. Any call to MMAP might or might not return memory that is
adjacent to an existing segment. MORECORE normally contiguously
MMAP. Any call to MMAP might or might not return memory that is
adjacent to an existing segment. MORECORE normally contiguously
extends the current space, so this space is almost always adjacent,
which is simpler and faster to deal with. (This is why MORECORE is
used preferentially to MMAP when both are available -- see
sys_alloc.) When allocating using MMAP, we don't use any of the
hinting mechanisms (inconsistently) supported in various
implementations of unix mmap, or distinguish reserving from
committing memory. Instead, we just ask for space, and exploit
contiguity when we get it. It is probably possible to do
better than this on some systems, but no general scheme seems
to be significantly better.
used preferentially to MMAP when both are available -- see sys_alloc.)
When allocating using MMAP, we don't use any of the hinting mechanisms
(inconsistently) supported in various implementations of unix mmap, or
distinguish reserving from committing memory. Instead, we just ask for
space, and exploit contiguity when we get it. It is probably possible
to do better than this on some systems, but no general scheme seems to
be significantly better.

Management entails a simpler variant of the consolidation scheme
used for chunks to reduce fragmentation -- new adjacent memory is
normally prepended or appended to an existing segment. However,
there are limitations compared to chunk consolidation that mostly
reflect the fact that segment processing is relatively infrequent
(occurring only when getting memory from system) and that we
don't expect to have huge numbers of segments:
Management entails a simpler variant of the consolidation scheme used
for chunks to reduce fragmentation -- new adjacent memory is normally
prepended or appended to an existing segment. However, there are
limitations compared to chunk consolidation that mostly reflect the
fact that segment processing is relatively infrequent (occurring only
when getting memory from system) and that we don't expect to have huge
numbers of segments:

* Segments are not indexed, so traversal requires linear scans. (It
* Segments are not indexed, so traversal requires linear scans. (It
would be possible to index these, but is not worth the extra
overhead and complexity for most programs on most platforms.)
* New segments are only appended to old ones when holding top-most
memory; if they cannot be prepended to others, they are held in
different segments.

Except for the top-most segment of an mstate, each segment record
is kept at the tail of its segment. Segments are added by pushing
segment records onto the list headed by &mstate.seg for the
containing mstate.
Except for the top-most segment of an mstate, each segment record is
kept at the tail of its segment. Segments are added by pushing segment
records onto the list headed by &mstate.seg for the containing mstate.

Segment flags control allocation/merge/deallocation policies:
* If EXTERN_BIT set, then we did not allocate this segment,

@ -544,18 +536,18 @@ typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
and deallocated/trimmed using MORECORE with negative arguments.
*/

struct malloc_segment {
char *base; /* base address */
size_t size; /* allocated size */
struct malloc_segment *next; /* ptr to next segment */
flag_t sflags; /* mmap and extern flag */
struct MallocSegment {
char *base; /* base address */
size_t size; /* allocated size */
struct MallocSegment *next; /* ptr to next segment */
flag_t sflags; /* mmap and extern flag */
};

#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)

typedef struct malloc_segment msegment;
typedef struct malloc_segment *msegmentptr;
typedef struct MallocSegment msegment;
typedef struct MallocSegment *msegmentptr;

/* ──────────────────────────── MallocState ───────────────────────────── */

@ -583,7 +575,7 @@ typedef struct malloc_segment *msegmentptr;
An array of bin headers for free chunks. These bins hold chunks
with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
chunks of all the same size, spaced 8 bytes apart. To simplify
use in double-linked lists, each bin header acts as a malloc_chunk
use in double-linked lists, each bin header acts as a MallocChunk
pointing to the real first node, if it exists (else pointing to
itself). This avoids special-casing for headers. But to avoid
waste, we allocate only the fd/bk pointers of bins, and then use

@ -609,7 +601,7 @@ typedef struct malloc_segment *msegmentptr;
well as to reduce the number of memory locations read or written.

Segments
A list of segments headed by an embedded malloc_segment record
A list of segments headed by an embedded MallocSegment record
representing the initial space.

Address check support

@ -715,10 +707,12 @@ for k,v in d.items():
*/
#define MALLOC_TRACE 0

static inline void *AddressBirthAction(void *p) {
forceinline void *AddressBirthAction(void *p) {
#if MALLOC_TRACE
(dprintf)(2, "BIRTH %p\n", p);
if (weaken(PrintBacktraceUsingSymbols) && weaken(GetSymbolTable)) {
if (weaken(ShowBacktrace)) {
weaken(ShowBacktrace)(2, 0);
} else if (weaken(PrintBacktraceUsingSymbols) && weaken(GetSymbolTable)) {
weaken(PrintBacktraceUsingSymbols)(2, __builtin_frame_address(0),
weaken(GetSymbolTable)());
}

@ -726,10 +720,12 @@ static inline void *AddressBirthAction(void *p) {
return p;
}

static inline void *AddressDeathAction(void *p) {
forceinline void *AddressDeathAction(void *p) {
#if MALLOC_TRACE
(dprintf)(2, "DEATH %p\n", p);
if (weaken(PrintBacktraceUsingSymbols) && weaken(GetSymbolTable)) {
if (weaken(ShowBacktrace)) {
weaken(ShowBacktrace)(2, 0);
} else if (weaken(PrintBacktraceUsingSymbols) && weaken(GetSymbolTable)) {
weaken(PrintBacktraceUsingSymbols)(2, __builtin_frame_address(0),
weaken(GetSymbolTable)());
}

@ -766,8 +762,8 @@ static inline void *AddressDeathAction(void *p) {
that may be needed to place segment records and fenceposts when new
noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE \
(align_offset(chunk2mem(0)) + pad_request(sizeof(struct malloc_segment)) + \
#define TOP_FOOT_SIZE \
(align_offset(chunk2mem(0)) + pad_request(sizeof(struct MallocSegment)) + \
MIN_CHUNK_SIZE)

/* ───────────── Global MallocState and MallocParams ─────────────────── */

@ -1261,26 +1257,26 @@ forceinline msegmentptr segment_holding(mstate m, char *addr) {
that may be needed to place segment records and fenceposts when new
noncontiguous segments are added.
*/
#define TOP_FOOT_SIZE \
(align_offset(chunk2mem(0)) + pad_request(sizeof(struct malloc_segment)) + \
#define TOP_FOOT_SIZE \
(align_offset(chunk2mem(0)) + pad_request(sizeof(struct MallocSegment)) + \
MIN_CHUNK_SIZE)

/* ────────────────────────── Debugging setup ──────────────────────────── */

#if !(DEBUG + MODE_DBG + 0)
#define check_free_chunk(M, P)
#define check_inuse_chunk(M, P)
#define check_malloced_chunk(M, P, N)
#define check_mmapped_chunk(M, P)
#define check_malloc_state(M)
#define check_top_chunk(M, P)
#else /* DEBUG */
#ifdef DEBUG
#define check_free_chunk(M, P) do_check_free_chunk(M, P)
#define check_inuse_chunk(M, P) do_check_inuse_chunk(M, P)
#define check_top_chunk(M, P) do_check_top_chunk(M, P)
#define check_malloced_chunk(M, P, N) do_check_malloced_chunk(M, P, N)
#define check_mmapped_chunk(M, P) do_check_mmapped_chunk(M, P)
#define check_malloc_state(M) do_check_malloc_state(M)
#else
#define check_free_chunk(M, P)
#define check_inuse_chunk(M, P)
#define check_malloced_chunk(M, P, N)
#define check_mmapped_chunk(M, P)
#define check_malloc_state(M)
#define check_top_chunk(M, P)
#endif /* DEBUG */

void do_check_free_chunk(mstate, mchunkptr) hidden;

@ -1292,18 +1288,16 @@ void do_check_malloc_state(mstate) hidden;

/* ─────────────────────────── prototypes ──────────────────────────────── */

void *dlmalloc(size_t) hidden;
void *dlcalloc(size_t, size_t) hidden;
void *dlmalloc(size_t) hidden attributeallocsize((1)) mallocesque;
void *dlcalloc(size_t, size_t) hidden attributeallocsize((1, 2)) mallocesque;
void dlfree(void *) nothrow nocallback hidden;
void *dlmemalign$impl(mstate, size_t, size_t) hidden;
void *dlrealloc(void *, size_t) hidden;
void *dlrealloc_in_place(void *, size_t) hidden;
void *dlvalloc(size_t) hidden;
void *dlpvalloc(size_t) hidden;
void *dlmemalign(size_t, size_t) hidden;
void *dlmemalign_impl(mstate, size_t, size_t) hidden;
void *dlrealloc(void *, size_t) hidden reallocesque;
void *dlrealloc_in_place(void *, size_t) hidden reallocesque;
void *dlmemalign(size_t, size_t) hidden attributeallocalign((1))
attributeallocsize((2)) returnspointerwithnoaliases libcesque nodiscard;
int dlmalloc_trim(size_t) hidden;
size_t dlmalloc_usable_size(const void *) hidden;
int dlposix_memalign(void **, size_t, size_t) hidden;
void **dlindependent_calloc(size_t, size_t, void *[]) hidden;
void **dlindependent_comalloc(size_t, size_t[], void *[]) hidden;
struct MallocStats dlmalloc_stats(mstate) hidden;
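The debugging-setup hunk above follows the familiar pattern of compiling
heap-consistency checks away outside of debug builds. A generic sketch of
that pattern (names hypothetical, not the project's actual macros):

    /* Debug-only check macro: heavyweight validation in DEBUG builds,
       a no-op otherwise, mirroring the check_*_chunk() setup above. */
    #ifdef DEBUG
    void do_check_heap_invariant(void *m);  /* assumed validator */
    #define CHECK_HEAP_INVARIANT(m) do_check_heap_invariant(m)
    #else
    #define CHECK_HEAP_INVARIANT(m) ((void)0)  /* free in release builds */
    #endif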
third_party/dlmalloc/dlmalloc.mk (vendored): 6 changed lines

@ -52,6 +52,12 @@ $(THIRD_PARTY_DLMALLOC_A_OBJS): \
$(NO_MAGIC) \
-fno-sanitize=address

ifneq ($(MODE),dbg)
$(THIRD_PARTY_DLMALLOC_A_OBJS): \
OVERRIDE_CFLAGS += \
-DNDEBUG
endif

THIRD_PARTY_DLMALLOC_LIBS = $(foreach x,$(THIRD_PARTY_DLMALLOC_ARTIFACTS),$($(x)))
THIRD_PARTY_DLMALLOC_SRCS = $(foreach x,$(THIRD_PARTY_DLMALLOC_ARTIFACTS),$($(x)_SRCS))
THIRD_PARTY_DLMALLOC_HDRS = $(foreach x,$(THIRD_PARTY_DLMALLOC_ARTIFACTS),$($(x)_HDRS))
third_party/dlmalloc/dlmalloc_stats.c (vendored): 3 changed lines

@ -21,6 +21,7 @@
* More information can be obtained by calling mallinfo.
*/
struct MallocStats dlmalloc_stats(mstate m) {
struct MallocChunk *q;
struct MallocStats res;
bzero(&res, sizeof(res));
ensure_initialization();

@ -32,7 +33,7 @@ struct MallocStats dlmalloc_stats(mstate m) {
res.fp = m->footprint;
res.used = res.fp - (m->topsize + TOP_FOOT_SIZE);
while (s != 0) {
mchunkptr q = align_as_chunk(s->base);
q = align_as_chunk(s->base);
while (segment_holds(s, q) && q != m->top &&
q->head != FENCEPOST_HEAD) {
if (!is_inuse(q)) res.used -= chunksize(q);
third_party/dlmalloc/dlposix_memalign.c (vendored): 28 changed lines

@ -1,28 +0,0 @@
#include "libc/errno.h"
#include "libc/mem/mem.h"
#include "libc/sysv/errfuns.h"
#include "third_party/dlmalloc/dlmalloc.internal.h"

int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
void* mem;
size_t d, r;
mem = NULL;
if (alignment == MALLOC_ALIGNMENT) {
mem = dlmalloc(bytes);
} else {
d = alignment / sizeof(void*);
r = alignment % sizeof(void*);
if (r != 0 || d == 0 || (d & (d - SIZE_T_ONE)) != 0) {
return einval();
} else if (bytes <= MAX_REQUEST - alignment) {
if (alignment < MIN_CHUNK_SIZE) alignment = MIN_CHUNK_SIZE;
mem = dlmemalign$impl(g_dlmalloc, alignment, bytes);
}
}
if (mem == 0) {
return enomem();
} else {
*pp = mem;
return 0;
}
}
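The removed file shows the posix_memalign contract this allocator honors:
the alignment must be a power of two and a multiple of sizeof(void *),
otherwise EINVAL. A hedged standalone restatement of just that check, with a
hypothetical helper name:

    #include <stddef.h>

    /* Sketch of the argument validation dlposix_memalign performed above. */
    static int alignment_is_valid(size_t alignment) {
      size_t d = alignment / sizeof(void *);
      size_t r = alignment % sizeof(void *);
      /* nonzero, multiple of sizeof(void *), and d a power of two */
      return r == 0 && d != 0 && (d & (d - 1)) == 0;
    }
    /* an invalid alignment is what makes posix_memalign return EINVAL */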
third_party/dlmalloc/dlpvalloc.c (vendored): 10 changed lines

@ -1,10 +0,0 @@
#include "libc/mem/mem.h"
#include "third_party/dlmalloc/dlmalloc.internal.h"

void *dlpvalloc(size_t bytes) {
size_t pagesz;
ensure_initialization();
pagesz = g_mparams.page_size;
return dlmemalign(pagesz,
(bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
}
third_party/dlmalloc/dlrealloc.c (vendored): 63 changed lines

@ -1,42 +1,47 @@
#include "libc/bits/likely.h"
#include "libc/str/str.h"
#include "libc/sysv/errfuns.h"
#include "third_party/dlmalloc/dlmalloc.internal.h"

void *dlrealloc(void *oldmem, size_t bytes) {
void *mem = 0;
if (oldmem == 0) {
mem = dlmalloc(bytes);
} else if (bytes >= MAX_REQUEST) {
enomem();
} else if (bytes == 0) {
dlfree(oldmem);
} else {
size_t nb = request2size(bytes);
mchunkptr oldp = mem2chunk(oldmem);
size_t oc, nb;
struct MallocState *m;
struct MallocChunk *oldp, *newp;
if (oldmem) {
if (LIKELY(bytes < MAX_REQUEST)) {
if (bytes) {
nb = request2size(bytes);
oldp = mem2chunk(oldmem);
#if !FOOTERS
mstate m = g_dlmalloc;
#else /* FOOTERS */
mstate m = get_mstate_for(oldp);
if (!ok_magic(m)) {
USAGE_ERROR_ACTION(m, oldmem);
return 0;
}
#endif /* FOOTERS */
if (!PREACTION(m)) {
mchunkptr newp = dlmalloc_try_realloc_chunk(m, oldp, nb, 1);
POSTACTION(m);
if (newp != 0) {
check_inuse_chunk(m, newp);
mem = chunk2mem(newp);
} else {
mem = dlmalloc(bytes);
if (mem != 0) {
size_t oc = chunksize(oldp) - overhead_for(oldp);
memcpy(mem, oldmem, (oc < bytes) ? oc : bytes);
dlfree(oldmem);
m = g_dlmalloc;
#else /* FOOTERS */
m = get_mstate_for(oldp);
if (UNLIKELY(!ok_magic(m))) {
USAGE_ERROR_ACTION(m, oldmem);
return 0;
}
#endif /* FOOTERS */
if (!PREACTION(m)) {
newp = dlmalloc_try_realloc_chunk(m, oldp, nb, 1);
POSTACTION(m);
if (newp) {
check_inuse_chunk(m, newp);
mem = chunk2mem(newp);
} else if ((mem = dlmalloc(bytes))) {
oc = chunksize(oldp) - overhead_for(oldp);
memcpy(mem, oldmem, (oc < bytes) ? oc : bytes);
dlfree(oldmem);
}
}
} else {
dlfree(oldmem);
}
} else {
enomem();
}
} else {
mem = dlmalloc(bytes);
}
return mem;
}
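Note that when the in-place attempt and the fallback allocation both fail,
dlrealloc returns 0 and the old block stays allocated. A small hedged usage
sketch of leak-safe calling code (generic realloc shown; the helper name is
hypothetical), in the spirit of the leak checking this commit adds:

    #include <stdlib.h>

    /* Keep the old pointer until the new allocation is known to have
       succeeded; overwriting the only copy on failure loses the block. */
    static void *grow_buffer(void *buf, size_t newsize) {
      void *tmp = realloc(buf, newsize);
      if (tmp) {
        return tmp;   /* success: adopt the resized block */
      }
      return buf;     /* failure: caller still owns the original block */
    }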
third_party/dlmalloc/dlvalloc.c (vendored): 9 changed lines

@ -1,9 +0,0 @@
#include "libc/mem/mem.h"
#include "third_party/dlmalloc/dlmalloc.internal.h"

void *dlvalloc(size_t bytes) {
size_t pagesz;
ensure_initialization();
pagesz = g_mparams.page_size;
return dlmemalign(pagesz, bytes);
}
third_party/dlmalloc/malloc_inspect_all.c (vendored): 21 changed lines

@ -2,10 +2,10 @@
#include "third_party/dlmalloc/dlmalloc.internal.h"

static void internal_inspect_all(mstate m,
void (*handler)(void* start, void* end,
void (*handler)(void *start, void *end,
size_t used_bytes,
void* callback_arg),
void* arg) {
void *callback_arg),
void *arg) {
if (is_initialized(m)) {
mchunkptr top = m->top;
msegmentptr s;

@ -15,20 +15,21 @@ static void internal_inspect_all(mstate m,
mchunkptr next = next_chunk(q);
size_t sz = chunksize(q);
size_t used;
void* start;
void *start;
if (is_inuse(q)) {
used = sz - CHUNK_OVERHEAD; /* must not be mmapped */
start = chunk2mem(q);
} else {
used = 0;
if (is_small(sz)) { /* offset by possible bookkeeping */
start = (void*)((char*)q + sizeof(struct malloc_chunk));
start = (void *)((char *)q + sizeof(struct MallocChunk));
} else {
start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
start = (void *)((char *)q + sizeof(struct MallocTreeChunk));
}
}
if (start < (void*)next) /* skip if all space is bookkeeping */
if (start < (void *)next) { /* skip if all space is bookkeeping */
handler(start, next, used, arg);
}
if (q == top) break;
q = next;
}

@ -60,9 +61,9 @@ static void internal_inspect_all(mstate m,
*
* malloc_inspect_all(count_chunks, NULL);
*/
void malloc_inspect_all(void (*handler)(void* start, void* end,
size_t used_bytes, void* callback_arg),
void* arg) {
void malloc_inspect_all(void (*handler)(void *start, void *end,
size_t used_bytes, void *callback_arg),
void *arg) {
ensure_initialization();
if (!PREACTION(g_dlmalloc)) {
internal_inspect_all(g_dlmalloc, handler, arg);
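The doc comment in this file calls malloc_inspect_all(count_chunks, NULL).
A hedged sketch of such a handler, here tallying in-use bytes rather than
counting chunks (names hypothetical, signature taken from the diff above):

    #include <stddef.h>

    /* Hypothetical handler: sums the used bytes reported for every region
       the allocator walks. */
    static size_t g_used_total;

    static void tally_used(void *start, void *end, size_t used_bytes,
                           void *callback_arg) {
      (void)start, (void)end, (void)callback_arg;
      g_used_total += used_bytes;
    }

    /* usage: g_used_total = 0; malloc_inspect_all(tally_used, NULL); */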
third_party/dlmalloc/malloc_trim.c (vendored): 1 changed line

@ -21,6 +21,7 @@
* @return 1 if it actually released any memory, else 0
*/
int dlmalloc_trim(size_t pad) {
/* asan runtime depends on this function */
int result = 0;
ensure_initialization();
if (!PREACTION(g_dlmalloc)) {
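As documented above, dlmalloc_trim returns 1 only when memory was actually
released. A minimal hedged usage sketch (helper name hypothetical, the
declaration taken from dlmalloc.internal.h in this commit):

    int dlmalloc_trim(size_t pad);  /* from dlmalloc.internal.h */

    /* After freeing a large working set, ask the allocator to hand unused
       pages back to the operating system. */
    static void release_unused_heap(void) {
      if (dlmalloc_trim(0)) {
        /* some memory was returned to the system */
      }
    }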