Make improvements

- Emulator can now test the αcτµαlly pδrταblε εxεcµταblε bootloader

- Whipped up a webserver named redbean. It serves 150k requests per
  second on a single core. Bundling assets inside the zip enables
  extremely fast serving for two reasons. The first is that zip central
  directory lookups go faster than stat() system calls. The second is
  that the zip format and gzip Content-Encoding both use DEFLATE, so
  compressed responses can be served via the sendfile() system call,
  which does an in-kernel copy directly from the zip executable
  structure; see the sketch below. Also note that redbean zip
  executables deploy easily to all platforms, since the same native
  executable runs on Linux, Mac, BSD, and Windows.
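
  As an illustration of that second point, here is roughly what the
  sendfile() fast path looks like (a minimal sketch, not redbean's
  actual code: it assumes the zip central directory has already been
  parsed into an offset, compressed size, CRC-32, and uncompressed
  size, and it assumes Linux's sendfile() signature on a little-endian
  host):

  #include <stdint.h>
  #include <string.h>
  #include <sys/sendfile.h>
  #include <unistd.h>

  /* zip and gzip both wrap raw DEFLATE, so a DEFLATE-compressed zip
     asset can be served as Content-Encoding: gzip by framing bytes
     that are copied in-kernel straight out of the executable. */
  ssize_t send_gzipped_asset(int client, int zipfd, off_t offset,
                             size_t compressed_size, uint32_t crc32,
                             uint32_t uncompressed_size) {
    static const uint8_t kGzipHeader[10] = {0x1f, 0x8b, 8, 0, 0,
                                            0,    0,    0, 0, 3};
    uint8_t trailer[8];
    if (write(client, kGzipHeader, sizeof(kGzipHeader)) == -1) return -1;
    /* in-kernel copy of the raw DEFLATE stream: no read(), no deflate() */
    if (sendfile(client, zipfd, &offset, compressed_size) == -1) return -1;
    memcpy(trailer + 0, &crc32, 4);             /* CRC-32 (little-endian) */
    memcpy(trailer + 4, &uncompressed_size, 4); /* ISIZE mod 2^32 */
    return write(client, trailer, sizeof(trailer));
  }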

- Address sanitizer now works very well
Justine Tunney 2020-09-06 21:39:00 -07:00
parent 7327c345f9
commit 416fd86676
230 changed files with 9835 additions and 5682 deletions

View file

@@ -18,7 +18,7 @@ size_t bulk_free(void *array[], size_t nelem) {
* if allocated with ialloc or the array is sorted.
*/
size_t unfreed = 0;
if (!PREACTION(gm)) {
if (!PREACTION(g_dlmalloc)) {
void **a;
void **fence = &(array[nelem]);
for (a = array; a != fence; ++a) {
@@ -27,30 +27,32 @@ size_t bulk_free(void *array[], size_t nelem) {
mchunkptr p = mem2chunk(ADDRESS_DEATH_ACTION(mem));
size_t psize = chunksize(p);
#if FOOTERS
if (get_mstate_for(p) != gm) {
if (get_mstate_for(p) != g_dlmalloc) {
++unfreed;
continue;
}
#endif
check_inuse_chunk(gm, p);
check_inuse_chunk(g_dlmalloc, p);
*a = 0;
if (RTCHECK(ok_address(gm, p) && ok_inuse(p))) {
if (RTCHECK(ok_address(g_dlmalloc, p) && ok_inuse(p))) {
void **b = a + 1; /* try to merge with next chunk */
mchunkptr next = next_chunk(p);
if (b != fence && *b == chunk2mem(next)) {
size_t newsize = chunksize(next) + psize;
set_inuse(gm, p, newsize);
set_inuse(g_dlmalloc, p, newsize);
*b = chunk2mem(p);
} else
dlmalloc_dispose_chunk(gm, p, psize);
dlmalloc_dispose_chunk(g_dlmalloc, p, psize);
} else {
CORRUPTION_ERROR_ACTION(gm);
CORRUPTION_ERROR_ACTION(g_dlmalloc);
break;
}
}
}
if (should_trim(gm, gm->topsize)) dlmalloc_sys_trim(gm, 0);
POSTACTION(gm);
if (should_trim(g_dlmalloc, g_dlmalloc->topsize)) {
dlmalloc_sys_trim(g_dlmalloc, 0);
}
POSTACTION(g_dlmalloc);
}
return unfreed;
}

third_party/dlmalloc/dlcalloc.c (vendored, new file, +11)
View file

@@ -0,0 +1,11 @@
#include "libc/str/str.h"
#include "third_party/dlmalloc/dlmalloc.h"
void *dlcalloc(size_t n_elements, size_t elem_size) {
void *mem;
size_t req;
if (__builtin_mul_overflow(n_elements, elem_size, &req)) req = -1;
mem = dlmalloc(req);
if (mem != 0 && calloc_must_clear(mem2chunk(mem))) memset(mem, 0, req);
return mem;
}
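
The __builtin_mul_overflow guard above is what defeats the classic
calloc overflow: when n_elements * elem_size wraps, req saturates to
(size_t)-1 and dlmalloc refuses the request. A standalone demonstration
of the hazard it closes, using ordinary calloc() and illustrative
values:

#include <stdio.h>
#include <stdlib.h>

int main(void) {
  /* n * 8 wraps to 0 under naive multiplication, which would hand the
     caller a tiny buffer while it believes it owns 2**61 elements. */
  size_t n = (size_t)-1 / 8 + 1;
  void *p = calloc(n, 8); /* a correct calloc must return NULL here */
  printf("calloc(%zu, 8) -> %p\n", n, p);
  free(p);
  return 0;
}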

View file

@@ -164,7 +164,7 @@ static void **ialloc(mstate m, size_t n_elements, size_t *sizes, int opts,
void **dlindependent_calloc(size_t n_elements, size_t elem_size,
void *chunks[]) {
size_t sz = elem_size; /* serves as 1-element array */
return ialloc(gm, n_elements, &sz, 3, chunks);
return ialloc(g_dlmalloc, n_elements, &sz, 3, chunks);
}
/**
@@ -224,5 +224,5 @@ void **dlindependent_calloc(size_t n_elements, size_t elem_size,
*/
void **dlindependent_comalloc(size_t n_elements, size_t sizes[],
void *chunks[]) {
return ialloc(gm, n_elements, sizes, 0, chunks);
return ialloc(g_dlmalloc, n_elements, sizes, 0, chunks);
}

View file

@@ -29,7 +29,7 @@ void do_check_mmapped_chunk(mstate m, mchunkptr p) {
assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
assert(ok_address(m, p));
assert(!is_small(sz));
assert((len & (mparams.page_size - SIZE_T_ONE)) == 0);
assert((len & (g_mparams.page_size - SIZE_T_ONE)) == 0);
assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0);
}
@@ -222,7 +222,7 @@ static size_t traverse_and_check(mstate m) {
return sum;
}
/* Check all properties of malloc_state. */
/* Check all properties of MallocState. */
void do_check_malloc_state(mstate m) {
bindex_t i;
size_t total;

View file

@@ -19,10 +19,10 @@
STATIC_YOINK("_init_dlmalloc");
#define OOM_WARNING "warning: running out of physical memory\n"
#define is_global(M) ((M) == &_gm_)
#define is_global(M) ((M) == g_dlmalloc)
struct malloc_params mparams;
struct malloc_state _gm_;
struct MallocState g_dlmalloc[1];
struct MallocParams g_mparams;
/**
* Acquires more system memory for dlmalloc.
@@ -63,7 +63,7 @@ static void dlmalloc_init_top(mstate m, mchunkptr p, size_t psize) {
p->head = psize | PINUSE_BIT;
/* set size of fake trailing chunk holding overhead space only once */
chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
m->trim_check = mparams.trim_threshold; /* reset on each update */
m->trim_check = g_mparams.trim_threshold; /* reset on each update */
}
/* Initialize bins for a new mstate that is otherwise zeroed out */
@@ -184,10 +184,6 @@ static int has_segment_link(mstate m, msegmentptr ss) {
/* For sys_alloc, enough padding to ensure can malloc request on success */
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
static size_t mmap_align(size_t s) {
return granularity_align(s);
}
/* Malloc using mmap */
static void *mmap_alloc(mstate m, size_t nb) {
size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
@@ -217,41 +213,10 @@ static void *mmap_alloc(mstate m, size_t nb) {
return 0;
}
/* Realloc using mmap */
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
size_t oldsize = chunksize(oldp);
if (is_small(nb)) return 0; /* Can't shrink mmap regions below small size */
/* Keep old chunk if big enough but not too big */
if (oldsize >= nb + SIZE_T_SIZE &&
(oldsize - nb) <= (mparams.granularity << 1)) {
return oldp;
} else {
size_t offset = oldp->prev_foot;
size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
char *cp = mremap((char *)oldp - offset, oldmmsize, newmmsize, flags, 0);
if (cp != CMFAIL) {
mchunkptr newp = (mchunkptr)(cp + offset);
size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
newp->head = psize;
mark_inuse_foot(m, newp, psize);
chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0;
if (cp < m->least_addr) m->least_addr = cp;
if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) {
m->max_footprint = m->footprint;
}
check_mmapped_chunk(m, newp);
return newp;
}
}
return 0;
}
/**
* Gets memory from system.
*/
void *sys_alloc(mstate m, size_t nb) {
static void *dlmalloc_sys_alloc(mstate m, size_t nb) {
char *tbase = CMFAIL;
size_t tsize = 0;
flag_t mmap_flag = 0;
@@ -260,7 +225,7 @@ void *sys_alloc(mstate m, size_t nb) {
ensure_initialization();
/* Directly map large chunks, but only if already initialized */
if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
if (use_mmap(m) && nb >= g_mparams.mmap_threshold && m->topsize != 0) {
void *mem = mmap_alloc(m, nb);
if (mem != 0) return mem;
}
@@ -290,13 +255,13 @@ void *sys_alloc(mstate m, size_t nb) {
m->seg.base = tbase;
m->seg.size = tsize;
m->seg.sflags = mmap_flag;
m->magic = mparams.magic;
m->magic = g_mparams.magic;
m->release_checks = MAX_RELEASE_CHECK_RATE;
init_bins(m);
if (is_global(m)) {
dlmalloc_init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
} else {
/* Offset top by embedded malloc_state */
/* Offset top by embedded MallocState */
mchunkptr mn = next_chunk(mem2chunk(m));
dlmalloc_init_top(
m, mn, (size_t)((tbase + tsize) - (char *)mn) - TOP_FOOT_SIZE);
@@ -401,7 +366,7 @@ int dlmalloc_sys_trim(mstate m, size_t pad) {
pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
if (m->topsize > pad) {
/* Shrink top space in granularity-size units, keeping at least one */
size_t unit = mparams.granularity;
size_t unit = g_mparams.granularity;
size_t extra =
((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - SIZE_T_ONE) * unit;
msegmentptr sp = segment_holding(m, (char *)m->top);
@@ -438,13 +403,13 @@ int dlmalloc_sys_trim(mstate m, size_t pad) {
#if LOCK_AT_FORK
static void pre_fork(void) {
ACQUIRE_LOCK(&(gm)->mutex);
ACQUIRE_LOCK(&(g_dlmalloc)->mutex);
}
static void post_fork_parent(void) {
RELEASE_LOCK(&(gm)->mutex);
RELEASE_LOCK(&(g_dlmalloc)->mutex);
}
static void post_fork_child(void) {
INITIAL_LOCK(&(gm)->mutex);
INITIAL_LOCK(&(g_dlmalloc)->mutex);
}
#endif /* LOCK_AT_FORK */
@@ -453,7 +418,7 @@ static void post_fork_child(void) {
/* Consolidate and bin a chunk. Differs from exported versions
of free mainly in that the chunk need not be marked as inuse.
*/
hidden void dlmalloc_dispose_chunk(mstate m, mchunkptr p, size_t psize) {
void dlmalloc_dispose_chunk(mstate m, mchunkptr p, size_t psize) {
mchunkptr next = chunk_plus_offset(p, psize);
if (!pinuse(p)) {
mchunkptr prev;
@@ -646,7 +611,7 @@ void *dlmalloc(size_t bytes) {
ensure_initialization(); /* initialize in sys_alloc if not using locks */
#endif
if (!PREACTION(gm)) {
if (!PREACTION(g_dlmalloc)) {
void *mem;
size_t nb;
if (bytes <= MAX_SMALL_REQUEST) {
@@ -654,22 +619,22 @@ void *dlmalloc(size_t bytes) {
binmap_t smallbits;
nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
idx = small_index(nb);
smallbits = gm->smallmap >> idx;
smallbits = g_dlmalloc->smallmap >> idx;
if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
mchunkptr b, p;
idx += ~smallbits & 1; /* Uses next bin if idx empty */
b = smallbin_at(gm, idx);
b = smallbin_at(g_dlmalloc, idx);
p = b->fd;
assert(chunksize(p) == small_index2size(idx));
unlink_first_small_chunk(gm, b, p, idx);
set_inuse_and_pinuse(gm, p, small_index2size(idx));
unlink_first_small_chunk(g_dlmalloc, b, p, idx);
set_inuse_and_pinuse(g_dlmalloc, p, small_index2size(idx));
mem = chunk2mem(p);
check_malloced_chunk(gm, mem, nb);
check_malloced_chunk(g_dlmalloc, mem, nb);
goto postaction;
}
else if (nb > gm->dvsize) {
else if (nb > g_dlmalloc->dvsize) {
if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
mchunkptr b, p, r;
size_t rsize;
@@ -677,27 +642,28 @@ void *dlmalloc(size_t bytes) {
binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
binmap_t leastbit = least_bit(leftbits);
compute_bit2idx(leastbit, i);
b = smallbin_at(gm, i);
b = smallbin_at(g_dlmalloc, i);
p = b->fd;
assert(chunksize(p) == small_index2size(i));
unlink_first_small_chunk(gm, b, p, i);
unlink_first_small_chunk(g_dlmalloc, b, p, i);
rsize = small_index2size(i) - nb;
/* Fit here cannot be remainderless if 4byte sizes */
if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
set_inuse_and_pinuse(gm, p, small_index2size(i));
set_inuse_and_pinuse(g_dlmalloc, p, small_index2size(i));
else {
set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
set_size_and_pinuse_of_inuse_chunk(g_dlmalloc, p, nb);
r = chunk_plus_offset(p, nb);
set_size_and_pinuse_of_free_chunk(r, rsize);
replace_dv(gm, r, rsize);
replace_dv(g_dlmalloc, r, rsize);
}
mem = chunk2mem(p);
check_malloced_chunk(gm, mem, nb);
check_malloced_chunk(g_dlmalloc, mem, nb);
goto postaction;
}
else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
check_malloced_chunk(gm, mem, nb);
else if (g_dlmalloc->treemap != 0 &&
(mem = tmalloc_small(g_dlmalloc, nb)) != 0) {
check_malloced_chunk(g_dlmalloc, mem, nb);
goto postaction;
}
}
@@ -705,55 +671,54 @@ void *dlmalloc(size_t bytes) {
nb = SIZE_MAX; /* Too big to allocate. Force failure (in sys alloc) */
} else {
nb = pad_request(bytes);
if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
check_malloced_chunk(gm, mem, nb);
if (g_dlmalloc->treemap != 0 &&
(mem = tmalloc_large(g_dlmalloc, nb)) != 0) {
check_malloced_chunk(g_dlmalloc, mem, nb);
goto postaction;
}
}
if (nb <= gm->dvsize) {
size_t rsize = gm->dvsize - nb;
mchunkptr p = gm->dv;
if (nb <= g_dlmalloc->dvsize) {
size_t rsize = g_dlmalloc->dvsize - nb;
mchunkptr p = g_dlmalloc->dv;
if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
gm->dvsize = rsize;
mchunkptr r = g_dlmalloc->dv = chunk_plus_offset(p, nb);
g_dlmalloc->dvsize = rsize;
set_size_and_pinuse_of_free_chunk(r, rsize);
set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
set_size_and_pinuse_of_inuse_chunk(g_dlmalloc, p, nb);
} else { /* exhaust dv */
size_t dvs = gm->dvsize;
gm->dvsize = 0;
gm->dv = 0;
set_inuse_and_pinuse(gm, p, dvs);
size_t dvs = g_dlmalloc->dvsize;
g_dlmalloc->dvsize = 0;
g_dlmalloc->dv = 0;
set_inuse_and_pinuse(g_dlmalloc, p, dvs);
}
mem = chunk2mem(p);
check_malloced_chunk(gm, mem, nb);
check_malloced_chunk(g_dlmalloc, mem, nb);
goto postaction;
}
else if (nb < gm->topsize) { /* Split top */
size_t rsize = gm->topsize -= nb;
mchunkptr p = gm->top;
mchunkptr r = gm->top = chunk_plus_offset(p, nb);
else if (nb < g_dlmalloc->topsize) { /* Split top */
size_t rsize = g_dlmalloc->topsize -= nb;
mchunkptr p = g_dlmalloc->top;
mchunkptr r = g_dlmalloc->top = chunk_plus_offset(p, nb);
r->head = rsize | PINUSE_BIT;
set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
set_size_and_pinuse_of_inuse_chunk(g_dlmalloc, p, nb);
mem = chunk2mem(p);
check_top_chunk(gm, gm->top);
check_malloced_chunk(gm, mem, nb);
check_top_chunk(g_dlmalloc, g_dlmalloc->top);
check_malloced_chunk(g_dlmalloc, mem, nb);
goto postaction;
}
mem = sys_alloc(gm, nb);
mem = dlmalloc_sys_alloc(g_dlmalloc, nb);
postaction:
POSTACTION(gm);
POSTACTION(g_dlmalloc);
return ADDRESS_BIRTH_ACTION(mem);
}
return 0;
}
/* ──────────────────────────── free ─────────────────────────── */
void dlfree(void *mem) {
/*
Consolidate freed chunks with preceding or succeeding bordering
@@ -775,7 +740,7 @@ void dlfree(void *mem) {
return;
}
#else /* FOOTERS */
#define fm gm
#define fm g_dlmalloc
#endif /* FOOTERS */
if (!PREACTION(fm)) {
@@ -834,8 +799,9 @@ void dlfree(void *mem) {
goto postaction;
}
}
} else
} else {
set_free_with_pinuse(p, psize, next);
}
if (is_small(psize)) {
insert_small_chunk(fm, p, psize);
@@ -860,151 +826,12 @@ void dlfree(void *mem) {
#endif /* FOOTERS */
}
/**
* Multiplies sizes w/ saturation and overflow detection.
*
* @param count may be 0 for realloc() free() behavior
* @param opt_out set to count*itemsize or SIZE_MAX on overflow
* @return true on success or false on overflow
*/
static bool sizemultiply(size_t *opt_out, size_t count, size_t itemsize) {
size_t result;
bool overflowed;
overflowed = __builtin_mul_overflow(count, itemsize, &result);
if (overflowed) result = SIZE_MAX;
if (opt_out) *opt_out = result;
return !overflowed;
}
void *dlcalloc(size_t n_elements, size_t elem_size) {
void *mem;
size_t req;
sizemultiply(&req, n_elements, elem_size); /* punts error */
mem = dlmalloc(req);
if (mem != 0 && calloc_must_clear(mem2chunk(mem))) memset(mem, 0, req);
return mem;
}
/* ──────────── Internal support for realloc, memalign, etc ────────────── */
/* Try to realloc; only in-place unless can_move true */
hidden mchunkptr dlmalloc_try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
int can_move) {
mchunkptr newp = 0;
size_t oldsize = chunksize(p);
mchunkptr next = chunk_plus_offset(p, oldsize);
if (RTCHECK(ok_address(m, p) && ok_inuse(p) && ok_next(p, next) &&
ok_pinuse(next))) {
if (is_mmapped(p)) {
newp = mmap_resize(m, p, nb, can_move);
} else if (oldsize >= nb) { /* already big enough */
size_t rsize = oldsize - nb;
if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */
mchunkptr r = chunk_plus_offset(p, nb);
set_inuse(m, p, nb);
set_inuse(m, r, rsize);
dlmalloc_dispose_chunk(m, r, rsize);
}
newp = p;
} else if (next == m->top) { /* extend into top */
if (oldsize + m->topsize > nb) {
size_t newsize = oldsize + m->topsize;
size_t newtopsize = newsize - nb;
mchunkptr newtop = chunk_plus_offset(p, nb);
set_inuse(m, p, nb);
newtop->head = newtopsize | PINUSE_BIT;
m->top = newtop;
m->topsize = newtopsize;
newp = p;
}
} else if (next == m->dv) { /* extend into dv */
size_t dvs = m->dvsize;
if (oldsize + dvs >= nb) {
size_t dsize = oldsize + dvs - nb;
if (dsize >= MIN_CHUNK_SIZE) {
mchunkptr r = chunk_plus_offset(p, nb);
mchunkptr n = chunk_plus_offset(r, dsize);
set_inuse(m, p, nb);
set_size_and_pinuse_of_free_chunk(r, dsize);
clear_pinuse(n);
m->dvsize = dsize;
m->dv = r;
} else { /* exhaust dv */
size_t newsize = oldsize + dvs;
set_inuse(m, p, newsize);
m->dvsize = 0;
m->dv = 0;
}
newp = p;
}
} else if (!cinuse(next)) { /* extend into next free chunk */
size_t nextsize = chunksize(next);
if (oldsize + nextsize >= nb) {
size_t rsize = oldsize + nextsize - nb;
unlink_chunk(m, next, nextsize);
if (rsize < MIN_CHUNK_SIZE) {
size_t newsize = oldsize + nextsize;
set_inuse(m, p, newsize);
} else {
mchunkptr r = chunk_plus_offset(p, nb);
set_inuse(m, p, nb);
set_inuse(m, r, rsize);
dlmalloc_dispose_chunk(m, r, rsize);
}
newp = p;
}
}
} else {
USAGE_ERROR_ACTION(m, chunk2mem(p));
}
return newp;
}
void *dlrealloc(void *oldmem, size_t bytes) {
void *mem = 0;
if (oldmem == 0) {
mem = dlmalloc(bytes);
} else if (bytes >= MAX_REQUEST) {
enomem();
} else if (bytes == 0) {
dlfree(oldmem);
} else {
size_t nb = request2size(bytes);
mchunkptr oldp = mem2chunk(oldmem);
#if !FOOTERS
mstate m = gm;
#else /* FOOTERS */
mstate m = get_mstate_for(oldp);
if (!ok_magic(m)) {
USAGE_ERROR_ACTION(m, oldmem);
return 0;
}
#endif /* FOOTERS */
if (!PREACTION(m)) {
mchunkptr newp = dlmalloc_try_realloc_chunk(m, oldp, nb, 1);
POSTACTION(m);
if (newp != 0) {
check_inuse_chunk(m, newp);
mem = chunk2mem(newp);
} else {
mem = dlmalloc(bytes);
if (mem != 0) {
size_t oc = chunksize(oldp) - overhead_for(oldp);
memcpy(mem, oldmem, (oc < bytes) ? oc : bytes);
dlfree(oldmem);
}
}
}
}
return mem;
}
textstartup void dlmalloc_init(void) {
#ifdef NEED_GLOBAL_LOCK_INIT
if (malloc_global_mutex_status <= 0) init_malloc_global_mutex();
#endif
ACQUIRE_MALLOC_GLOBAL_LOCK();
if (mparams.magic == 0) {
if (g_mparams.magic == 0) {
size_t magic;
size_t psize = PAGESIZE;
size_t gsize = MAX(g_ntsysteminfo.dwAllocationGranularity, 64 * 1024);
@@ -1021,15 +848,15 @@ textstartup void dlmalloc_init(void) {
((gsize & (gsize - SIZE_T_ONE)) != 0) ||
((psize & (psize - SIZE_T_ONE)) != 0))
MALLOC_ABORT;
mparams.granularity = gsize;
mparams.page_size = psize;
mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
mparams.default_mflags =
g_mparams.granularity = gsize;
g_mparams.page_size = psize;
g_mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
g_mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
g_mparams.default_mflags =
USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT;
/* Set up lock for main malloc area */
gm->mflags = mparams.default_mflags;
(void)INITIAL_LOCK(&gm->mutex);
g_dlmalloc->mflags = g_mparams.default_mflags;
(void)INITIAL_LOCK(&g_dlmalloc->mutex);
#if LOCK_AT_FORK
pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
#endif
@@ -1037,7 +864,7 @@ textstartup void dlmalloc_init(void) {
magic |= (size_t)8U; /* ensure nonzero */
magic &= ~(size_t)7U; /* improve chances of fault for bad values */
/* Until memory modes commonly available, use volatile-write */
(*(volatile size_t *)(&(mparams.magic))) = magic;
(*(volatile size_t *)(&(g_mparams.magic))) = magic;
}
RELEASE_MALLOC_GLOBAL_LOCK();
}

View file

@@ -553,10 +553,10 @@ struct malloc_segment {
typedef struct malloc_segment msegment;
typedef struct malloc_segment *msegmentptr;
/* ──────────────────────────── malloc_state ───────────────────────────── */
/* ──────────────────────────── MallocState ───────────────────────────── */
/*
A malloc_state holds all of the bookkeeping for a space.
A MallocState holds all of the bookkeeping for a space.
The main fields are:
Top
@@ -640,7 +640,7 @@ typedef struct malloc_segment *msegmentptr;
extensions to this malloc.
*/
struct malloc_state {
struct MallocState {
binmap_t smallmap;
binmap_t treemap;
size_t dvsize;
@@ -657,22 +657,21 @@ struct malloc_state {
size_t max_footprint;
size_t footprint_limit; /* zero means no limit */
flag_t mflags;
msegment seg;
void *extp; /* Unused but available for extensions */
size_t exts;
};
#define gm (&_gm_)
extern struct malloc_state _gm_;
typedef struct malloc_state *mstate;
struct MallocStats {
size_t maxfp;
size_t fp;
size_t used;
};
typedef struct MallocState *mstate;
extern struct MallocState g_dlmalloc[1];
/* ─────────────────────────────── Hooks ──────────────────────────────── */
#ifdef MTRACE /* TODO(jart): Add --mtrace flag for this */
@@ -718,16 +717,16 @@ void *AddressDeathAction(void *);
(align_offset(chunk2mem(0)) + pad_request(sizeof(struct malloc_segment)) + \
MIN_CHUNK_SIZE)
/* ───────────── Global malloc_state and malloc_params ─────────────────── */
/* ───────────── Global MallocState and MallocParams ─────────────────── */
/*
malloc_params holds global properties, including those that can be
MallocParams holds global properties, including those that can be
dynamically set using mallopt. There is a single instance, mparams,
initialized in init_mparams. Note that the non-zeroness of "magic"
also serves as an initialization flag.
*/
struct malloc_params {
struct MallocParams {
size_t magic;
size_t page_size;
size_t granularity;
@@ -736,18 +735,18 @@ struct malloc_params {
flag_t default_mflags;
};
extern struct malloc_params mparams;
extern struct MallocParams g_mparams;
#define ensure_initialization() \
/* we use a constructor [jart] */ \
assert(mparams.magic != 0)
/* (void)(mparams.magic != 0 || init_mparams()) */
assert(g_mparams.magic != 0)
/* (void)(g_mparams.magic != 0 || init_mparams()) */
#define is_initialized(M) ((M)->top != 0)
#define is_page_aligned(S) \
(((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
(((size_t)(S) & (g_mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S) \
(((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
(((size_t)(S) & (g_mparams.granularity - SIZE_T_ONE)) == 0)
/* ────────────────────────── system alloc setup ───────────────────────── */
@@ -777,13 +776,16 @@ extern struct malloc_params mparams;
(L) ? ((M)->mflags | USE_LOCK_BIT) : ((M)->mflags & ~USE_LOCK_BIT))
/* page-align a size */
#define page_align(S) \
(((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
#define page_align(S) \
(((S) + (g_mparams.page_size - SIZE_T_ONE)) & \
~(g_mparams.page_size - SIZE_T_ONE))
/* granularity-align a size */
#define granularity_align(S) \
(((S) + (mparams.granularity - SIZE_T_ONE)) & \
~(mparams.granularity - SIZE_T_ONE))
#define granularity_align(S) \
(((S) + (g_mparams.granularity - SIZE_T_ONE)) & \
~(g_mparams.granularity - SIZE_T_ONE))
#define mmap_align(s) granularity_align((size_t)(s))
/* ──────────────────────── Operations on bin maps ─────────────────────── */
@@ -849,10 +851,10 @@ extern struct malloc_params mparams;
/*
For security, the main invariant is that malloc/free/etc never
writes to a static address other than malloc_state, unless static
malloc_state itself has been corrupted, which cannot occur via
writes to a static address other than MallocState, unless static
MallocState itself has been corrupted, which cannot occur via
malloc (because of these checks). In essence this means that we
believe all pointers, sizes, maps etc held in malloc_state, but
believe all pointers, sizes, maps etc held in MallocState, but
check all of those linked or offsetted from other embedded data
structures. These checks are interspersed with main code in a way
that tends to minimize their run-time cost.
@@ -893,7 +895,7 @@ extern struct malloc_params mparams;
#if (FOOTERS && !IsTrustworthy())
/* Check if (alleged) mstate m has expected magic field */
#define ok_magic(M) \
((uintptr_t)(M) <= 0x00007ffffffffffful && (M)->magic == mparams.magic)
((uintptr_t)(M) <= 0x00007ffffffffffful && (M)->magic == g_mparams.magic)
#else /* (FOOTERS && !IsTrustworthy()) */
#define ok_magic(M) (1)
#endif /* (FOOTERS && !IsTrustworthy()) */
@@ -934,12 +936,13 @@ extern struct malloc_params mparams;
#else /* FOOTERS */
/* Set foot of inuse chunk to be xor of mstate and seed */
#define mark_inuse_foot(M, p, s) \
(((mchunkptr)((char *)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
#define mark_inuse_foot(M, p, s) \
(((mchunkptr)((char *)(p) + (s)))->prev_foot = \
((size_t)(M) ^ g_mparams.magic))
#define get_mstate_for(p) \
((mstate)(((mchunkptr)((char *)(p) + (chunksize(p))))->prev_foot ^ \
mparams.magic))
g_mparams.magic))
#define set_inuse(M, p, s) \
((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \

View file

@@ -0,0 +1,105 @@
#include "third_party/dlmalloc/dlmalloc.h"
/* Realloc using mmap */
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
size_t oldsize = chunksize(oldp);
if (is_small(nb)) return 0; /* Can't shrink mmap regions below small size */
/* Keep old chunk if big enough but not too big */
if (oldsize >= nb + SIZE_T_SIZE &&
(oldsize - nb) <= (g_mparams.granularity << 1)) {
return oldp;
} else {
size_t offset = oldp->prev_foot;
size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
char *cp = mremap((char *)oldp - offset, oldmmsize, newmmsize, flags, 0);
if (cp != CMFAIL) {
mchunkptr newp = (mchunkptr)(cp + offset);
size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
newp->head = psize;
mark_inuse_foot(m, newp, psize);
chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0;
if (cp < m->least_addr) m->least_addr = cp;
if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) {
m->max_footprint = m->footprint;
}
check_mmapped_chunk(m, newp);
return newp;
}
}
return 0;
}
/* Try to realloc; only in-place unless can_move true */
mchunkptr dlmalloc_try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
int can_move) {
mchunkptr newp = 0;
size_t oldsize = chunksize(p);
mchunkptr next = chunk_plus_offset(p, oldsize);
if (RTCHECK(ok_address(m, p) && ok_inuse(p) && ok_next(p, next) &&
ok_pinuse(next))) {
if (is_mmapped(p)) {
newp = mmap_resize(m, p, nb, can_move);
} else if (oldsize >= nb) { /* already big enough */
size_t rsize = oldsize - nb;
if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */
mchunkptr r = chunk_plus_offset(p, nb);
set_inuse(m, p, nb);
set_inuse(m, r, rsize);
dlmalloc_dispose_chunk(m, r, rsize);
}
newp = p;
} else if (next == m->top) { /* extend into top */
if (oldsize + m->topsize > nb) {
size_t newsize = oldsize + m->topsize;
size_t newtopsize = newsize - nb;
mchunkptr newtop = chunk_plus_offset(p, nb);
set_inuse(m, p, nb);
newtop->head = newtopsize | PINUSE_BIT;
m->top = newtop;
m->topsize = newtopsize;
newp = p;
}
} else if (next == m->dv) { /* extend into dv */
size_t dvs = m->dvsize;
if (oldsize + dvs >= nb) {
size_t dsize = oldsize + dvs - nb;
if (dsize >= MIN_CHUNK_SIZE) {
mchunkptr r = chunk_plus_offset(p, nb);
mchunkptr n = chunk_plus_offset(r, dsize);
set_inuse(m, p, nb);
set_size_and_pinuse_of_free_chunk(r, dsize);
clear_pinuse(n);
m->dvsize = dsize;
m->dv = r;
} else { /* exhaust dv */
size_t newsize = oldsize + dvs;
set_inuse(m, p, newsize);
m->dvsize = 0;
m->dv = 0;
}
newp = p;
}
} else if (!cinuse(next)) { /* extend into next free chunk */
size_t nextsize = chunksize(next);
if (oldsize + nextsize >= nb) {
size_t rsize = oldsize + nextsize - nb;
unlink_chunk(m, next, nextsize);
if (rsize < MIN_CHUNK_SIZE) {
size_t newsize = oldsize + nextsize;
set_inuse(m, p, newsize);
} else {
mchunkptr r = chunk_plus_offset(p, nb);
set_inuse(m, p, nb);
set_inuse(m, r, rsize);
dlmalloc_dispose_chunk(m, r, rsize);
}
newp = p;
}
}
} else {
USAGE_ERROR_ACTION(m, chunk2mem(p));
}
return newp;
}

View file

@@ -3,5 +3,5 @@
void *dlmemalign(size_t alignment, size_t bytes) {
if (alignment <= MALLOC_ALIGNMENT) return dlmalloc(bytes);
return dlmemalign$impl(gm, alignment, bytes);
return dlmemalign$impl(g_dlmalloc, alignment, bytes);
}

View file

@@ -16,7 +16,7 @@ int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
return einval();
} else if (bytes <= MAX_REQUEST - alignment) {
if (alignment < MIN_CHUNK_SIZE) alignment = MIN_CHUNK_SIZE;
mem = dlmemalign$impl(gm, alignment, bytes);
mem = dlmemalign$impl(g_dlmalloc, alignment, bytes);
}
}
if (mem == 0) {

View file

@@ -4,7 +4,7 @@
void *dlpvalloc(size_t bytes) {
size_t pagesz;
ensure_initialization();
pagesz = mparams.page_size;
pagesz = g_mparams.page_size;
return dlmemalign(pagesz,
(bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
}

third_party/dlmalloc/dlrealloc.c (vendored, new file, +42)
View file

@@ -0,0 +1,42 @@
#include "libc/str/str.h"
#include "libc/sysv/errfuns.h"
#include "third_party/dlmalloc/dlmalloc.h"
void *dlrealloc(void *oldmem, size_t bytes) {
void *mem = 0;
if (oldmem == 0) {
mem = dlmalloc(bytes);
} else if (bytes >= MAX_REQUEST) {
enomem();
} else if (bytes == 0) {
dlfree(oldmem);
} else {
size_t nb = request2size(bytes);
mchunkptr oldp = mem2chunk(oldmem);
#if !FOOTERS
mstate m = g_dlmalloc;
#else /* FOOTERS */
mstate m = get_mstate_for(oldp);
if (!ok_magic(m)) {
USAGE_ERROR_ACTION(m, oldmem);
return 0;
}
#endif /* FOOTERS */
if (!PREACTION(m)) {
mchunkptr newp = dlmalloc_try_realloc_chunk(m, oldp, nb, 1);
POSTACTION(m);
if (newp != 0) {
check_inuse_chunk(m, newp);
mem = chunk2mem(newp);
} else {
mem = dlmalloc(bytes);
if (mem != 0) {
size_t oc = chunksize(oldp) - overhead_for(oldp);
memcpy(mem, oldmem, (oc < bytes) ? oc : bytes);
dlfree(oldmem);
}
}
}
}
return mem;
}
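
Note the contract dlrealloc() implements above: a null oldmem
degenerates to dlmalloc(), a zero size degenerates to dlfree(), and when
dlmalloc_try_realloc_chunk() can't grow the chunk in place the data is
copied into a fresh allocation. A hypothetical caller-side sketch
(append() is illustrative, not part of this commit):

#include <string.h>
#include "third_party/dlmalloc/dlmalloc.h"

/* Grow buf to hold extralen more bytes plus a NUL terminator. */
char *append(char *buf, size_t oldlen, const char *extra, size_t extralen) {
  char *p = dlrealloc(buf, oldlen + extralen + 1);
  if (!p) return NULL; /* on failure the old buffer remains valid */
  memcpy(p + oldlen, extra, extralen);
  p[oldlen + extralen] = '\0';
  return p;
}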

View file

@@ -11,7 +11,7 @@ void *dlrealloc_in_place(void *oldmem, size_t bytes) {
size_t nb = request2size(bytes);
mchunkptr oldp = mem2chunk(oldmem);
#if !FOOTERS
mstate m = gm;
mstate m = g_dlmalloc;
#else /* FOOTERS */
mstate m = get_mstate_for(oldp);
if (!ok_magic(m)) {

View file

@@ -4,6 +4,6 @@
void *dlvalloc(size_t bytes) {
size_t pagesz;
ensure_initialization();
pagesz = mparams.page_size;
pagesz = g_mparams.page_size;
return dlmemalign(pagesz, bytes);
}

View file

@@ -1,5 +1,5 @@
#include "third_party/dlmalloc/dlmalloc.h"
#include "libc/mem/mem.h"
#include "third_party/dlmalloc/dlmalloc.h"
/**
* Returns (by copy) a struct containing various summary statistics:
@@ -25,16 +25,16 @@
struct mallinfo mallinfo(void) {
struct mallinfo nm = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
ensure_initialization();
if (!PREACTION(gm)) {
check_malloc_state(gm);
if (is_initialized(gm)) {
if (!PREACTION(g_dlmalloc)) {
check_malloc_state(g_dlmalloc);
if (is_initialized(g_dlmalloc)) {
size_t nfree = SIZE_T_ONE; /* top always free */
size_t mfree = gm->topsize + TOP_FOOT_SIZE;
size_t mfree = g_dlmalloc->topsize + TOP_FOOT_SIZE;
size_t sum = mfree;
msegmentptr s = &gm->seg;
msegmentptr s = &g_dlmalloc->seg;
while (s != 0) {
mchunkptr q = align_as_chunk(s->base);
while (segment_holds(s, q) && q != gm->top &&
while (segment_holds(s, q) && q != g_dlmalloc->top &&
q->head != FENCEPOST_HEAD) {
size_t sz = chunksize(q);
sum += sz;
@@ -48,13 +48,13 @@ struct mallinfo mallinfo(void) {
}
nm.arena = sum;
nm.ordblks = nfree;
nm.hblkhd = gm->footprint - sum;
nm.usmblks = gm->max_footprint;
nm.uordblks = gm->footprint - mfree;
nm.hblkhd = g_dlmalloc->footprint - sum;
nm.usmblks = g_dlmalloc->max_footprint;
nm.uordblks = g_dlmalloc->footprint - mfree;
nm.fordblks = mfree;
nm.keepcost = gm->topsize;
nm.keepcost = g_dlmalloc->topsize;
}
POSTACTION(gm);
POSTACTION(g_dlmalloc);
}
return nm;
}

View file

@@ -1,5 +1,5 @@
#include "third_party/dlmalloc/dlmalloc.h"
#include "libc/mem/mem.h"
#include "third_party/dlmalloc/dlmalloc.h"
/**
* Returns the number of bytes obtained from the system. The total
@@ -9,4 +9,6 @@
* Even if locks are otherwise defined, this function does not use them,
* so results might not be up to date.
*/
size_t malloc_footprint(void) { return gm->footprint; }
size_t malloc_footprint(void) {
return g_dlmalloc->footprint;
}

View file

@@ -10,6 +10,6 @@
* this number of bytes can actually be obtained from the system.
*/
size_t malloc_footprint_limit(void) {
size_t maf = gm->footprint_limit;
size_t maf = g_dlmalloc->footprint_limit;
return maf == 0 ? SIZE_MAX : maf;
}

View file

@@ -64,8 +64,8 @@ void malloc_inspect_all(void (*handler)(void* start, void* end,
size_t used_bytes, void* callback_arg),
void* arg) {
ensure_initialization();
if (!PREACTION(gm)) {
internal_inspect_all(gm, handler, arg);
POSTACTION(gm);
if (!PREACTION(g_dlmalloc)) {
internal_inspect_all(g_dlmalloc, handler, arg);
POSTACTION(g_dlmalloc);
}
}

View file

@@ -1,5 +1,5 @@
#include "third_party/dlmalloc/dlmalloc.h"
#include "libc/mem/mem.h"
#include "third_party/dlmalloc/dlmalloc.h"
/**
* Returns the maximum number of bytes obtained from the system. This
@@ -11,4 +11,6 @@
* defined, this function does not use them, so results might not be up
* to date.
*/
size_t malloc_max_footprint(void) { return gm->max_footprint; }
size_t malloc_max_footprint(void) {
return g_dlmalloc->max_footprint;
}

View file

@@ -21,5 +21,5 @@ size_t malloc_set_footprint_limit(size_t bytes) {
} else {
result = granularity_align(bytes);
}
return gm->footprint_limit = result;
return g_dlmalloc->footprint_limit = result;
}

View file

@@ -23,9 +23,9 @@
int malloc_trim(size_t pad) {
int result = 0;
ensure_initialization();
if (!PREACTION(gm)) {
result = dlmalloc_sys_trim(gm, pad);
POSTACTION(gm);
if (!PREACTION(g_dlmalloc)) {
result = dlmalloc_sys_trim(g_dlmalloc, pad);
POSTACTION(g_dlmalloc);
}
return result;
}

View file

@@ -24,17 +24,17 @@ bool32 mallopt(int param_number, int value) {
val = (value == -1) ? SIZE_MAX : (size_t)value;
switch (param_number) {
case M_TRIM_THRESHOLD:
mparams.trim_threshold = val;
g_mparams.trim_threshold = val;
return true;
case M_GRANULARITY:
if (val >= mparams.page_size && ((val & (val - 1)) == 0)) {
mparams.granularity = val;
if (val >= g_mparams.page_size && ((val & (val - 1)) == 0)) {
g_mparams.granularity = val;
return true;
} else {
return false;
}
case M_MMAP_THRESHOLD:
mparams.mmap_threshold = val;
g_mparams.mmap_threshold = val;
return true;
default:
return false;