Make malloc() go 200x faster

If pthread_create() is linked into the binary, then the cosmo runtime
will create an independent dlmalloc arena for each core. Whenever the
malloc() function is called, it indexes `g_heaps[sched_getcpu() / 2]`
to find the arena with the greatest hyperthread / NUMA locality. This
may be configured via an environment variable. For example, if you
say `export COSMOPOLITAN_HEAP_COUNT=1` then you can restore the old
single-heap behavior. Your process may be configured to have anywhere
between 1 and 128 heaps.
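
A minimal sketch of the lookup described above (the real code lives in
third_party/dlmalloc/threaded.inc and uses RDTSCP plus a precomputed
magic divisor rather than calling sched_getcpu(); pick_arena and
my_malloc are illustrative names, not functions in the tree):

    #define _GNU_SOURCE
    #include <sched.h>   // sched_getcpu()
    #include <stddef.h>

    typedef void *mspace;             // dlmalloc arena handle
    extern mspace g_heaps[128];       // one arena per pair of cores
    extern unsigned g_heapslen;       // 1..128, e.g. COSMOPOLITAN_HEAP_COUNT
    extern void *mspace_malloc(mspace msp, size_t bytes);

    static mspace pick_arena(void) {
      int cpu = sched_getcpu();                // core we're running on now
      if (cpu < 0) cpu = 0;                    // fallback if unsupported
      return g_heaps[(cpu / 2) % g_heapslen];  // hyperthread pair shares heap
    }

    static void *my_malloc(size_t n) {
      return mspace_malloc(pick_arena(), n);   // allocate with cpu locality
    }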

We need this revision because it makes multithreaded C++ applications
faster. For example, an HTTP server I'm working on that makes extreme
use of the STL went from 16k to 2000k requests per second after this
change was made. To understand why, try the malloc_test benchmark,
which calls malloc() + realloc() in a loop across many threads; it
sees a 250x improvement in process clock time and 200x in wall time.
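
The actual benchmark lives in the cosmo test tree; a stripped-down
analogue of what it measures looks like this (thread count and sizes
are arbitrary):

    #include <pthread.h>
    #include <stdlib.h>

    #define THREADS    16
    #define ITERATIONS 100000

    // every thread hammers malloc() + realloc() + free() in a loop, so
    // with one global arena the heap lock becomes the bottleneck, while
    // with per-cpu arenas the threads rarely touch each other's locks
    static void *worker(void *arg) {
      (void)arg;
      for (int i = 0; i < ITERATIONS; ++i) {
        char *p = malloc(64);
        p = realloc(p, 128);
        free(p);
      }
      return 0;
    }

    int main(void) {
      pthread_t th[THREADS];
      for (int i = 0; i < THREADS; ++i) pthread_create(&th[i], 0, worker, 0);
      for (int i = 0; i < THREADS; ++i) pthread_join(th[i], 0);
      return 0;
    }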

The tradeoff is that this adds ~25ns of latency to individual malloc
calls compared to MODE=tiny, once the cosmo runtime has transitioned
into a fully multi-threaded state. If you don't need malloc() to be
scalable then cosmo provides many options for you. For starters, the
heap count variable above can be set to put the process back in
single-heap mode. You can go even faster still if you include
tinymalloc.inc, like many of the programs in tool/build/.. already
do, since that'll also shave tens of kb off your binary footprint.
There's also MODE=tiny, which is configured to use just one plain
old dlmalloc arena by default.
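
For instance, opting a small single-threaded tool out of the scalable
allocator can look like the following (the include path is an
assumption here; check how the programs under tool/build/.. pull in
tinymalloc.inc for the exact location):

    #include <stdlib.h>
    // compiles a very small allocator directly into this translation
    // unit, replacing the default malloc family (path is assumed)
    #include "libc/mem/tinymalloc.inc"

    int main(void) {
      void *p = malloc(123);  // now served by tinymalloc
      free(p);
      return 0;
    }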

Another tradeoff is that we now need more memory (except in MODE=tiny)
to track the provenance of each memory allocation. This is needed so
allocations can be freely shared across threads, and because OSes can
reschedule code to different CPUs at any time.
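
To make "provenance" concrete, here's a toy header-based version of
the idea (this is NOT the dlmalloc footer code, just the general shape
of spending a few bytes per block so that any thread can route free()
back to the arena that made the allocation):

    #include <stdlib.h>

    struct Arena { int id; };                // stand-in for a dlmalloc mspace
    struct Header { struct Arena *owner; };  // provenance paid on every block

    static void *arena_alloc(struct Arena *a, size_t n) {
      struct Header *h = malloc(sizeof(struct Header) + n);
      if (!h) return 0;
      h->owner = a;        // remember which arena produced this block
      return h + 1;        // the caller only ever sees the payload
    }

    static void arena_free(void *p) {
      if (!p) return;
      struct Header *h = (struct Header *)p - 1;
      (void)h->owner;      // dlmalloc would lock this arena and recycle the chunk
      free(h);
    }

    int main(void) {
      struct Arena a = {1};
      void *p = arena_alloc(&a, 64);
      arena_free(p);       // works no matter which thread calls it
      return 0;
    }
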
Justine Tunney 2024-06-05 01:31:21 -07:00
parent 9906f299bb
commit 3609f65de3
GPG key ID: BE714B4575D6E328
60 changed files with 858 additions and 1064 deletions


@@ -26,25 +26,29 @@
#include "libc/thread/tls.h"
#include "third_party/nsync/mu.h"
#if !IsTiny()
#define FOOTERS 1
#define MSPACES 1
#define ONLY_MSPACES 1 // enables scalable multi-threaded malloc
#else
#define INSECURE 1
#define PROCEED_ON_ERROR 1
#define FOOTERS 0
#define MSPACES 0
#define ONLY_MSPACES 0
#endif
#define HAVE_MMAP 1
#define HAVE_MREMAP 0
#define HAVE_MORECORE 0
#define USE_LOCKS 2
#define USE_SPIN_LOCKS 0
#define USE_SPIN_LOCKS 1
#define MORECORE_CONTIGUOUS 0
#define MALLOC_INSPECT_ALL 1
#define ABORT_ON_ASSERT_FAILURE 0
#define LOCK_AT_FORK 1
#define NO_MALLOC_STATS 1
#if IsTiny()
#define INSECURE 1
#define PROCEED_ON_ERROR 1
#endif
#if IsModeDbg()
#define DEBUG 1
#endif
@@ -56,24 +60,29 @@
#define assert(x) if(!(x)) ABORT
#endif
#include "third_party/dlmalloc/platform.inc"
#include "third_party/dlmalloc/locks.inc"
#include "third_party/dlmalloc/chunks.inc"
#include "third_party/dlmalloc/headfoot.inc"
#include "third_party/dlmalloc/global.inc"
#include "third_party/dlmalloc/system.inc"
#include "third_party/dlmalloc/hooks.inc"
#include "third_party/dlmalloc/debugging.inc"
#include "third_party/dlmalloc/indexing.inc"
#include "third_party/dlmalloc/binmaps.inc"
#include "third_party/dlmalloc/runtimechecks.inc"
#include "third_party/dlmalloc/init.inc"
#include "third_party/dlmalloc/debuglib.inc"
#include "third_party/dlmalloc/statistics.inc"
#include "third_party/dlmalloc/smallbins.inc"
#include "third_party/dlmalloc/directmap.inc"
#include "third_party/dlmalloc/trees.inc"
#include "third_party/dlmalloc/management.inc"
#include "platform.inc"
#include "locks.inc"
#include "chunks.inc"
#include "headfoot.inc"
#if ONLY_MSPACES
#include "threaded.inc"
#endif
#include "global.inc"
#include "system.inc"
#include "hooks.inc"
#include "debugging.inc"
#include "indexing.inc"
#include "binmaps.inc"
#include "runtimechecks.inc"
#include "init.inc"
#include "debuglib.inc"
#include "statistics.inc"
#include "smallbins.inc"
#include "directmap.inc"
#include "trees.inc"
#include "management.inc"
/* -------------------------- System allocation -------------------------- */
@@ -585,29 +594,7 @@ static void* tmalloc_small(mstate m, size_t nb) {
#if !ONLY_MSPACES
#define FREEBIE_COUNT 32
#define FREEBIE_MAXSIZE 2048
void* dlmalloc(size_t bytes) {
#if FREEBIE_COUNT && !defined(MODE_DBG)
/* Allocate from thread-local freelist. */
if (__threaded && bytes && bytes <= FREEBIE_MAXSIZE) {
unsigned need = bytes;
unsigned best_index = FREEBIE_COUNT;
unsigned best_delta = FREEBIE_MAXSIZE + 1;
struct CosmoTib *tib = __get_tls();
for (int i = 0; i < FREEBIE_COUNT; ++i) {
unsigned d = tib->tib_freelen[i] - need;
best_index = d < best_delta ? i : best_index;
best_delta = d < best_delta ? d : best_delta;
}
if (best_index < FREEBIE_COUNT) {
tib->tib_freelen[best_index] = 0;
return tib->tib_freemem[best_index];
}
}
#endif
void* dlmalloc_single(size_t bytes) {
/*
Basic algorithm:
@@ -769,26 +756,6 @@ void dlfree(void* mem) {
#define fm gm
#endif /* FOOTERS */
#if FREEBIE_COUNT && !defined(MODE_DBG)
/* Free small allocations locally. */
if (__threaded) {
struct CosmoTib *tib = __get_tls();
for (int i = 0; i < FREEBIE_COUNT; ++i) {
if (!tib->tib_freelen[i]) {
if (is_inuse(p)) {
size_t len = chunksize(p) - overhead_for(p);
if (len && len < FREEBIE_MAXSIZE) {
tib->tib_freelen[i] = len;
tib->tib_freemem[i] = mem;
return;
}
}
break;
}
}
}
#endif
/* Otherwise free memory globally. */
if (!PREACTION(fm)) {
check_inuse_chunk(fm, p);
@@ -881,7 +848,7 @@ void dlfree(void* mem) {
#endif /* FOOTERS */
}
void* dlcalloc(size_t n_elements, size_t elem_size) {
void* dlcalloc_single(size_t n_elements, size_t elem_size) {
void* mem;
size_t req = 0;
if (ckd_mul(&req, n_elements, elem_size)) req = -1;
@@ -977,10 +944,10 @@ static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
void* mem = 0;
if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
alignment = MIN_CHUNK_SIZE;
/* alignment is 32+ bytes rounded up to nearest two power */
alignment = 2ul << bsrl(MAX(MIN_CHUNK_SIZE, alignment) - 1);
alignment = 2ul << bsrl(alignment - 1);
if (bytes >= MAX_REQUEST - alignment) {
if (m != 0) { /* Test isn't needed but avoids compiler warning */
MALLOC_FAILURE_ACTION;
@@ -1267,7 +1234,7 @@ static void internal_inspect_all(mstate m,
#if !ONLY_MSPACES
void* dlrealloc(void* oldmem, size_t bytes) {
void* dlrealloc_single(void* oldmem, size_t bytes) {
void* mem = 0;
if (oldmem == 0) {
mem = dlmalloc(bytes);
@@ -1343,7 +1310,7 @@ void* dlrealloc_in_place(void* oldmem, size_t bytes) {
return mem;
}
void* dlmemalign(size_t alignment, size_t bytes) {
void* dlmemalign_single(size_t alignment, size_t bytes) {
if (alignment <= MALLOC_ALIGNMENT) {
return dlmalloc(bytes);
}
@@ -1421,7 +1388,7 @@ size_t dlmalloc_set_footprint_limit(size_t bytes) {
}
#if !NO_MALLINFO
struct mallinfo dlmallinfo(void) {
struct mallinfo dlmallinfo_single(void) {
return internal_mallinfo(gm);
}
#endif /* NO_MALLINFO */
@@ -1454,6 +1421,20 @@ size_t dlmalloc_usable_size(void* mem) {
#endif /* !ONLY_MSPACES */
#if ONLY_MSPACES
void *(*dlmalloc)(size_t);
void *(*dlcalloc)(size_t, size_t);
void *(*dlrealloc)(void *, size_t);
void *(*dlmemalign)(size_t, size_t);
struct mallinfo (*dlmallinfo)(void);
#else
void *(*dlmalloc)(size_t) = dlmalloc_single;
void *(*dlcalloc)(size_t, size_t) = dlcalloc_single;
void *(*dlrealloc)(void *, size_t) = dlrealloc_single;
void *(*dlmemalign)(size_t, size_t) = dlmemalign_single;
struct mallinfo (*dlmallinfo)(void) = dlmallinfo_single;
#endif
/* ----------------------------- user mspaces ---------------------------- */
#if MSPACES


@@ -25,6 +25,28 @@
#define dlrealloc_in_place __dlrealloc_in_place
#define dlrealloc_in_place __dlrealloc_in_place
#define create_mspace_with_base __create_mspace_with_base
#define mspace_bulk_free __mspace_bulk_free
#define mspace_calloc __mspace_calloc
#define mspace_footprint __mspace_footprint
#define mspace_footprint_limit __mspace_footprint_limit
#define mspace_free __mspace_free
#define mspace_independent_calloc __mspace_independent_calloc
#define mspace_independent_comalloc __mspace_independent_comalloc
#define mspace_inspect_all __mspace_inspect_all
#define mspace_mallinfo __mspace_mallinfo
#define mspace_malloc __mspace_malloc
#define mspace_malloc_stats __mspace_malloc_stats
#define mspace_mallopt __mspace_mallopt
#define mspace_max_footprint __mspace_max_footprint
#define mspace_memalign __mspace_memalign
#define mspace_realloc __mspace_realloc
#define mspace_realloc_in_place __mspace_realloc_in_place
#define mspace_set_footprint_limit __mspace_set_footprint_limit
#define mspace_track_large_chunks __mspace_track_large_chunks
#define mspace_trim __mspace_trim
#define mspace_usable_size __mspace_usable_size
COSMOPOLITAN_C_START_
/*
@@ -41,7 +63,7 @@ COSMOPOLITAN_C_START_
maximum supported value of n differs across systems, but is in all
cases less than the maximum representable value of a size_t.
*/
void* dlmalloc(size_t);
extern void* (*dlmalloc)(size_t);
/*
free(void* p)
@@ -57,7 +79,7 @@ void dlfree(void*);
Returns a pointer to n_elements * element_size bytes, with all locations
set to zero.
*/
void* dlcalloc(size_t, size_t);
extern void* (*dlcalloc)(size_t, size_t);
/*
realloc(void* p, size_t n)
@@ -81,7 +103,7 @@ void* dlcalloc(size_t, size_t);
The old unix realloc convention of allowing the last-free'd chunk
to be used as an argument to realloc is not supported.
*/
void* dlrealloc(void*, size_t);
extern void* (*dlrealloc)(void*, size_t);
/*
realloc_in_place(void* p, size_t n)
@@ -110,7 +132,7 @@ void* dlrealloc_in_place(void*, size_t);
Overreliance on memalign is a sure way to fragment space.
*/
void* dlmemalign(size_t, size_t);
extern void* (*dlmemalign)(size_t, size_t);
/*
mallopt(int parameter_number, int parameter_value)
@@ -233,7 +255,7 @@ void dlmalloc_inspect_all(void (*handler)(void*, void*, size_t, void*),
thus be inaccurate.
*/
struct mallinfo dlmallinfo(void);
extern struct mallinfo (*dlmallinfo)(void);
/*
independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);


@@ -2,13 +2,38 @@
/* ---------------------------- setting mparams -------------------------- */
#if LOCK_AT_FORK
#if ONLY_MSPACES
static void dlmalloc_pre_fork(void) {
mstate h;
for (unsigned i = 0; i < ARRAYLEN(g_heaps); ++i)
if ((h = atomic_load_explicit(&g_heaps[i], memory_order_acquire)))
ACQUIRE_LOCK(&h->mutex);
}
static void dlmalloc_post_fork_parent(void) {
mstate h;
for (unsigned i = 0; i < ARRAYLEN(g_heaps); ++i)
if ((h = atomic_load_explicit(&g_heaps[i], memory_order_acquire)))
RELEASE_LOCK(&h->mutex);
}
static void dlmalloc_post_fork_child(void) {
mstate h;
for (unsigned i = 0; i < ARRAYLEN(g_heaps); ++i)
if ((h = atomic_load_explicit(&g_heaps[i], memory_order_acquire)))
(void)INITIAL_LOCK(&h->mutex);
}
#else
static void dlmalloc_pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); }
static void dlmalloc_post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
static void dlmalloc_post_fork_child(void) { (void)INITIAL_LOCK(&(gm)->mutex); }
#endif /* ONLY_MSPACES */
#endif /* LOCK_AT_FORK */
/* Initialize mparams */
__attribute__((__constructor__(50))) int init_mparams(void) {
__attribute__((__constructor__(49))) int init_mparams(void) {
#ifdef NEED_GLOBAL_LOCK_INIT
if (malloc_global_mutex_status <= 0)
init_malloc_global_mutex();
@@ -95,6 +120,10 @@ __attribute__((__constructor__(50))) int init_mparams(void) {
// RELEASE_MALLOC_GLOBAL_LOCK();
#if ONLY_MSPACES
threaded_dlmalloc();
#endif
__runlevel = RUNLEVEL_MALLOC;
return 1;
}


@@ -78,7 +78,7 @@ static int malloc_unlock(MLOCK_T *lk) {
#define ACQUIRE_LOCK(lk) malloc_lock(lk)
#define RELEASE_LOCK(lk) malloc_unlock(lk)
#define INITIAL_LOCK(lk) malloc_wipe(lk)
#define DESTROY_LOCK(lk)
#define DESTROY_LOCK(lk) malloc_wipe(lk)
#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);


@@ -5,7 +5,7 @@ static mstate init_user_mstate(char* tbase, size_t tsize) {
mchunkptr mn;
mchunkptr msp = align_as_chunk(tbase);
mstate m = (mstate)(chunk2mem(msp));
bzero(m, msize);
// bzero(m, msize); // [jart] it is not needed
(void)INITIAL_LOCK(&m->mutex);
msp->head = (msize|INUSE_BITS);
m->seg.base = m->least_addr = tbase;
@@ -32,7 +32,7 @@ mspace create_mspace(size_t capacity, int locked) {
size_t rs = ((capacity == 0)? mparams.granularity :
(capacity + TOP_FOOT_SIZE + msize));
size_t tsize = granularity_align(rs);
char* tbase = (char*)(dlmalloc_requires_more_vespene_gas(tsize));
char* tbase = (char*)dlmalloc_requires_more_vespene_gas(tsize);
if (tbase != CMFAIL) {
m = init_user_mstate(tbase, tsize);
m->seg.sflags = USE_MMAP_BIT;

third_party/dlmalloc/threaded.inc (new file, 200 lines)

@@ -0,0 +1,200 @@
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-
vi: set et ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi
╞══════════════════════════════════════════════════════════════════════════════╡
Copyright 2024 Justine Alexandra Roberts Tunney
Permission to use, copy, modify, and/or distribute this software for
any purpose with or without fee is hereby granted, provided that the
above copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/dce.h"
#include "libc/intrin/magicu.h"
#include "libc/intrin/strace.internal.h"
#include "libc/intrin/weaken.h"
#include "libc/macros.internal.h"
#include "libc/nexgen32e/rdtscp.h"
#include "libc/nexgen32e/x86feature.h"
#include "libc/runtime/runtime.h"
#include "libc/thread/thread.h"
#include "third_party/dlmalloc/dlmalloc.h"
#if !FOOTERS || !MSPACES
#error "threaded dlmalloc needs footers and mspaces"
#endif
static struct magicu magiu;
static unsigned g_heapslen;
static mstate g_heaps[128];
void dlfree(void *p) {
return mspace_free(0, p);
}
size_t dlmalloc_usable_size(void* mem) {
return mspace_usable_size(mem);
}
void* dlrealloc_in_place(void *p, size_t n) {
return mspace_realloc_in_place(0, p, n);
}
int dlmallopt(int param_number, int value) {
return mspace_mallopt(param_number, value);
}
int dlmalloc_trim(size_t pad) {
int got_some = 0;
for (unsigned i = 0; i < g_heapslen; ++i)
if (g_heaps[i])
got_some |= mspace_trim(g_heaps[i], pad);
return got_some;
}
size_t dlbulk_free(void *array[], size_t nelem) {
for (size_t i = 0; i < nelem; ++i)
mspace_free(0, array[i]);
return 0;
}
void dlmalloc_inspect_all(void handler(void *start, void *end,
size_t used_bytes, void *callback_arg),
void *arg) {
for (unsigned i = 0; i < g_heapslen; ++i)
if (g_heaps[i])
mspace_inspect_all(g_heaps[i], handler, arg);
}
forceinline mstate get_arena(void) {
unsigned cpu;
#ifdef __x86_64__
unsigned tsc_aux;
rdtscp(&tsc_aux);
cpu = TSC_AUX_CORE(tsc_aux);
#else
long tpidr_el0;
asm("mrs\t%0,tpidr_el0" : "=r"(tpidr_el0));
cpu = tpidr_el0 & 255;
#endif
return g_heaps[__magicu_div(cpu, magiu) % g_heapslen];
}
static void *dlmalloc_single(size_t n) {
return mspace_malloc(g_heaps[0], n);
}
static void *dlmalloc_threaded(size_t n) {
return mspace_malloc(get_arena(), n);
}
static void *dlcalloc_single(size_t n, size_t z) {
return mspace_calloc(g_heaps[0], n, z);
}
static void *dlcalloc_threaded(size_t n, size_t z) {
return mspace_calloc(get_arena(), n, z);
}
static void *dlrealloc_single(void *p, size_t n) {
return mspace_realloc(g_heaps[0], p, n);
}
static void *dlrealloc_threaded(void *p, size_t n) {
if (p)
return mspace_realloc(0, p, n);
else
return mspace_malloc(get_arena(), n);
}
static void *dlmemalign_single(size_t a, size_t n) {
return mspace_memalign(g_heaps[0], a, n);
}
static void *dlmemalign_threaded(size_t a, size_t n) {
return mspace_memalign(get_arena(), a, n);
}
static struct mallinfo dlmallinfo_single(void) {
return mspace_mallinfo(g_heaps[0]);
}
static struct mallinfo dlmallinfo_threaded(void) {
return mspace_mallinfo(get_arena());
}
static int dlmalloc_atoi(const char *s) {
int c, x = 0;
while ((c = *s++)) {
x *= 10;
x += c - '0';
}
return x;
}
static void use_single_heap(bool uses_locks) {
g_heapslen = 1;
dlmalloc = dlmalloc_single;
dlcalloc = dlcalloc_single;
dlrealloc = dlrealloc_single;
dlmemalign = dlmemalign_single;
dlmallinfo = dlmallinfo_single;
if (!(g_heaps[0] = create_mspace(0, uses_locks)))
__builtin_trap();
}
static void threaded_dlmalloc(void) {
int heaps, cpus;
const char *var;
if (!_weaken(pthread_create))
return use_single_heap(false);
if (!IsAarch64() && !X86_HAVE(RDTSCP))
return use_single_heap(true);
// determine how many independent heaps we should install
// by default we do an approximation of one heap per core
// this code makes the c++ stl go 164x faster on my ryzen
cpus = __get_cpu_count();
if (cpus == -1)
heaps = 1;
else if (!IsAarch64() && !X86_HAVE(RDTSCP))
heaps = 1;
else if ((var = getenv("COSMOPOLITAN_HEAP_COUNT")))
heaps = dlmalloc_atoi(var);
else
heaps = cpus >> 1;
if (heaps <= 1)
return use_single_heap(true);
if (heaps > ARRAYLEN(g_heaps))
heaps = ARRAYLEN(g_heaps);
// find 𝑑 such that sched_getcpu() / 𝑑 is within [0,heaps)
// turn 𝑑 into a fast magic that can divide by multiplying
magiu = __magicu_get(cpus / heaps);
// we need this too due to linux's cpu count affinity hack
g_heapslen = heaps;
// create the arenas
for (size_t i = 0; i < g_heapslen; ++i)
if (!(g_heaps[i] = create_mspace(0, true)))
__builtin_trap();
// install function pointers
dlmalloc = dlmalloc_threaded;
dlcalloc = dlcalloc_threaded;
dlrealloc = dlrealloc_threaded;
dlmemalign = dlmemalign_threaded;
dlmallinfo = dlmallinfo_threaded;
STRACE("created %d dlmalloc arenas for %d cpus", heaps, cpus);
}