cosmopolitan/third_party/dlmalloc/init.inc
Justine Tunney 3609f65de3
Make malloc() go 200x faster
If pthread_create() is linked into the binary, then the cosmo runtime
will create an independent dlmalloc arena for each core. Whenever the
malloc() function is used, it indexes `g_heaps[sched_getcpu() / 2]` to
find the arena with the greatest hyperthread / NUMA locality. This may
be configured via an environment variable: for example, if you say
`export COSMOPOLITAN_HEAP_COUNT=1` then you restore the old behavior.
Your process may be configured to have anywhere between 1 and 128
heaps.
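
For reference, here's a minimal sketch of what a per-CPU arena lookup
like this can look like. Everything below except the quoted
`g_heaps[sched_getcpu() / 2]` expression is illustrative on my part,
not the actual runtime code:

    // hedged sketch of per-CPU arena selection, not the real thing
    #define _GNU_SOURCE
    #include <sched.h>

    #define MAX_HEAPS 128

    static struct malloc_state *g_heaps[MAX_HEAPS];  // one arena per core pair
    static int g_heap_count;  // from COSMOPOLITAN_HEAP_COUNT, clamped to [1,128]

    static struct malloc_state *my_arena(void) {
      int cpu = sched_getcpu();   // current CPU; may change at any time
      if (cpu < 0) cpu = 0;       // fall back if the lookup fails
      return g_heaps[(cpu / 2) % g_heap_count];  // /2 pairs hyperthread twins
    }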

We need this revision because it makes multithreaded C++ applications
faster. For example, an HTTP server I'm working on that makes extreme
use of the STL went from 16k to 2000k requests per second after this
change was made. To understand why, try out the malloc_test benchmark,
which calls malloc() + realloc() in a loop across many threads and
sees a 250x improvement in process clock time and a 200x improvement
in wall time.
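
If you want to measure this yourself outside the cosmo tree, a stress
loop in the same spirit is easy to write. This sketch is mine, not the
actual malloc_test source:

    // hedged sketch of a malloc()+realloc() stress test across threads
    #include <pthread.h>
    #include <stdlib.h>

    #define THREADS    16
    #define ITERATIONS 1000000

    static void *worker(void *arg) {
      (void)arg;
      for (int i = 0; i < ITERATIONS; ++i) {
        void *p = malloc(64);
        p = realloc(p, 128);  // grow, which may move the block
        free(p);
      }
      return 0;
    }

    int main(void) {
      pthread_t t[THREADS];
      for (int i = 0; i < THREADS; ++i)
        pthread_create(&t[i], 0, worker, 0);
      for (int i = 0; i < THREADS; ++i)
        pthread_join(t[i], 0);
      return 0;
    }

With a single global arena every iteration contends on one mutex; with
per-CPU arenas the threads mostly stay out of each other's way.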

The tradeoff is that this adds ~25ns of latency to individual malloc
calls compared to MODE=tiny, once the cosmo runtime has transitioned
into a fully multi-threaded state. If you don't need malloc() to be
scalable, cosmo provides several options. For starters, the heap count
variable above can be set to put the process back in single-heap mode.
You can go faster still by including tinymalloc.inc (see the sketch
after this paragraph), as many of the programs in tool/build/..
already do, since that also shaves tens of kB off your binary
footprint. There's also MODE=tiny, which is configured to use just one
plain old dlmalloc arena by default.
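
As promised, opting into tinymalloc is a single textual include. The
exact path below is an assumption on my part; copy whatever the
tool/build/.. programs actually do:

    // hedged sketch: the include path is an assumption, not verified
    #include "libc/mem/tinymalloc.inc"  // swaps in the tiny allocator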

Another tradeoff is that we need more memory now (except in
MODE=tiny) to track the provenance of each memory allocation. This is
necessary so allocations can be freely shared across threads, and
because the OS can reschedule a thread to a different CPU at any
time.
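
Concretely, "provenance" means free() can no longer assume a pointer
came from the current CPU's arena, so each allocation must record
which heap owns it. A conceptual sketch of that idea, in my own
framing rather than dlmalloc's actual chunk layout:

    // hedged conceptual sketch; dlmalloc's real chunk header differs
    struct tracked_block {
      struct malloc_state *owner;  // arena that allocated this block
      char payload[];              // the pointer malloc() hands out
    };

    // free() routes the block back to block->owner rather than the
    // current CPU's arena, since the scheduler may have migrated the
    // thread (or another thread entirely may be doing the freeing).
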
2024-06-05 02:02:14 -07:00


/* ---------------------------- setting mparams -------------------------- */
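/*
 * fork() in a multithreaded process copies mutexes in whatever state
 * they happen to be in. These pthread_atfork() handlers take every
 * arena lock before the fork, release them in the parent afterwards,
 * and reinitialize them in the child, whose former lock owners no
 * longer exist.
 */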
#if LOCK_AT_FORK
#if ONLY_MSPACES
static void dlmalloc_pre_fork(void) {
  mstate h;
  for (unsigned i = 0; i < ARRAYLEN(g_heaps); ++i)
    if ((h = atomic_load_explicit(&g_heaps[i], memory_order_acquire)))
      ACQUIRE_LOCK(&h->mutex);
}
static void dlmalloc_post_fork_parent(void) {
  mstate h;
  for (unsigned i = 0; i < ARRAYLEN(g_heaps); ++i)
    if ((h = atomic_load_explicit(&g_heaps[i], memory_order_acquire)))
      RELEASE_LOCK(&h->mutex);
}
static void dlmalloc_post_fork_child(void) {
  mstate h;
  for (unsigned i = 0; i < ARRAYLEN(g_heaps); ++i)
    if ((h = atomic_load_explicit(&g_heaps[i], memory_order_acquire)))
      (void)INITIAL_LOCK(&h->mutex);
}
#else
static void dlmalloc_pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); }
static void dlmalloc_post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
static void dlmalloc_post_fork_child(void) { (void)INITIAL_LOCK(&(gm)->mutex); }
#endif /* ONLY_MSPACES */
#endif /* LOCK_AT_FORK */
/* Initialize mparams */
/* Runs as an early constructor (priority 49) so that mparams and the
   main arena lock are ready before ordinary constructors call malloc(). */
__attribute__((__constructor__(49))) int init_mparams(void) {
#ifdef NEED_GLOBAL_LOCK_INIT
  if (malloc_global_mutex_status <= 0)
    init_malloc_global_mutex();
#endif
  // ACQUIRE_MALLOC_GLOBAL_LOCK();
  if (mparams.magic == 0) {
    size_t magic;
    size_t psize;
    size_t gsize;
#if defined(__COSMOPOLITAN__)
    psize = FRAMESIZE;
    gsize = FRAMESIZE;
#elif !defined(WIN32)
    psize = malloc_getpagesize;
    gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
#else /* WIN32 */
    {
      SYSTEM_INFO system_info;
      GetSystemInfo(&system_info);
      psize = system_info.dwPageSize;
      gsize = ((DEFAULT_GRANULARITY != 0)?
               DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
    }
#endif /* WIN32 */
    /* Sanity-check configuration:
       size_t must be unsigned and as wide as pointer type.
       ints must be at least 4 bytes.
       alignment must be at least 8.
       Alignment, min chunk size, and page size must all be powers of 2.
    */
    if ((sizeof(size_t) != sizeof(char*)) ||
        (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
        (sizeof(int) < 4) ||
        (MALLOC_ALIGNMENT < (size_t)8U) ||
        ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
        ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
        ((gsize & (gsize-SIZE_T_ONE)) != 0) ||
        ((psize & (psize-SIZE_T_ONE)) != 0))
      ABORT;
    mparams.granularity = gsize;
    mparams.page_size = psize;
    mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
    mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
#if MORECORE_CONTIGUOUS
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
#else /* MORECORE_CONTIGUOUS */
    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
#endif /* MORECORE_CONTIGUOUS */
#if !ONLY_MSPACES
    /* Set up lock for main malloc area */
    gm->mflags = mparams.default_mflags;
    (void)INITIAL_LOCK(&gm->mutex);
#endif
#if LOCK_AT_FORK
    pthread_atfork(&dlmalloc_pre_fork,
                   &dlmalloc_post_fork_parent,
                   &dlmalloc_post_fork_child);
#endif
    {
#if USE_DEV_RANDOM
      int fd;
      unsigned char buf[sizeof(size_t)];
      /* Try to use /dev/urandom, else fall back on _rand64() */
      if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
          read(fd, buf, sizeof(buf)) == sizeof(buf)) {
        magic = *((size_t *) buf);
        close(fd);
      }
      else
#endif /* USE_DEV_RANDOM */
        magic = (size_t)(_rand64() ^ (size_t)0x55555555U);
      magic |= (size_t)8U;   /* ensure nonzero */
      magic &= ~(size_t)7U;  /* improve chances of fault for bad values */
      /* Until memory modes commonly available, use volatile-write */
      (*(volatile size_t *)(&(mparams.magic))) = magic;
    }
  }
  // RELEASE_MALLOC_GLOBAL_LOCK();
#if ONLY_MSPACES
  threaded_dlmalloc();
#endif
  __runlevel = RUNLEVEL_MALLOC;
  return 1;
}
/* support for mallopt */
static int change_mparam(int param_number, int value) {
  size_t val;
  ensure_initialization();
  val = (value == -1)? MAX_SIZE_T : (size_t)value;
  switch (param_number) {
    case M_TRIM_THRESHOLD:
      mparams.trim_threshold = val;
      return 1;
    case M_GRANULARITY:
      if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
        mparams.granularity = val;
        return 1;
      } else {
        return 0;
      }
    case M_MMAP_THRESHOLD:
      mparams.mmap_threshold = val;
      return 1;
    default:
      return 0;
  }
}
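
/*
 * Usage sketch (not part of this file): change_mparam() is the backend
 * for mallopt(), and a value of -1 maps to MAX_SIZE_T per the
 * conversion above.
 *
 *   mallopt(M_TRIM_THRESHOLD, -1);       // never trim memory back to the OS
 *   mallopt(M_MMAP_THRESHOLD, 1 << 20);  // direct-mmap requests >= 1 MiB
 *   mallopt(M_GRANULARITY, 1 << 16);     // power of 2, >= the page size
 *
 * Each call returns 1 on success and 0 for an unknown or rejected param.
 */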