mirror of
https://github.com/jart/cosmopolitan.git
synced 2025-06-02 18:52:29 +00:00
Make malloc() go 200x faster
If pthread_create() is linked into the binary, then the cosmo runtime will create an independent dlmalloc arena for each core. Whenever the malloc() function is used it will index `g_heaps[sched_getcpu() / 2]` to find the arena with the greatest hyperthread / numa locality. This may be configured via an environment variable. For example if you say `export COSMOPOLITAN_HEAP_COUNT=1` then you can restore the old ways. Your process may be configured to have anywhere between 1 and 128 heaps. We need this revision because it makes multithreaded C++ applications faster. For example, an HTTP server I'm working on that makes extreme use of the STL went from 16k to 2000k requests per second, after this change was made. To understand why, try out the malloc_test benchmark which calls malloc() + realloc() in a loop across many threads, which sees a 250x improvement in process clock time and 200x on wall time. The tradeoff is this adds ~25ns of latency to individual malloc calls compared to MODE=tiny, once the cosmo runtime has transitioned into a fully multi-threaded state. If you don't need malloc() to be scalable then cosmo provides many options for you. For starters the heap count variable above can be set to put the process back in single heap mode, plus you can go even faster still, if you include tinymalloc.inc like many of the programs in tool/build/.. are already doing, since that'll shave tens of kb off your binary footprint too. There's also MODE=tiny which is configured to use just 1 plain old dlmalloc arena by default. Another tradeoff is we need more memory now (except in MODE=tiny), to track the provenance of memory allocation. This is so allocations can be freely shared across threads, and because OSes can reschedule code to different CPUs at any time.
This commit is contained in:
parent
9906f299bb
commit
3609f65de3
60 changed files with 858 additions and 1064 deletions
127
third_party/dlmalloc/dlmalloc.c
vendored
127
third_party/dlmalloc/dlmalloc.c
vendored
|
@ -26,25 +26,29 @@
|
|||
#include "libc/thread/tls.h"
|
||||
#include "third_party/nsync/mu.h"
|
||||
|
||||
#if !IsTiny()
|
||||
#define FOOTERS 1
|
||||
#define MSPACES 1
|
||||
#define ONLY_MSPACES 1 // enables scalable multi-threaded malloc
|
||||
#else
|
||||
#define INSECURE 1
|
||||
#define PROCEED_ON_ERROR 1
|
||||
#define FOOTERS 0
|
||||
#define MSPACES 0
|
||||
#define ONLY_MSPACES 0
|
||||
#endif
|
||||
|
||||
#define HAVE_MMAP 1
|
||||
#define HAVE_MREMAP 0
|
||||
#define HAVE_MORECORE 0
|
||||
#define USE_LOCKS 2
|
||||
#define USE_SPIN_LOCKS 0
|
||||
#define USE_SPIN_LOCKS 1
|
||||
#define MORECORE_CONTIGUOUS 0
|
||||
#define MALLOC_INSPECT_ALL 1
|
||||
#define ABORT_ON_ASSERT_FAILURE 0
|
||||
#define LOCK_AT_FORK 1
|
||||
#define NO_MALLOC_STATS 1
|
||||
|
||||
#if IsTiny()
|
||||
#define INSECURE 1
|
||||
#define PROCEED_ON_ERROR 1
|
||||
#endif
|
||||
|
||||
#if IsModeDbg()
|
||||
#define DEBUG 1
|
||||
#endif
|
||||
|
@ -56,24 +60,29 @@
|
|||
#define assert(x) if(!(x)) ABORT
|
||||
#endif
|
||||
|
||||
#include "third_party/dlmalloc/platform.inc"
|
||||
#include "third_party/dlmalloc/locks.inc"
|
||||
#include "third_party/dlmalloc/chunks.inc"
|
||||
#include "third_party/dlmalloc/headfoot.inc"
|
||||
#include "third_party/dlmalloc/global.inc"
|
||||
#include "third_party/dlmalloc/system.inc"
|
||||
#include "third_party/dlmalloc/hooks.inc"
|
||||
#include "third_party/dlmalloc/debugging.inc"
|
||||
#include "third_party/dlmalloc/indexing.inc"
|
||||
#include "third_party/dlmalloc/binmaps.inc"
|
||||
#include "third_party/dlmalloc/runtimechecks.inc"
|
||||
#include "third_party/dlmalloc/init.inc"
|
||||
#include "third_party/dlmalloc/debuglib.inc"
|
||||
#include "third_party/dlmalloc/statistics.inc"
|
||||
#include "third_party/dlmalloc/smallbins.inc"
|
||||
#include "third_party/dlmalloc/directmap.inc"
|
||||
#include "third_party/dlmalloc/trees.inc"
|
||||
#include "third_party/dlmalloc/management.inc"
|
||||
#include "platform.inc"
|
||||
#include "locks.inc"
|
||||
#include "chunks.inc"
|
||||
#include "headfoot.inc"
|
||||
|
||||
#if ONLY_MSPACES
|
||||
#include "threaded.inc"
|
||||
#endif
|
||||
|
||||
#include "global.inc"
|
||||
#include "system.inc"
|
||||
#include "hooks.inc"
|
||||
#include "debugging.inc"
|
||||
#include "indexing.inc"
|
||||
#include "binmaps.inc"
|
||||
#include "runtimechecks.inc"
|
||||
#include "init.inc"
|
||||
#include "debuglib.inc"
|
||||
#include "statistics.inc"
|
||||
#include "smallbins.inc"
|
||||
#include "directmap.inc"
|
||||
#include "trees.inc"
|
||||
#include "management.inc"
|
||||
|
||||
/* -------------------------- System allocation -------------------------- */
|
||||
|
||||
|
@ -585,29 +594,7 @@ static void* tmalloc_small(mstate m, size_t nb) {
|
|||
|
||||
#if !ONLY_MSPACES
|
||||
|
||||
#define FREEBIE_COUNT 32
|
||||
#define FREEBIE_MAXSIZE 2048
|
||||
|
||||
void* dlmalloc(size_t bytes) {
|
||||
|
||||
#if FREEBIE_COUNT && !defined(MODE_DBG)
|
||||
/* Allocate from thread-local freelist. */
|
||||
if (__threaded && bytes && bytes <= FREEBIE_MAXSIZE) {
|
||||
unsigned need = bytes;
|
||||
unsigned best_index = FREEBIE_COUNT;
|
||||
unsigned best_delta = FREEBIE_MAXSIZE + 1;
|
||||
struct CosmoTib *tib = __get_tls();
|
||||
for (int i = 0; i < FREEBIE_COUNT; ++i) {
|
||||
unsigned d = tib->tib_freelen[i] - need;
|
||||
best_index = d < best_delta ? i : best_index;
|
||||
best_delta = d < best_delta ? d : best_delta;
|
||||
}
|
||||
if (best_index < FREEBIE_COUNT) {
|
||||
tib->tib_freelen[best_index] = 0;
|
||||
return tib->tib_freemem[best_index];
|
||||
}
|
||||
}
|
||||
#endif
|
||||
void* dlmalloc_single(size_t bytes) {
|
||||
|
||||
/*
|
||||
Basic algorithm:
|
||||
|
@ -769,26 +756,6 @@ void dlfree(void* mem) {
|
|||
#define fm gm
|
||||
#endif /* FOOTERS */
|
||||
|
||||
#if FREEBIE_COUNT && !defined(MODE_DBG)
|
||||
/* Free small allocations locally. */
|
||||
if (__threaded) {
|
||||
struct CosmoTib *tib = __get_tls();
|
||||
for (int i = 0; i < FREEBIE_COUNT; ++i) {
|
||||
if (!tib->tib_freelen[i]) {
|
||||
if (is_inuse(p)) {
|
||||
size_t len = chunksize(p) - overhead_for(p);
|
||||
if (len && len < FREEBIE_MAXSIZE) {
|
||||
tib->tib_freelen[i] = len;
|
||||
tib->tib_freemem[i] = mem;
|
||||
return;
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Otherwise free memory globally. */
|
||||
if (!PREACTION(fm)) {
|
||||
check_inuse_chunk(fm, p);
|
||||
|
@ -881,7 +848,7 @@ void dlfree(void* mem) {
|
|||
#endif /* FOOTERS */
|
||||
}
|
||||
|
||||
void* dlcalloc(size_t n_elements, size_t elem_size) {
|
||||
void* dlcalloc_single(size_t n_elements, size_t elem_size) {
|
||||
void* mem;
|
||||
size_t req = 0;
|
||||
if (ckd_mul(&req, n_elements, elem_size)) req = -1;
|
||||
|
@ -977,10 +944,10 @@ static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
|
|||
|
||||
static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
|
||||
void* mem = 0;
|
||||
if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
|
||||
if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
|
||||
alignment = MIN_CHUNK_SIZE;
|
||||
/* alignment is 32+ bytes rounded up to nearest two power */
|
||||
alignment = 2ul << bsrl(MAX(MIN_CHUNK_SIZE, alignment) - 1);
|
||||
alignment = 2ul << bsrl(alignment - 1);
|
||||
if (bytes >= MAX_REQUEST - alignment) {
|
||||
if (m != 0) { /* Test isn't needed but avoids compiler warning */
|
||||
MALLOC_FAILURE_ACTION;
|
||||
|
@ -1267,7 +1234,7 @@ static void internal_inspect_all(mstate m,
|
|||
|
||||
#if !ONLY_MSPACES
|
||||
|
||||
void* dlrealloc(void* oldmem, size_t bytes) {
|
||||
void* dlrealloc_single(void* oldmem, size_t bytes) {
|
||||
void* mem = 0;
|
||||
if (oldmem == 0) {
|
||||
mem = dlmalloc(bytes);
|
||||
|
@ -1343,7 +1310,7 @@ void* dlrealloc_in_place(void* oldmem, size_t bytes) {
|
|||
return mem;
|
||||
}
|
||||
|
||||
void* dlmemalign(size_t alignment, size_t bytes) {
|
||||
void* dlmemalign_single(size_t alignment, size_t bytes) {
|
||||
if (alignment <= MALLOC_ALIGNMENT) {
|
||||
return dlmalloc(bytes);
|
||||
}
|
||||
|
@ -1421,7 +1388,7 @@ size_t dlmalloc_set_footprint_limit(size_t bytes) {
|
|||
}
|
||||
|
||||
#if !NO_MALLINFO
|
||||
struct mallinfo dlmallinfo(void) {
|
||||
struct mallinfo dlmallinfo_single(void) {
|
||||
return internal_mallinfo(gm);
|
||||
}
|
||||
#endif /* NO_MALLINFO */
|
||||
|
@ -1454,6 +1421,20 @@ size_t dlmalloc_usable_size(void* mem) {
|
|||
|
||||
#endif /* !ONLY_MSPACES */
|
||||
|
||||
#if ONLY_MSPACES
|
||||
void *(*dlmalloc)(size_t);
|
||||
void *(*dlcalloc)(size_t, size_t);
|
||||
void *(*dlrealloc)(void *, size_t);
|
||||
void *(*dlmemalign)(size_t, size_t);
|
||||
struct mallinfo (*dlmallinfo)(void);
|
||||
#else
|
||||
void *(*dlmalloc)(size_t) = dlmalloc_single;
|
||||
void *(*dlcalloc)(size_t, size_t) = dlcalloc_single;
|
||||
void *(*dlrealloc)(void *, size_t) = dlrealloc_single;
|
||||
void *(*dlmemalign)(size_t, size_t) = dlmemalign_single;
|
||||
struct mallinfo (*dlmallinfo)(void) = dlmallinfo_single;
|
||||
#endif
|
||||
|
||||
/* ----------------------------- user mspaces ---------------------------- */
|
||||
|
||||
#if MSPACES
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue