Mirror of https://github.com/jart/cosmopolitan.git, synced 2025-06-03 11:12:27 +00:00
Make malloc() go 200x faster
If pthread_create() is linked into the binary, then the cosmo runtime will create an independent dlmalloc arena for each core. Whenever the malloc() function is used, it will index `g_heaps[sched_getcpu() / 2]` to find the arena with the greatest hyperthread / NUMA locality. This may be configured via an environment variable; for example, `export COSMOPOLITAN_HEAP_COUNT=1` restores the old single-heap behavior. Your process may be configured to have anywhere between 1 and 128 heaps.

We need this revision because it makes multithreaded C++ applications faster. For example, an HTTP server I'm working on that makes extreme use of the STL went from 16k to 2000k requests per second after this change was made. To understand why, try out the malloc_test benchmark, which calls malloc() + realloc() in a loop across many threads; it sees a 250x improvement in process clock time and 200x in wall time. A sketch of that kind of workload appears below.

The tradeoff is that this adds ~25ns of latency to individual malloc() calls compared to MODE=tiny, once the cosmo runtime has transitioned into a fully multithreaded state. If you don't need malloc() to be scalable, cosmo provides many options. For starters, the heap count variable above can be set to put the process back in single-heap mode. You can go even faster still if you include tinymalloc.inc, as many of the programs in tool/build/.. are already doing, since that'll also shave tens of kB off your binary footprint. There's also MODE=tiny, which is configured to use just one plain old dlmalloc arena by default.

Another tradeoff is that we need more memory now (except in MODE=tiny) to track the provenance of memory allocations. This is needed so allocations can be freely shared across threads, and because OSes can reschedule code to different CPUs at any time.
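To see what that benchmark exercises, here is a minimal sketch of a malloc() + realloc() stress loop in the same spirit; the thread and iteration counts are made up for illustration, and this is not the actual malloc_test source:

```c
// Minimal sketch of a multithreaded malloc()+realloc() stress loop.
// THREADS and ITERATIONS are hypothetical; error handling is omitted.
#include <pthread.h>
#include <stdlib.h>

#define THREADS    8
#define ITERATIONS 100000

static void *worker(void *arg) {
  (void)arg;
  for (int i = 0; i < ITERATIONS; ++i) {
    void *p = malloc(64);
    p = realloc(p, 128);  // realloc() in a loop, as the benchmark does
    free(p);
  }
  return 0;
}

int main(void) {
  pthread_t th[THREADS];
  // linking pthread_create() is what enables the per-cpu heaps
  for (int i = 0; i < THREADS; ++i)
    pthread_create(&th[i], 0, worker, 0);
  for (int i = 0; i < THREADS; ++i)
    pthread_join(th[i], 0);
}
```

With a single global arena, every thread contends on one lock in this loop; with one arena per CPU pair, the threads mostly proceed independently, which is where the 200x wall-time improvement comes from.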
commit 3609f65de3 (parent 9906f299bb)
60 changed files with 858 additions and 1064 deletions
third_party/dlmalloc/dlmalloc.h (vendored), 32 changed lines:

```diff
@@ -25,6 +25,28 @@
 #define dlrealloc_in_place __dlrealloc_in_place
+#define create_mspace_with_base __create_mspace_with_base
+#define mspace_bulk_free __mspace_bulk_free
+#define mspace_calloc __mspace_calloc
+#define mspace_footprint __mspace_footprint
+#define mspace_footprint_limit __mspace_footprint_limit
+#define mspace_free __mspace_free
+#define mspace_independent_calloc __mspace_independent_calloc
+#define mspace_independent_comalloc __mspace_independent_comalloc
+#define mspace_inspect_all __mspace_inspect_all
+#define mspace_mallinfo __mspace_mallinfo
+#define mspace_malloc __mspace_malloc
+#define mspace_malloc_stats __mspace_malloc_stats
+#define mspace_mallopt __mspace_mallopt
+#define mspace_max_footprint __mspace_max_footprint
+#define mspace_memalign __mspace_memalign
+#define mspace_realloc __mspace_realloc
+#define mspace_realloc_in_place __mspace_realloc_in_place
+#define mspace_set_footprint_limit __mspace_set_footprint_limit
+#define mspace_track_large_chunks __mspace_track_large_chunks
+#define mspace_trim __mspace_trim
+#define mspace_usable_size __mspace_usable_size
+
 COSMOPOLITAN_C_START_
 
 /*
@@ -41,7 +63,7 @@ COSMOPOLITAN_C_START_
   maximum supported value of n differs across systems, but is in all
   cases less than the maximum representable value of a size_t.
 */
-void* dlmalloc(size_t);
+extern void* (*dlmalloc)(size_t);
 
 /*
   free(void* p)
@@ -57,7 +79,7 @@ void dlfree(void*);
   Returns a pointer to n_elements * element_size bytes, with all locations
   set to zero.
 */
-void* dlcalloc(size_t, size_t);
+extern void* (*dlcalloc)(size_t, size_t);
 
 /*
   realloc(void* p, size_t n)
@@ -81,7 +103,7 @@ void* dlcalloc(size_t, size_t);
   The old unix realloc convention of allowing the last-free'd chunk
   to be used as an argument to realloc is not supported.
 */
-void* dlrealloc(void*, size_t);
+extern void* (*dlrealloc)(void*, size_t);
 
 /*
   realloc_in_place(void* p, size_t n)
@@ -110,7 +132,7 @@ void* dlrealloc_in_place(void*, size_t);
 
   Overreliance on memalign is a sure way to fragment space.
 */
-void* dlmemalign(size_t, size_t);
+extern void* (*dlmemalign)(size_t, size_t);
 
 /*
   mallopt(int parameter_number, int parameter_value)
@@ -233,7 +255,7 @@ void dlmalloc_inspect_all(void (*handler)(void*, void*, size_t, void*),
   thus be inaccurate.
 */
-struct mallinfo dlmallinfo(void);
+extern struct mallinfo (*dlmallinfo)(void);
 
 /*
   independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
```
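The diff above does two things: it turns the dl* entry points from direct declarations into function pointers, so the runtime can swap in per-heap dispatch, and it exposes dlmalloc's mspace API for building independent arenas. As a rough illustration of how per-CPU arenas can be built on that API, here is a minimal sketch; `NHEAPS`, `my_malloc()`, and the lazy initialization are hypothetical names and simplifications, not Cosmopolitan's actual implementation:

```c
// Minimal sketch, assuming dlmalloc was built with MSPACES: one arena per
// CPU pair, selected the way the commit message describes.
#define _GNU_SOURCE  // for sched_getcpu() on glibc
#include <sched.h>
#include <stddef.h>
#include "third_party/dlmalloc/dlmalloc.h"

#define NHEAPS 128              // upper bound from the commit message

static mspace g_heaps[NHEAPS];  // one dlmalloc arena per CPU pair

static void *my_malloc(size_t n) {
  int i = sched_getcpu() / 2;   // best hyperthread / NUMA locality
  if (!g_heaps[i])              // real code needs atomic initialization here
    g_heaps[i] = create_mspace(0, 1);  // 0 = default capacity, 1 = locked
  return mspace_malloc(g_heaps[i], n);
}

// Note: mspace_free() must be given the arena that made the allocation,
// which is why the runtime now spends extra memory tracking each
// allocation's provenance: threads migrate between CPUs, so a pointer may
// be freed far from the heap that produced it.
```

The function-pointer indirection in the header is what lets a program that never calls pthread_create() keep the plain single-arena fast path, while threaded programs get the per-CPU dispatch installed at runtime.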