mirror of https://github.com/jart/cosmopolitan.git (synced 2025-01-31 11:37:35 +00:00)
3609f65de3
If pthread_create() is linked into the binary, then the cosmo runtime will create an independent dlmalloc arena for each core. Whenever the malloc() function is used, it will index `g_heaps[sched_getcpu() / 2]` to find the arena with the greatest hyperthread / NUMA locality. This may be configured via an environment variable. For example, if you say `export COSMOPOLITAN_HEAP_COUNT=1` then you can restore the old behavior. Your process may be configured to have anywhere between 1 and 128 heaps.

We need this revision because it makes multithreaded C++ applications faster. For example, an HTTP server I'm working on that makes extreme use of the STL went from 16k to 2000k requests per second after this change was made. To understand why, try out the malloc_test benchmark, which calls malloc() + realloc() in a loop across many threads and sees a 250x improvement in process clock time and 200x on wall time.

The tradeoff is that this adds ~25ns of latency to individual malloc calls compared to MODE=tiny, once the cosmo runtime has transitioned into a fully multi-threaded state. If you don't need malloc() to be scalable, then cosmo provides many options for you. For starters, the heap count variable above can be set to put the process back in single heap mode. You can go even faster still if you include tinymalloc.inc, like many of the programs in tool/build/.. are already doing, since that'll shave tens of kB off your binary footprint too. There's also MODE=tiny, which is configured to use just one plain old dlmalloc arena by default.

Another tradeoff is that we need more memory now (except in MODE=tiny) to track the provenance of memory allocations. This is needed so allocations can be freely shared across threads, and because OSes can reschedule code to different CPUs at any time.
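To make the indexing concrete, here is a minimal sketch of the per-CPU arena selection described above. The `pick_heap()` helper, `g_heap_count` variable, and the `% g_heap_count` wraparound for reduced heap counts are hypothetical illustration, not the runtime's actual code, which also handles lazy arena creation and the COSMOPOLITAN_HEAP_COUNT override:

    #define _GNU_SOURCE
    #include <sched.h>

    #define MAX_HEAPS 128

    static void *g_heaps[MAX_HEAPS]; /* one dlmalloc arena (mspace) per slot */
    static int g_heap_count;         /* 1..128, e.g. from COSMOPOLITAN_HEAP_COUNT */

    /* hypothetical helper: pick the arena with the best hyperthread/NUMA locality */
    static void *pick_heap(void) {
      int cpu = sched_getcpu();                 /* core the calling thread is on  */
      if (cpu < 0) cpu = 0;                     /* sched_getcpu() can fail        */
      return g_heaps[(cpu / 2) % g_heap_count]; /* pair sibling hyperthreads      */
    }

With `COSMOPOLITAN_HEAP_COUNT=1`, every CPU maps to slot 0, which is the old single-arena behavior.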
101 lines
3 KiB
C++
/* --------------------------- Lock preliminaries ------------------------ */

/*
  When locks are defined, there is one global lock, plus
  one per-mspace lock.

  The global lock ensures that mparams.magic and other unique
  mparams values are initialized only once. It also protects
  sequences of calls to MORECORE. In many cases sys_alloc requires
  two calls that should not be interleaved with calls by other
  threads. This does not protect against direct calls to MORECORE
  by other threads not using this lock, so there is still code to
  cope as best we can with interference.

  Per-mspace locks surround calls to malloc, free, etc.
  By default, locks are simple non-reentrant mutexes.

  Because lock-protected regions generally have bounded times, it is
  OK to use the supplied simple spinlocks. Spinlocks are likely to
  improve performance for lightly contended applications, but worsen
  performance under heavy contention.

  If USE_LOCKS is > 1, the definitions of lock routines here are
  bypassed, in which case you will need to define the type MLOCK_T,
  and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
  and TRY_LOCK. You must also declare a
    static MLOCK_T malloc_global_mutex = { initialization values };.
*/
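
/*
  For illustration only (not part of this file): a USE_LOCKS > 1 override
  might supply the required macros with plain POSIX mutexes, e.g.:

    #include <pthread.h>
    #define MLOCK_T          pthread_mutex_t
    #define INITIAL_LOCK(lk) pthread_mutex_init(lk, NULL)
    #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
    #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
    #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
    #define TRY_LOCK(lk)     (!pthread_mutex_trylock(lk))
    static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;

  This is only a sketch of the customization point described above; the
  definitions that follow are what this file actually uses.
*/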

#if USE_SPIN_LOCKS

/* Simple test-and-set spin lock. Locking is skipped entirely until the
   runtime goes multithreaded (i.e. __threaded becomes nonzero). */

#define MLOCK_T atomic_uint

static int malloc_wipe(MLOCK_T *lk) {
  bzero(lk, sizeof(*lk));
  return 0;
}

static int malloc_lock(MLOCK_T *lk) {
  if (!__threaded) return 0;
  while (atomic_exchange_explicit(lk, 1, memory_order_acquire)) {
    pthread_pause_np();  /* cpu pause / spin-wait hint */
  }
  return 0;
}

static int malloc_unlock(MLOCK_T *lk) {
  if (!__threaded) return 0;
  atomic_store_explicit(lk, 0, memory_order_release);
  return 0;
}

#else

/* Otherwise use an *NSYNC mutex, which can block waiters rather than
   busy-spin under contention. */

#define MLOCK_T nsync_mu

static int malloc_wipe(MLOCK_T *lk) {
  bzero(lk, sizeof(*lk));  /* nsync_mu is valid when zero-initialized */
  return 0;
}

static int malloc_lock(MLOCK_T *lk) {
  if (!__threaded) return 0;
  nsync_mu_lock(lk);
  return 0;
}

static int malloc_unlock(MLOCK_T *lk) {
  if (!__threaded) return 0;
  nsync_mu_unlock(lk);
  return 0;
}

#endif

#define ACQUIRE_LOCK(lk) malloc_lock(lk)
#define RELEASE_LOCK(lk) malloc_unlock(lk)
#define INITIAL_LOCK(lk) malloc_wipe(lk)
#define DESTROY_LOCK(lk) malloc_wipe(lk)
#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);

static MLOCK_T malloc_global_mutex;
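
/*
  For context, an illustrative sketch (not code from this file): dlmalloc
  conventionally wraps its one-time parameter setup with the global lock,
  roughly:

    static int init_mparams(void) {
      ACQUIRE_MALLOC_GLOBAL_LOCK();
      if (mparams.magic == 0) {
        ... compute page size, granularity, magic exactly once ...
      }
      RELEASE_MALLOC_GLOBAL_LOCK();
      return 1;
    }

  This is what the comment above means by protecting mparams initialization
  and sequences of MORECORE calls.
*/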

#define USE_LOCK_BIT (2U)

struct malloc_chunk {
  size_t prev_foot;        /* Size of previous chunk (if free). */
  size_t head;             /* Size and inuse bits. */
  struct malloc_chunk* fd; /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk  mchunk;
typedef struct malloc_chunk* mchunkptr;
typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
typedef unsigned int bindex_t;        /* Described below */
typedef unsigned int binmap_t;        /* Described below */
typedef unsigned int flag_t;          /* The type of various bit flag sets */
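
/*
  For orientation (based on stock dlmalloc, whose exact definitions appear
  later in the full source rather than in this excerpt): the low bits of
  `head` are flag bits and the remaining bits are the chunk size, roughly:

    #define PINUSE_BIT   (1U)   // previous chunk is in use
    #define CINUSE_BIT   (2U)   // this chunk is in use
    #define chunksize(p) ((p)->head & ~(size_t)7)
    #define chunk2mem(p) ((void *)((char *)(p) + 2 * sizeof(size_t)))

  bindex_t indexes a bin, and binmap_t is a bitmap recording which bins are
  currently non-empty.
*/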